diff --git a/.env b/.env index 92cf7eea04..9448099e0c 100644 --- a/.env +++ b/.env @@ -61,3 +61,21 @@ ANONYMOUS_UUID= # nvidia for gpu, runc for cpu. SERVER_RUNTIME=nvidia + +# Dispatch training task to OPENPAI. +OPENPAI_HOST= +OPENPAI_TOKEN= +OPENPAI_STORAGE= +OPENPAI_USER= +OPENPAI_CLUSTER= +OPENPAI_GPUTYPE= + +# viewer +MONGODB_HOST_PORT=27019 +MONGODB_USE_CACHE=True + +# model deployment module env +# if you don't want to install model deployment module, set DEPLOY_MODULE_HOST_PORT to empty +DEPLOY_MODULE_HOST_PORT=18801 +DEPLOY_MODULE_URL=${DEPLOY_MODULE_HOST_PORT} +DEPLOY_MODULE_MYSQL_ROOT_PASSWORD=deploy_db_passwd diff --git a/.github/workflows/backend-pr.yml b/.github/workflows/backend-pr.yml index 8431d3ae3f..7ea5e6494e 100644 --- a/.github/workflows/backend-pr.yml +++ b/.github/workflows/backend-pr.yml @@ -1,12 +1,13 @@ # This workflow will install Python dependencies, run tests and lint with a single version of Python # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions -name: unittest +name: backend-unittest on: pull_request: branches: - master - dev + - prerelease env: PYTHONPATH: ./ymir/backend @@ -24,8 +25,17 @@ jobs: python-version: 3.8 - name: Install dependencies run: | - pip install tox + time pip install tox working-directory: ymir/backend + - name: Cache Tox Environment + uses: actions/cache@v3 + with: + path: | + ymir/backend/.tox + ymir/backend/.mypy_cache + # requirements.txt and requirements-dev.txt have versioning info that would + # impact the tox environment. + key: tox-${{ hashFiles('ymir/backend/requirements.txt', 'ymir/backend/requirements-dev.txt') }} - name: Test with tox run: | git config --global user.name 'CI-test' diff --git a/.github/workflows/command-pr.yml b/.github/workflows/command-pr.yml index c55d077197..344f1f90a1 100644 --- a/.github/workflows/command-pr.yml +++ b/.github/workflows/command-pr.yml @@ -1,13 +1,14 @@ # This workflow will install Python dependencies, run tests and lint with a single version of Python # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions -name: uniitest +name: command-unittest on: pull_request: branches: - master - dev + - prerelease jobs: command-ci: @@ -20,22 +21,29 @@ jobs: uses: actions/setup-python@v2 with: python-version: 3.8 + - name: Cache Python Environment + uses: actions/cache@v3 + with: + path: | + ymir/command/.mypy_cache + ${{ env.pythonLocation }} + key: ${{ env.pythonLocation }}-${{ hashFiles('ymir/command/requirements.txt', 'ymir/command/requirements-ci.txt') }} - name: Install dependencies run: | - pip install -r requirements.txt - pip install -r requirements-ci.txt + time pip install -r requirements.txt + time pip install -r requirements-ci.txt working-directory: ymir/command - name: mypy run: | - mypy mir + time mypy mir working-directory: ymir/command - name: flake8 run: | - flake8 mir + time flake8 mir working-directory: ymir/command - name: Test with pytest run: | git config --global user.name 'CI-TEST' git config --global user.email 'ci-test@ymir-team' - python tests/__main__.py + time python tests/__main__.py working-directory: ymir/command diff --git a/.github/workflows/executor-framework-pr.yml b/.github/workflows/exc-sdk-pr.yml similarity index 76% rename from .github/workflows/executor-framework-pr.yml rename to .github/workflows/exc-sdk-pr.yml index 09752f16f7..77565748ea 100644 --- a/.github/workflows/executor-framework-pr.yml +++ 
b/.github/workflows/exc-sdk-pr.yml @@ -8,11 +8,12 @@ on: branches: - master - dev + - prerelease paths: - 'docker_executor/sample_executor/**' jobs: - executor-framework-ci: + exc-sdk-ci: runs-on: ubuntu-latest @@ -24,17 +25,17 @@ jobs: python-version: 3.8 - name: Install dependencies run: | - pip install -r executor/requirements.txt - pip install -r executor/requirements-ci.txt + pip install -r ymir_exc/requirements.txt + pip install -r ymir_exc/requirements-ci.txt working-directory: docker_executor/sample_executor - name: mypy run: | mypy . - working-directory: docker_executor/sample_executor/executor + working-directory: docker_executor/sample_executor/ymir_exc - name: flake8 run: | flake8 . - working-directory: docker_executor/sample_executor/executor + working-directory: docker_executor/sample_executor/ymir_exc - name: Test with pytest run: | python tests/__main__.py diff --git a/.github/workflows/hel-pr.yml b/.github/workflows/hel-pr.yml new file mode 100644 index 0000000000..69e0816fdf --- /dev/null +++ b/.github/workflows/hel-pr.yml @@ -0,0 +1,48 @@ +name: hel-unittest + +on: + pull_request: + branches: + - master + - dev + - prerelease + +jobs: + hel-ci: + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v2 + + - name: Set up Go + uses: actions/setup-go@v2 + with: + go-version: 1.18 + + - name: Cache Go Environment + uses: actions/cache@v3 + with: + path: | + ~/.cache/go-build + ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go- + + - name: Verify dependencies + working-directory: ymir/backend/src/ymir_hel + run: | + time go mod verify + time go install honnef.co/go/tools/cmd/staticcheck@latest + + - name: golangci-lint + uses: golangci/golangci-lint-action@v3 + with: + version: v1.48 + working-directory: ymir/backend/src/ymir_hel + + - name: Lint and test code + working-directory: ymir/backend/src/ymir_hel + run: | + git config --global user.name 'CI-TEST' + git config --global user.email 'ci-test@ymir-team' + bash run_tests.sh diff --git a/.github/workflows/web-pr.yml b/.github/workflows/web-pr.yml index 6b5bf673fe..29665cda36 100644 --- a/.github/workflows/web-pr.yml +++ b/.github/workflows/web-pr.yml @@ -5,7 +5,11 @@ name: Node.js CI on: pull_request: - branches: [ master, dev ] + branches: + - master + - dev + - prerelease + jobs: web-ci: @@ -14,14 +18,15 @@ jobs: steps: - uses: actions/checkout@v2 - - name: Use Node.js 16.x - uses: actions/setup-node@v2 + - name: Use Node.js 16.10 + uses: actions/setup-node@v3 with: - node-version: 16.x + # node-version: 16.12 or above has a caching issue. + node-version: 16.10 cache: 'npm' cache-dependency-path: ymir/web/package-lock.json - name: Test with npm run: | - npm install - npm run test:coverage + time npm ci + time CI=true npm run test:coverage working-directory: ymir/web diff --git a/.gitignore b/.gitignore index 16d6381e98..df34016891 100755 --- a/.gitignore +++ b/.gitignore @@ -47,6 +47,9 @@ coverage.xml .hypothesis/ venv/ .python-version +Library/ +.mypy_cache/ +.gitconfig # Translations *.mo @@ -81,7 +84,11 @@ output/ mysql/ redis/ ymir-data/ -ymir-workplace +ymir-workplace* .mir_lock .local .pre-commit-config.yaml +ymir/backend/alembic +ymir/backend/static + +*/updater/app/mir/protos/*.pyi diff --git a/README.md b/README.md index 1b4025e893..d994cab790 100755 --- a/README.md +++ b/README.md @@ -30,16 +30,15 @@
 
- -[📘Usage Instruction](https://github.com/IndustryEssentials/ymir/blob/master/README_zh-CN.md#3-gui%E4%BD%BF%E7%94%A8-%E5%85%B8%E5%9E%8B%E6%A8%A1%E5%9E%8B%E7%94%9F%E4%BA%A7%E6%B5%81%E7%A8%8B) | -[🛠️Installation](https://github.com/IndustryEssentials/ymir/blob/master/README_zh-CN.md#2-%E5%AE%89%E8%A3%85) | -[👀Equipment](https://c1n.cn/obvHf) | + +[📘Usage Instruction](https://github.com/IndustryEssentials/ymir/wiki/Operating-Instructions) | +[🛠️Installation](README.md#2-installation) | [🚀Projects](https://github.com/IndustryEssentials/ymir/projects) | [🤔Issues Report](https://github.com/IndustryEssentials/ymir/issues/new/choose) | [📰License](https://github.com/IndustryEssentials/ymir/blob/master/LICENSE) - +   - +
@@ -47,27 +46,70 @@ 📫 Feedback on usage issues: contact.viesc@gmail.com / Professional consulting for server equipment: tensor.station@gmail.com
 
 
+English | [简体中文](README_zh-CN.md) + # Citations If you wish to refer to YMIR in your work, please use the following BibTeX entry. ```bibtex @inproceedings{huang2021ymir, - title={YMIR: A Rapid Data-centric Development Platform for Vision Applications}, + title={YMIR: A Rapid Data-centric Development Platform for Vision Applications}, author={Phoenix X. Huang and Wenze Hu and William Brendel and Manmohan Chandraker and Li-Jia Li and Xiaoyu Wang}, booktitle={Proceedings of the Data-Centric AI Workshop at NeurIPS}, year={2021}, } ``` -## Introduction # What's new -English | [简体中文](README_zh-CN.md) +Version 2.0.0 updated on 11/08/2022 + +YMIR platform +- A new model performance diagnosis module. +- A new function for visual evaluation of model inference results. +- A new public algorithm library with a variety of built-in high-precision algorithms. +- A new one-click deployment function, supporting the deployment of algorithms to certified devices. +- New operating instructions. +- Refactored code structure. + +Docker +- Support [yolov5](https://github.com/ultralytics/yolov5) +- Support [mmdetection](https://github.com/open-mmlab/mmdetection) +- Support [yolov7](https://github.com/wongkinyiu/yolov7) +- Support [detectron2](https://github.com/facebookresearch/detectron2) +- Support [An Extendable, Efficient and Effective Transformer-based Object Detector](https://github.com/naver-ai/vidt) +- Support [ymir image testing tool library](https://github.com/modelai/ymir-executor-verifier) +- Support [demo sample image creation documentation](https://github.com/modelai/ymir-executor-fork/tree/ymir-dev/det-demo-tmi) +- Support [ymir mirror development extension library](https://github.com/modelai/ymir-executor-sdk) + +View more at [ymir-executor-fork](https://github.com/modelai/ymir-executor-fork). + +Within the public docker images +- Update yolov5 training image: youdaoyzbx/ymir-executor:ymir1.3.0-yolov5-cu111-tmi +- Update the mmdetection training image: youdaoyzbx/ymir-executor:ymir1.3.0-mmdet-cu111-tmi +- Update the yolov5 training image to support rv1126 chip deployment: youdaoyzbx/ymir-executor:ymir1.3.0-yolov5-cu111-modelstore +- Update the training image to support yolov5-v6.2: youdaoyzbx/ymir-executor:ymir1.3.0-yolov5-v6.2-cu111-tmi + +For more code updates, see [ymir-dev](https://github.com/modelai/ymir-executor-fork/tree/ymir-dev). + +# Deployment Prerequisite (optional) + +YMIR supports deploying trained models and public algorithm models directly to certified devices. For more hardware specs, please check [the details](https://i-item.jd.com/10065116628109.html). + +
+ +
 
 
+ +## Introduction + **Catalog** - [Citations](#citations) +- [What's New](#whats-new) +- [Deployment Prerequisite (optional)](#deployment-prerequisite-optional) - [1. Introduction to AI SUITE-YMIR](#1-introduction-to-ai-suite-ymir) - [1.1. Main functions](#11-main-functions) - [1.2. Apply for trial](#12-apply-for-trial) @@ -76,37 +118,13 @@ English | [简体中文](README_zh-CN.md) - [2.2. Installation of YMIR-GUI](#22-installation-of-ymir-gui) - [2.3. Installation of label studio (optional)](#23-installation-of-label-studio-optional) - [3. Use YMIR-GUI: typical model production process](#3-use-ymir-gui-typical-model-production-process) - - [3.1. Label management](#31-label-management) - - [3.2. Project management](#32-project-management) - - [3.2.1 Iteration data preparation](#321-iteration-data-preparation) - - [3.2.2 Initial model preparation](#322-initial-model-preparation) - - [3.2.3 Mining data preparation](#323-mining-data-preparation) - - [3.2.4 Data mining](#324-data-mining) - - [3.2.5 Data labeling](#325-data-labeling) - - [3.2.6 Update trainingset](#326-update-trainingset) - - [3.2.7 Model iteration](#327-model-iteration) - - [3.2.8 Model validation](#328-model-validation) - - [3.2.9 Model download](#328-model-download) - [4. For advanced users: YMIR-CMD (command line) user's guide](#4-for-advanced-users-ymir-cmd-command-line-users-guide) - [4.1 Installation](#41-installation) - [4.2 Typical model production process](#42-typical-model-production-process) - - [4.2.1 Preparation of external data](#421-preparation-of-external-data) - - [4.2.2 Create local repo and import data](#422-create-local-repo-and-import-data) - - [4.2.3 Merge and filter](#423-merge-and-filter) - - [4.2.4 Train the initial model](#424-train-the-initial-model) - - [4.2.5 Data mining](#425-data-mining) - - [4.2.6 Data labeling](#426-data-labeling) - - [4.2.7 Model iteration-data merging](#427-model-iteration-data-merging) - - [4.2.8 Model iteration-model training](#428-model-iteration-model-training) - - [4.3. YMIR-CMD manual](#43-ymir-cmd-manual) - [5. Get the code](#5-get-the-code) - - [5.1. YMIR repos](#51-ymir-repos) - - [5.2. Code contribution](#52-code-contribution) - - [5.3. About training, inference and mining docker images](#53-about-training-inference-and-mining-docker-images) + - [5.1. Code contribution](#51-code-contribution) + - [5.2. About training, inference and mining docker images](#52-about-training-inference-and-mining-docker-images) - [6. Design concept](#6-design-concept) - - [6.1. Life of a dataset](#61-life-of-a-dataset) - - [6.1.1. Introduction to a dataset](#611-introduction-to-a-dataset) - - [6.1.2. Branch and dataset management](#612-branch-and-dataset-management) - [7. MISC](#7-misc) - [7.1. FAQ](#71-faq) @@ -117,9 +135,9 @@ English | [简体中文](README_zh-CN.md) As a streamlined model development product, YMIR(You Mine In Recursion) focuses on the dataset versioning and model iteration in the AI SUITE open-source series.
- +
 
 
- + AI commercialization is currently reaching a stage of maturity in terms of computing hardwares, algorithms, etc. The adoption of AI often encounter challenges such as a lack of skilled developers, high development costs and long iteration cycles. As a platform, YMIR provides an end-to-end AI development system. This platform reduces costs for companies using artificial intelligence and accelerates the adoption of artificial intelligence. YMIR provides ML developers with one-stop services for data processing, model training, and other steps required in the AI development cycle. @@ -140,28 +158,35 @@ A typical model development process can usually be summarized in a few steps: de * Deploy model: Models are developed and trained based on previously available data (possibly test data). After a satisfactory model is obtained, it will be applied to real data to make predictions at scale. -YMIR platform mainly meets the needs of users to produce models at scale, provides users with a good and easy-to-use display interface, and facilitates the management and viewing of data and models. The platform contains main functional modules such as project management, tag management, system configuration, etc. It supports the realization of the following main functions. +YMIR platform mainly meets the needs of users to produce models at scale, provides users with a good and easy-to-use display interface, and facilitates the management and viewing of data and models. The platform contains main functional modules such as project management, tag management, model deployment, system configuration, docker image management, etc. It supports the realization of the following main functions. -|Function Module|Primary Function|Secondary Function|Function Description| +| Function Module | Primary Function | Secondary Function | Function Description | |----------|-----------|------------|-----------------------------------------| |Project Management|Project Management|Project Editing|Supports adding, deleting, and editing projects and project information| |Project Management|Iteration Management|Iteration Preparation|Supports setting up the dataset and model information needed for iteration| |Project Management|Iteration Management|Iteration Steps|Support to populate the data from the previous round to the next step corresponding to the task parameters| |Project Management|Iteration Management|Iteration Charts|Support to display the datasets and models generated during the iterative process in the interface as a graphical comparison| -|Project Management|Dataset Management|Import datasets|Support users to import prepared datasets by copying public datasets, url addresses, paths and local imports| +|Project Management|Dataset Management|Import datasets|Support users to import prepared datasets by copying public datasets, url addresses, paths, and local imports| |Project Management|Data Set Management|View Data Sets|Supports visualization of image data and annotations, and viewing of historical information| |Project Management|Data Set Management|Edit Data Set|Support editing and deleting data sets |Project Management|Dataset Management|Dataset Versions|Support creating new dataset versions on the source dataset, with the version number incremented by time| |Project Management|Data Set Management|Data Preprocessing|Support image data fusion, filtering, sampling operations| -|Project Management|Dataset Management|Data Mining|Supports finding the most beneficial data for model optimization in a large number of datasets|
+|Project Management|Data Set Management|Data Mining|Supports finding the most beneficial data for model optimization in a large number of data sets| |Project Management|Data Set Management|Data Annotation|Support for adding annotations to image data| |Project Management|Data Set Management|Data Inference|Supports adding annotations to a data set by specifying a model| |Project Management|Model Management|Model Import|Support local import of model files to the platform| -|Project Management|Model Management|Training Models|Supports training models by selecting datasets, labels, and adjusting training parameters according to requirements, and viewing the corresponding model results after completion +|Project Management|Model Management|Training Models|Support to select datasets, labels, and adjust training parameters to train models according to requirements, and view the corresponding model results after completion| |Project Management|Model Management|Model Validation|Support uploading a single image to check the performance of the model in real images through visualization to verify the accuracy of the model| |Tag management|Tag management|Add tags|Support adding primary names and aliases of training tags| -|System configuration|Mirror management|My mirrors|Support adding custom mirrors to the system (available for administrators only)| -|System Configuration|Mirror Management|Public Mirror|Support to view public mirrors uploaded by others and copy them to your own system| +|Model Deployment|Algorithm Management|Public Algorithm|Support algorithm customization, view public algorithms and try them out, support adding to my algorithms| +|Model Deployment|Algorithm Management|Public Algorithm|Support publishing my algorithms to public algorithms| +|Model Deployment|Algorithm Management|My Algorithms|Support for viewing and editing my published algorithms and added algorithms| +|Model Deployment|Algorithm Management|Deploy Algorithms|Support deploying my algorithms to devices and viewing deployment history| +|Model Deployment|Device Management|View Devices|Support viewing device information and deployment history| +|Model Deployment|Device Management|Edit Device|Support adding, deploying, and deleting devices| +|Model Deployment|Device Management|Support Devices|Support viewing and purchasing of supported devices| +|System Configuration|Mirror Management|My Mirrors|Support for adding custom mirrors to the system (available to administrators only)| +|System Configuration|Mirror Management|Public Mirror|Support for viewing public mirrors uploaded by others and copying them to your own system| |System Configuration|Permissions Configuration|Permissions Management|Support for configuring user permissions (available only to administrators)| ## 1.2. Apply for trial @@ -176,6 +201,8 @@ How do users choose to install GUI or CMD? 2. If you need to modify the default configuration of the system, it is recommended to install CMD; +3. If you have already deployed a previous version of YMIR, please refer to the [Upgrade Instructions](https://github.com/Aryalfrat/ymir/blob/dev/ymir/updater/readme.md). + This chapter contains the installation instructions for YMIR-GUI. If you need to use CMD, please refer to the [Ymir-CMD user guide](#4-for-advanced-users-ymir-cmd-command-line-users-guide). ## 2.1. Environment dependencies @@ -186,12 +213,42 @@ This chapter contains the installation instructions for YMIR-GUI. 
If you need to * Installation of Docker and Docker Compose https://docs.docker.com/get-docker/ -* Installation of NVIDIA Docker https://github.com/NVIDIA/nvidia-docker +* Installation of `nvidia-docker` [nvidia-docker install-guide](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#docker) + +```sh +## check the maximum CUDA version supported by the host +nvidia-smi +## if the host supports CUDA 11+, check nvidia-docker with +sudo docker run --rm --gpus all nvidia/cuda:11.0.3-base-ubuntu20.04 nvidia-smi +## if the host supports CUDA 10+, check nvidia-docker with +sudo docker run --rm --gpus all nvidia/cuda:10.2-base-ubuntu18.04 nvidia-smi +## these commands should produce console output like the following: ++-----------------------------------------------------------------------------+ +| NVIDIA-SMI 510.60.02 Driver Version: 510.60.02 CUDA Version: 11.6 | +|-------------------------------+----------------------+----------------------+ +| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | +| | | MIG M. | +|===============================+======================+======================| +| 0 Tesla P4 Off | 00000000:0B:00.0 Off | 0 | +| N/A 62C P0 55W / 75W | 4351MiB / 7680MiB | 94% Default | +| | | N/A | ++-------------------------------+----------------------+----------------------+ ++-----------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=============================================================================| +| 0 N/A N/A 8132 C 4349MiB | ++-----------------------------------------------------------------------------+ +``` 3. Hardware Suggestions * NVIDIA GeForce RTX 2080 Ti or higher is recommended. +* The maximum CUDA version supported by the host should be >= 11.2 + ## 2.2. Installation of YMIR-GUI The user must ensure that all the conditions in [Cuda environment dependencies](#21-cuda-environment-dependencies) have been met, otherwise the installation may fail. @@ -204,7 +261,7 @@ The YMIR-GUI project package is on DockerHub and the steps to install and deploy git clone git@github.com:IndustryEssentials/ymir.git ``` -2. If there is no available GPU and you need to install CPU mode, please change it to CPU boot mode by modifying the .env file to change the SERVER_RUNTIME parameter to runc: +2. If there is no available GPU and you need to install CPU mode, please change it to CPU boot mode by modifying the .env file to change the SERVER_RUNTIME parameter to runc: `# nvidia for gpu, runc for cpu.` @@ -222,6 +279,14 @@ LABEL_TOOL_HOST_PORT=set_your_label_tool_HOST_PORT ``` +* The default port number for YMIR's Model Deployment module is 18801. If it conflicts with another service, go to the YMIR directory and modify the .env file to configure the Model Deployment port and MySQL access password: + +``` +DEPLOY_MODULE_HOST_PORT=18801 +DEPLOY_MODULE_URL=${DEPLOY_MODULE_HOST_PORT} +DEPLOY_MODULE_MYSQL_ROOT_PASSWORD=deploy_db_passwd +``` + Execute the start command after the modification: `bash ymir.sh start`. 4. After the service successfully started, YMIR will be available at [http://localhost:12001/](http://localhost:12001/). If you need to **stop the service**, run the command: `bash ymir.sh stop` @@ -286,11 +351,11 @@ docker-compose -f docker-compose.label_studio.yml down # 3.
Use YMIR-GUI: typical model production process -This section uses a complete model iteration process as an example to illustrate how to use the YMIR platform. - -![YMIR-GUI process](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/YMIR-GUI-process.jpeg) +
+ +
 
 
-As shown in the figure above, YMIR divides the model development process into multiple steps. Details about how to run each step are listed in the subsequent sections. +As shown in the figure, YMIR divides the model development process into multiple steps. Details about how to run each step are listed in the subsequent sections. Data and labels are necessary for the training of deep learning, and the training requires a large amount of data with labels. However, what exists in reality is a large amount of unlabeled data, which is too costly in terms of labor and time if all of them are manually labeled. @@ -298,151 +363,7 @@ Therefore, YMIR platform, through active learning, first attains an initial mode The updated dataset is used to train the model again to improve the model capability. The YMIR platform provides a more efficient approach than labeling the entire data and then training it, reducing the cost of labeling low-quality data. Through the cycle of mining, labeling, and training, high quality data is expanded and the model capability is improved. -## 3.1. Label management - -When you need to import a dataset with annotation files, please make sure the annotation type belongs to the existing label list of the system, otherwise you need to go to the label management interface to add custom labels in order to import the data. The following figure shows: - -![Label management](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/label%20management.jpg) - -This time we add the tags 'helmet_head' 'no_helmet_head' to the list, the primary name and alias of the label indicate the same type of label. When the annotation of some dataset contains alias, it will be merged to primary name when importing. For example, if the label list contains the 'bike' (alias 'bicycle'), and a dataset A (containing only the 'bicycle') is imported, it will be displayed as 'bike' in the dataset details after import. - -## 3.2. Project management - -Users create projects according to their training goals(helmet_head,no_helmet_head) and set the target information such as mAP value, iteration rounds, etc. of the goals. As shown in the figure below: - -## 3.2.1. Iteration data preparation - -The user prepares the mining set to be used for data mining (which may not need to contain annotation files) and datasets with training targets (training set and test set) for training an initial model. Before importing, please ensure that the format of the dataset meets the following requirements: - -* The dataset is in.zip format, it should contain two folders named as "images" and "annotations" respectively; -* Images: create an ''images'' folder and place images in it. The formats currently supported by this platform are limited to jpg, jpeg, and png; -* Annotations: create an "annotations" folder and place annotation files formatted as [Pascal VOC](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/htmldoc/devkit_doc.html#SECTION00093000000000000000) (if there are no annotation files, leave the folder empty); - -The platform supports four kinds of dataset importing: public dataset replication, network importing, local importing, and path importing. - -![import guide](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/import1.jpg) - -(1) public dataset replication: the user can copy the built-in dataset of the super administrator to the current operating user. 
The user can filter and import the label categories they need, as shown in the figure below: - -![public dataset](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/public%20dataset.jpg) - -Select the dataset and choose whether you want to synchronize the labels contained in the public dataset, click [OK] to start copying. - -(2) Network import: users need to enter the URL path corresponding to the dataset as shown in the following: - -![inter import](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/net%20import.jpg) - -(3) Local import: users needs to upload a zip file of the local dataset in the following format. The zip size is recommended to be no more than 200MB. - -Users can download the example **Sample.zip** for reference as follows: - -![local import](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/local%20import.jpg) - -(4) Path Import: - -1. Download the open-source dataset VOC2012 ([Click to download VOC2012](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar)) and unzip it. Change the folder name as required, and then compressing them separately into zip packages that meet the import requirements. - -2. Place dataset VOC2012 under `ymir-workplace/ymir-sharing`. - -3. Select 'path import' and enter the absolute path of the dataset in the server: `/ymir-sharing/voc2012`, as shown in the figure below: - -![path import](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/path%20import.jpg) - -After finishing the import of the initial dataset, click [Dataset Setting] to complete the corresponding dataset and mining strategy settings. The training set has been set as the default system training set when creating the project, and cannot be changed. - -![Iteration data prepare](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/iteration%20data%20prepare.jpg) - -## 3.2.2. Initial model preparation - -The user prepares the model for the initial iteration, either by local import or by model training. For local import, it is necessary to ensure that the model is in the required format. - -* Only models generated by the YMIR system are supported. -* The uploaded file should be less than 1024 MB. -* the detection target of the uploaded model file should be consistent with the project target. - -Model training can be done by clicking the [Training] operation button on the dataset list interface to jump to the Create Model Training interface, as shown in the following figure: - -![training1](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/training1.jpg) - -Select the training set (train1 V1), select the test set (val V1), select the training target (helmet_head, no_helmet_head), select the pre-training model (not required), training docker, training type, algorithm framework, backbone network structure, number of GPUs and configure the training parameters (training parameters provide default values, the default parameters in the key value can not be modified, the value value can be modified, if you want to add parameters can be added), click create task. If you want to add parameters, you can add them yourself), click Create Task. As shown in the figure below, the initial model is trained. - -![training2](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/training2.jpg) - -After successful creation, users can view the corresponding task progress and information on the task management page. 
Users can view the accuracy of the trained model (mAP value) after the task is completed. - -After finishing the import or training of the initial model, click [Select Model] to select the initial model and finish the setup.After both the iteration data and the model are prepared, the iteration is started. - -- **Model iterations (Improve accuracy through iteration)** - -When iteration is enabled, YMIR provides a standardized model iteration process and helps users fill in the results of the previous operation by default in each step of the operation, so that ordinary users can follow the established steps to complete the complete model iteration process. - -## 3.2.3. Mining data preparation - -YMIR provides data mining algorithms that support million-level data mining to quickly find the most favorite data for model optimization. - -[Mining Data Preparation] provides users with the data to be mined, and the original data set here is the mining set set set by project setting by default. The operation process is shown in the following figure. - -![mining data preparation 1](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/miningdata%20preparation.jpg) -![mining data preparation 2](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/miningdata%20preparation2.jpg) - -Click [Next] after the operation is completed to open the [Data Mining] process. - -## 3.2.4. Data mining - -The user can use the model obtained from the initial training to perform data mining on the dataset to be mined. Click the [Data Mining] button to jump to the data mining interface, as shown in the following figure. - -![mine1](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/mining1.jpg) -![mine2](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/mining2.jpg) - -The default original dataset is the result dataset prepared from the last mining data, and the default model is the initial model set in the iterative preparation. The user must also enter the filter TOPK as 500 (the first 500 successfully mined images), and set custom parameters if necessary. - -After successful creation, users can view the mining progress and the result on the dataset management page. - -## 3.2.5. Data labeling - -If the data mined in the previous step does not have labels, users need to label them. Users can click the [Label] button on the task management page to jump to the data annotation interface as shown in the following figure. - -![label1](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/labeling1.jpg) - -The default original dataset is the result dataset obtained from the last mining. The user must also ente rthe email address of the annotator, and the labeling target (helmet_head, no_helmet_head). If you want to check the labeling platform by yourself, please click "View on labeling platform" and fill in your labeling platform account. If you have more detailed requirements for the annotation, you can upload the annotation description document for the annotator's reference. You must register with the labeling system in advance. You can click "Register Labeling Platform Account" at the bottom to jump to the Label Studio labeling platform to register their labeling account. 
Click on Create Task, as shown in the figure below: - -![label2](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/labeling2.jpg) - -After successful creation, users can view the labeling progress and other information on the dataset management interface. After the operation completed, the YMIR will automatically retrieve the annotation results and generate a new dataset with the new annotation. - -## 3.2.6. Update trainingset - -After the labeling is completed, the labeled data set is merged into the training set and the merged results are generated into a new version of the training set. The following figure shows. - -![update1](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/update1.jpg) -![update2](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/update2.jpg) - -## 3.2.7. Model iteration - -![process-en](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images//process-en.jpeg) - -After the merging is completed, the model is trained again to generate a new version of the model, as shown below. - -![model iteration1](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/model%20iteration1.jpg) -![model iteration2](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/model%20iteration2.jpg) - -Users can download the models that meet their expectations. Or continue to the next iteration to further optimize the model. - -## 3.2.8. Model validation - -After training the model, users can validate the model. On the [Model Management] page, you can click the [Verify] button of the corresponding model to jump to the [Model Validation] page. As shown in the following figure: - -![model val1](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images//model%20ver1.jpg) - -Select the validation mirror, adjust the parameters, click the [Upload Image] button, select the local image to upload, click [Model Validation], and display the results as follows. - -![model val2](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/model_ver2.jpg) - -## 3.2.9. Model download - -Users can click the [Download] button on the [Model List] page. The downloaded file is a tar package, which contains the network structure of the model, network weights, hyper-parameter configuration files, training environment parameters, and results. As shown below: - -![model download](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/model-download.jpeg) +This section uses a complete model iteration process as an example to illustrate how to use the YMIR platform. Please check [Operating Instructions](https://github.com/IndustryEssentials/ymir/wiki/Operating-Instructions). # 4. For advanced users: YMIR-CMD (command line) user's guide @@ -471,337 +392,11 @@ $ mir --version ![process-en](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/process-en.jpeg) -The above figure shows a typical process of model training: 1) the user prepares external data, 2) imports it into the system, 3) appropriately filters the data, and 4) begins training to obtain a model (possibly with low accuracy). 5) selects images in a dataset to be mined that are suitable for further training based on this model, 6) annotates these images, 7) merges the annotated results with the original training set, and 8) uses the merged results to run the training process again to obtain a better model. -This section implement the process shown above using the command line. 
-All the following commands are prefixed with $ (which is also the Linux prompt under the normal user). When entering a command in the console, the user does not need to enter $ at the same time. - -### 4.2.1 Preparation of external data - -The system has the following requirements for external data. - -1. With [VOC annotations](https://towardsdatascience.com/coco-data-format-for-object-detection-a4c5eaf518c5) - -2. The paths to all images (collectively referred to as assets or media in this system) need to be written uniformly in the "index.tsv" file. All annotation files need to be in the same directory. - -3. The user must have read access to "index.tsv," all images, and all annotation files. - -We take the pascal 2017 test dataset as an example. Download the dataset "VOC2012test.tar" from the official website and unpack. - -``` -$ tar -xvf VOC2012test.tar -``` - -After unpacking, the following directory structure is available (assuming VOCdevkit is in the /data directory). - -``` -/data/VOCdevkit -` - VOC2012 - |-- Annotations - |-- ImageSets - |-- Action - |-- Layout - |-- Main - | `-- Segmentation - ` -- JPEGImages -``` - -Note that all annotations are in the annotations directory and all images are located in the JPEGImages directory. - -Users can use the following command to generate the "index.tsv" file. - -``` -$ find /data/VOCdevkit/VOC2012/JPEGImages -type f > index.tsv -``` - -The generated "index.tsv" is as follows: - -``` -/data/VOCdevkit/VOC2012/JPEGImages/2009_001200.jpg -/data/VOCdevkit/VOC2012/JPEGImages/2009_004006.jpg -/data/VOCdevkit/VOC2012/JPEGImages/2008_006022.jpg -/data/VOCdevkit/VOC2012/JPEGImages/2008_006931.jpg -/data/VOCdevkit/VOC2012/JPEGImages/2009_003016.jpg -... -``` - -And this "index.tsv" can be used for the next step of data import. - -In addition, each annotation in the Annotations folder has the same main file name as the image. One of the xxx attributes will be extracted as a predefined keyword to be used in a later step of data filtering. - -### 4.2.2 Create local repo and import data - -The command line on this system uses a method similar to Git to manage user resources. Users create their own mir repository and perform all the following tasks in this mir repo. - -Use the following command to create a mir repo: - -``` -$ mkdir ~/mir-demo-repo && cd ~/mir-demo-repo # Create the directory and enter -$ mir init # init this directory to a mir repo -$ mkdir ~/ymir-assets ~/ymir-models # Creates assets and models storage directory, mir repo only keeps reference to assets and models -``` - -All type labels in mir repo are managed by `labels.csv`. Open file `~/mir-demo-repo/labels.csv`, and you can see the following contents: - -``` -# type_id, preserved, main type name, alias... -``` - -In `labels.csv`, each line represents a type label: type_id (start from 0), empty, main type name, alias. If the dataset contains person, cat and tv as it's label names, you can edit this file as follow: - -``` -0,,person -1,,cat -2,,tv -``` - -There could be one or more alias for each type label, for example: if television is sepecified as the alias for tv, the `labels.csv` could be changed to: - -``` -0,,person -1,,cat -2,,tv,television -``` - -You can edit this file by vi and other text editing tools. You can add alias to type labels or add new type labels, but it is not recommended to change or remove the id and name of any type label that already exists. - -The file `labels.csv` can be shared among multiple mir repos by establishing soft links. 
- -Users are required to prepare three data sets in advance. - -1. A training set (for example, named "dataset-training"), with annotations, for initial model training. - -2. A validation set (named "dataset-val") that includes annotations. - -3. Mining set (named "dataset-mining"): a large dataset to be mined from. - -The user imports the three data sets with the following command: - -``` -$ cd ~/mir-demo-repo -$ mir import --index-file /path/to/training-dataset-index.tsv \ # index.tsv path - --annotation-dir /path/to/training-dataset-annotation-dir \ # annotations dir - --gen-dir ~/ymir-assets \ # assets storage dir - --dataset-name 'dataset-training' \ # dataset name - --dst-rev 'dataset-training@import' # destination branch and task name -$ mir checkout master -$ mir import --index-file /path/to/val-dataset-index.tsv \ - --annotation-dir /path/to/val-dataset-annotation-dir \ - --gen-dir ~/ymir-assets \ - --dataset-name 'dataset-val' \ - --dst-rev 'dataset-val@import' -$ mir checkout master -$ mir import --index-file /path/to/mining-dataset-index.tsv \ - --annotation-dir /path/to/mining-dataset-annotation-dir \ - --gen-dir ~/ymir-assets \ - --dataset-name 'dataset-mining' \ - --dst-rev 'dataset-mining@import' -``` - -Users can use this command to see the branches of the current mir repo: - -``` -$ mir branch -``` - -This repo has four branches: master, dataset-training, dataset-val, and dataset-mining. The current repo is on the branch dataset-mining. - -Users can also view the status of branch with: - -``` -$ mir show --src-rev dataset-mining -``` - -And output as follow: - -``` -person;cat;car;airplane - -metadatas.mir: 200 assets, tr: 0, va: 0, te: 0, unknown: 200 -annotations.mir: hid: import, 113 assets -tasks.mir: hid: import -``` - -The first and second rows are predefined keywords and custom keywords (in this output, there are no custom keywords). The next lines are the number of resources, the number of comments, and the status of tasks under the current branch. - -### 4.2.3 Merge and filter - -Users can merge dataset-training and dataset-val with: - -``` -$ mir merge --src-revs tr:dataset-training@import;va:dataset-val@import \ # source branches to be merged - --dst-rev tr-va@merged \ # destination branch and task name - -s host # conflicts resolve strategy: use infos on host branch (the first branch in --src-revs) -``` - -After the merge is complete, you can see that the current repo is under the tr-va branch and you can check the status with mir show: - -``` -$ mir show --src-revs HEAD # HEAD refers to the current head branch, and can be replaced by the specific branch name tr-va -``` - -The output is as follows: - -``` -person;cat;car;airplane - -metadatas.mir: 3510 assets, tr: 2000, va: 1510, te: 0, unknown: 0 -annotations.mir: hid: merged, 1515 assets -tasks.mir: hid: merged -``` - -If the dataset-training and dataset-val before merging have 2000 and 1510 images, you can see that the merge branch has 2000 images as the training set and 1510 images as the validation set. 
-If the user only wants to train the model to detect humans and cats, we first filter out the resources of humans and cats from the tr-va branch: - -``` -mir filter --src-revs tr-va@merged \ - --dst-rev tr-va@filtered \ - -p 'person;cat' -``` - -### 4.2.4 Train the initial model - -First, users need to pull the training and mining docker images from the docker hub as follows: - -``` -docker pull industryessentials/executor-det-yolov4-training:release-0.1.2 -docker pull industryessentials/executor-det-yolov4-mining:release-0.1.2 -``` - -and start the training process with the following command: - -``` -mir train -w /tmp/ymir/training/train-0 \ - --media-location ~/ymir-assets \ # assets storage dir - --model-location ~/ymir-models \ # model storage dir - --task-config-file ~/training-config.yaml \ # training config file, get it from training docker image - --src-revs tr-va@filtered \ - --dst-rev training-0@trained \ - --executor industryessentials/executor-det-yolov4-training:release-0.1.2 # docker image name -``` - -After the model training is completed, the system will output the model ID. The user can view the package file of the model in "/ymir-models". - -### 4.2.5 Data mining - -This model is trained on a small dataset, and users can get the best images for the next training step in this mining process with the following command: - -``` -mir mining --src-revs dataset-mining@import \ # mining dataset branch - --dst-rev mining-0@mining \ # destination branch - -w /tmp/ymir/mining/mining-0 \ # tmp working dir for this task - --topk 200 \ # topk - --model-location ~/ymir-models \ - --media-location ~/ymir-assets \ - --model-hash \ # model id - --cache /tmp/ymir/cache \ # asset cache - --task-config-file ~/mining-config.yaml \ # mining config file, get it from mining docker image - --executor industryessentials/executor-det-yolov4-mining:release-0.1.2 # mining docker image name -``` - -### 4.2.6 Data labeling - -Now the user has 200 images on the branch "mining-0". These images will be most useful in the next training step. The next task is to export these resources and send them to the annotators for labeling. - -Users can export assets with the following command: - -``` -mir export --asset-dir /tmp/ymir/export/export-0/assets \ # export directory for assets - --annotation-dir /tmp/ymir/export/export-0/annotations \ # export directory for annotations - --media-location ~/ymir-assets \ # assets storage directory - --src-revs mining-0@mining \ - --format none # no annotations needed -find /tmp/ymir/export/export-0/assets > /tmp/ymir/export/export-0/index.tsv -``` - -After the export is done, users can see images at /tmp/ymir/export/export-0/assets. Users can send these pictures for annotation, and the annotations need to be saved in VOC format (assuming the save path is still /tmp/ymir/export/export-0/annotations). -Once the annotation is finished, the user can import the data using a similar approach to the import command in [4.2.2](#422-create-local-repo-and-import-data). - -``` -$ mir import --index-file /tmp/ymir/export/export-0/index.tsv - --annotation-dir /tmp/ymir/export/export-0/annotations \ # annotation directory - --gen-dir ~/ymir-assets \ # asset storage directory - --dataset-name 'dataset-mining' \ # dataset name - --dst-rev 'labeled-0@import' # destination branch and task name -``` - -### 4.2.7 Model iteration-data merging - -The branch "labeled-0" now contains the 200 new training images. Users can be merged together with the original training set by the merge command. 
- -``` -$ mir merge --src-revs tr-va@filtered;tr:labeled-0@import \ # source branch - --dst-rev tr-va-1@merged \ # destination branch and task name - -s host -``` - -### 4.2.8 Model iteration-model training - -Now branch tr-va-1 contains the previous training and validation set and 200 new images we have just mined and labeled. A new model can be trained on this set with the following command: - -``` -mir train -w /tmp/ymir/training/train-1 \ # different working directory for each different training and mining task - --media-location ~/ymir-assets \ - --model-location ~/ymir-models \ - --task-config-file ~/training-config.yaml \ - --src-revs tr-va-1@merged \ # use new-merged branch - --dst-rev training-1@trained \ - --executor industryessentials/executor-det-yolov4-training:release-0.1.2 -``` - -## 4.3. YMIR-CMD manual - -YMIR-command-api.211028 - -**Common parameter format and definitions** - -| Parameter Name | Variable Name | Type and Format | Definition | -| -------------- | ------------- | --------------- | ------------------------------------------------------------ | -| --root / -r | mir_root | str | The path the user needs to initialize. If not specified, it is the current path. | -| --dst-rev | dst_rev | str | 1. target-rev, single only | -| | | rev@tid | 2. All changes will be saved to this rev's tid | -| | | | 3. If it is a new rev, checkout the first src-revs before creating | -| | | | 4. tid is required. rev is required. | -| --src-revs | src_revs | str | 1. Multiple data source revisions separated by semicolons (only supported by merge, other cmd only support single) | -| | | typ:rev@bid | 2. typ is optional. Only effective in merge and supports pre-use identifiers (tr/va/te). If typ is empty, it means that the settings in the original rev are used | -| | | | 3. bid is optional. If it is empty, read the head task id | -| | | | 4. rev cannot be empty | -| | | | Note: When there are multiple revs, e.g. a1@b1; a2@b2, you need to enclose them in quotes because the semicolon is a Linux command separator | -**mir init** -| DESCRIPTION | | | -| ------------------------------------------------------------ | -------- | ------------- | -| mir init [--root ] | | | -| Initialize the current path, or the path specified by -root as a mir root | | | -| ARGS (name of ARGS, name, type, description of arguments in run_with_args) | | | -| --root / -r | mir_root | str, optional | -| RETURNS | | | -| Normal initialization: returns 0 | | | -| Exception: other error code | | | - -**mir branch** - -| DESCRIPTION | | | -| ------------------------------ | -------- | ------------- | -| mir branch [--root ] | | | -| List all local branches | | | -| ARGS | | | -| --root / -r | mir_root | str, optional | -| RETURNS | | | +The above figure shows a typical process of model training: 1) the user prepares external data, 2) imports it into the system, 3) appropriately filters the data, and 4) begins training to obtain a model (possibly with low accuracy). 5) selects images in a dataset to be mined that are suitable for further training based on this model, 6) annotates these images, 7) merges the annotated results with the original training set, and 8) uses the merged results to run the training process again to obtain a better model. This section implements the process shown above using the command line. For details, please check the [CMD usage instructions](https://github.com/IndustryEssentials/ymir/wiki/CMD-usage-instructions). # 5. Get the code -## 5.1.
YMIR repos - -The YMIR project consists of three components: - -1. [Back-end](https://github.com/IndustryEssentials/ymir/tree/master/ymir/backend), task distribution and management. - -2. [Front-end](https://github.com/IndustryEssentials/ymir/tree/master/ymir/web), interactive interface. - -3. [Command line](https://github.com/IndustryEssentials/ymir/tree/master/ymir/command), a command-line interface (CLI) for managing the underlying annotation and image data. - -## 5.2. Code contribution +## 5.1. Code contribution Any code in the YMIR repo should follow the coding standards and will be checked in the CI tests. @@ -813,86 +408,13 @@ Any code in the YMIR repo should follow the coding standards and will be checked Also check out [MSFT Encoding Style](https://github.com/Microsoft/Recommenders/wiki/Coding-Guidelines) for more advice. -## 5.3 About training, inference and mining docker images +## 5.2. About training, inference and mining docker images -[Check this document](docs/ymir-cmd-container.md) for details +[Check this document](https://github.com/IndustryEssentials/ymir/blob/dev/dev_docs/ymir-cmd-container.md) for details. # 6. Design concept - -## 6.1. Life of a dataset - -### 6.1.1. Introduction to a dataset - -The dataset consists of metadata and media files, and the metadata has the following characteristics: - -* A unique ID and the system has an initial default metadata status of null. - -* A list of resources, where each element points to an actual resource; Metadata doesn't actually hold resources, but only maintains this list of resources. - -* A number of keywords by which a user can search for a particular metadata status. - -* Support users to create a new metadata branch and perform operations on the newly created branch. The operations on the new branch do not affect the status of the original metadata, and the original metadata is still traceable by the user. These operations include but are not limited to the following: - - (1) Adding resources - (2) Adding or modifying annotations - (3) Add or modify keywords - (4) Filtering resources - (5) Merging two different metadatas - -* You can switch freely between different metadata. - -* You can query the history of the metadata. - -* You can tag the metadata to facilitate precise search by tag. - -* You can also add keywords to metadata to facilitate fuzzy search through keywords. - -* You can read the resources contained in a metadata and use those resources for browsing, training and so on. - -From the above description, it can be seen that the management of metadata is similar to that of VCS (Version Control System), and users can have the following completely different usage methods and scenarios: - -**The first scene**: Directly from the very first metadata, a filtering process is carried out to select and use the data that meets the requirements, as shown in the following figure: - -![Scenario1](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/Scenario1.jpeg) - -Whenever the user needs to start a new task, -:: The user checks out a new feature branch from within the current master branch, getting the metadata in feature#1 state. -:: The user performs data filtering and other tasks on the metadata of this new branch. The user can obtain the metadata in the feature #2 state. -:: When it is confirmed that this metadata is suitable for the user's training task, then the user can start training using this data. 
- -* At this point, changes made by other users to the master branch's metadata will not affect the training data the user is using either. - -**The second scene**: Search for certain metadata by label or keyword. The user starts the screening process until the data meets the requirements, and then the data is used. As shown below: - -![Scenario2](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/Scenario2.jpeg) - -At this point, whenever a user needs to carry out a new task, -:: Users can search for metadata that basically matches the user's requirements by means of keywords, labels, and so on. -:: On this basis, users need sign out a new branch. -:: Users can continue data filtering or cleansing on the new branch to obtain data that actually meets the requirements. -:: Users can use this data for training. - -**The third scene**: incremental merging. Suppose the user has completed the training task of the model using certain metadata. At this point, there is an update to the metadata of the repository and the master branch. The user wishes to merge this part of the update into the currently used metadata. - -![Scenario3](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/Scenario3.jpeg) - -Suppose the user is now in FEATURE#2 and needs to do the following: -:: You need switch back to master branch master. -:: You need repeat the task previously done for the incremental part master#2 - master#1 to obtain feature#2+. -:: You need cut back to feature#2 and merge feature#2+ to get feature#3. - -### 6.1.2. Branch and dataset management - -The discussion in this section is based on the following assumptions: -:: The user's data is imported in batches in units of datasets. -:: Each dataset is a separate branch. -:: Changes to and maintenance of each dataset are carried out on this branch. -:: Master branch is always empty. -This management approach is shown in the following figure: - -![branch and dataset](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/branch-and-dataset.jpeg) - -We use the concept of code version control in Git to manage our data and models, use the concept of branches to create new projects so that different tasks on the same set of images can run in parallel. The additions, retrievals, updates, and deletions of datasets and basic operations are created by commits to branches. Logically, each commit stores an updated version of the dataset or new model, as well as the metadata of the operation that led to this change. Finally, only the data changes are merged into the main branch. This branch conceptually aggregates all the data annotated by many projects on the platform. + +We use the concept of code version control in Git to manage our data and models, use the concept of branches to create new projects so that different tasks on the same set of images can run in parallel. The additions, retrievals, updates, and deletions of datasets and basic operations are created by commits to branches. Logically, each commit stores an updated version of the dataset or new model, as well as the metadata of the operation that led to this change. Finally, only the data changes are merged into the main branch. This branch conceptually aggregates all the data annotated by many projects on the platform. Please see [Life of a dataset](https://github.com/IndustryEssentials/ymir/wiki/Life-of-a-dataset) for specific design concepts. # 7. MISC @@ -908,7 +430,7 @@ The default profile template needs to be extracted in the mirror. 
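+One way to extract such a template from a mirror is with standard Docker CLI commands (a minimal sketch; the container name `ymir-extract` is arbitrary, and the image tag and template path are the ones documented in this answer):
+
+```sh
+# create a stopped container from the training mirror, copy the template out, then clean up
+docker create --name ymir-extract industryessentials/executor-det-yolov4-training:release-0.1.2
+docker cp ymir-extract:/img-man/training-template.yaml ./training-config.yaml
+docker rm ymir-extract
+```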
The training image `industryessentials/executor-det-yolov4-training:release-0.1.2` has a configuration file template located at: `/img-man/training-template.yaml`

-Mining and inference mirrors The configuration file templates for `industryessentials/executor-det-yolov4-mining:release-0.1.2` are located at: `/img-man/mining-template.yaml` (mining) and `/img-man/infer-template. yaml` (infer)
+Mining and inference images: the configuration file templates for `industryessentials/executor-det-yolov4-mining:release-0.1.2` are located at: `/img-man/mining-template.yaml` (mining) and `/img-man/infer-template.yaml` (inference).

**How can the trained model be used outside the system?**

@@ -918,15 +440,20 @@ After successful training, the system will output the ID of the model. The user

It has not been fully tested on Windows server, so we cannot provide service support for the time being.

+**How to import models I've already trained?**
+
+See [this document](https://github.com/IndustryEssentials/ymir/blob/dev/dev_docs/import-extra-models.md).
+

-[![All Contributors](https://img.shields.io/badge/All%20Contributors-8-brightgreen)](#contributors-)
+[![All Contributors](https://img.shields.io/badge/All%20Contributors-9-brightgreen)](#contributors-)

- - - - - - - -
+ + + + + + + + +

diff --git a/README_zh-CN.md b/README_zh-CN.md
index d7f7f2367a..c59b0ecb2e 100755
--- a/README_zh-CN.md
+++ b/README_zh-CN.md
@@ -31,39 +31,78 @@
 
-[📘使用说明](https://github.com/IndustryEssentials/ymir/blob/master/README_zh-CN.md#3-gui%E4%BD%BF%E7%94%A8-%E5%85%B8%E5%9E%8B%E6%A8%A1%E5%9E%8B%E7%94%9F%E4%BA%A7%E6%B5%81%E7%A8%8B) | -[🛠️安装教程](https://github.com/IndustryEssentials/ymir/blob/master/README_zh-CN.md#2-%E5%AE%89%E8%A3%85) | -[👀推荐配置](https://c1n.cn/obvHf) | +[📘使用说明](https://github.com/IndustryEssentials/ymir/wiki/%E6%93%8D%E4%BD%9C%E8%AF%B4%E6%98%8E) | +[🛠️安装教程](README_zh-CN.md#2-%E5%AE%89%E8%A3%85) | [🚀进行中的项目](https://github.com/IndustryEssentials/ymir/projects) | [🤔报告问题](https://github.com/IndustryEssentials/ymir/issues/new/choose) | [📰开源协议](https://github.com/IndustryEssentials/ymir/blob/master/LICENSE) - +   - +
- + 📫 使用问题反馈:contact.viesc@gmail.com / 服务器级设备专业咨询:tensor.station@gmail.com - +
 
 
+[English](README.md) | 简体中文
+
# 文章引用

如要在您的工作中引用YMIR，请使用下面的Bibtex:

```bibtex
@inproceedings{huang2021ymir,
-    title={YMIR: A Rapid Data-centric Development Platform for Vision Applications}, 
+    title={YMIR: A Rapid Data-centric Development Platform for Vision Applications},
    author={Phoenix X. Huang and Wenze Hu and William Brendel and Manmohan Chandraker and Li-Jia Li and Xiaoyu Wang},
    booktitle={Proceedings of the Data-Centric AI Workshop at NeurIPS},
    year={2021},
}
```

-## 简介
+# 更新内容

-[English](README.md) | 简体中文
+2.0.0版本更新时间为11/08/2022
+
+YMIR平台
+- 新增模型性能诊断模块；
+- 新增对模型推理结果进行可视化评估的功能；
+- 新增公共算法库，内置多种高精度算法；
+- 新增一键部署功能，支持将算法部署到认证设备上；
+- 新增操作说明文档；
+- 优化代码结构。
+
+Docker
+- 支持 [yolov5](https://github.com/ultralytics/yolov5)
+- 支持 [mmdetection](https://github.com/open-mmlab/mmdetection)
+- 支持 [yolov7](https://github.com/wongkinyiu/yolov7)
+- 支持 [detectron2](https://github.com/facebookresearch/detectron2)
+- 支持 [An Extendable, Efficient and Effective Transformer-based Object Detector](https://github.com/naver-ai/vidt)
+- 支持 [ymir镜像测试工具库](https://github.com/modelai/ymir-executor-verifier)
+- 支持 [demo 示例镜像制作文档](https://github.com/modelai/ymir-executor-fork/tree/ymir-dev/det-demo-tmi)
+- 支持 [ymir镜像开发扩展库](https://github.com/modelai/ymir-executor-sdk)
+
+查看更多内容 [ymir-executor-fork](https://github.com/modelai/ymir-executor-fork)。
+
+在公共镜像内
+- 更新yolov5训练镜像：youdaoyzbx/ymir-executor:ymir1.3.0-yolov5-cu111-tmi
+- 更新mmdetection训练镜像：youdaoyzbx/ymir-executor:ymir1.3.0-mmdet-cu111-tmi
+- 更新支持rv1126芯片部署的yolov5训练镜像：youdaoyzbx/ymir-executor:ymir1.3.0-yolov5-cu111-modelstore
+- 更新支持yolov5-v6.2的训练镜像：youdaoyzbx/ymir-executor:ymir1.3.0-yolov5-v6.2-cu111-tmi
+
+更多代码更新请查看 [ymir-dev](https://github.com/modelai/ymir-executor-fork/tree/ymir-dev)。
+
+# 硬件支持 (可选)
+
+YMIR支持将训练好的模型直接部署到认证设备，如需查看更多硬件参数，请查看[详情](https://i-item.jd.com/10065116628109.html)。
+
+
+ +
 
 
+ +## 简介 @@ -71,6 +110,8 @@ **目录** - [文章引用](#文章引用) +- [更新内容](#更新内容) +- [硬件支持 (可选)](#%E7%A1%AC%E4%BB%B6%E6%94%AF%E6%8C%81-%E5%8F%AF%E9%80%89) - [1. AI SUITE-YMIR介绍](#1--ai-suite-ymir%E4%BB%8B%E7%BB%8D) - [1.1. 主要功能](#11-主要功能) - [1.2. 申请试用](#12-申请试用) @@ -79,39 +120,15 @@ - [2.2. 安装 YMIR-GUI](#22-%E5%AE%89%E8%A3%85-ymir-gui) - [2.3. 安装配置LabelStudio (可选)](#23-%E5%AE%89%E8%A3%85%E9%85%8D%E7%BD%AElabelstudio-%E5%8F%AF%E9%80%89) - [3. GUI使用-典型模型生产流程](#3-gui%E4%BD%BF%E7%94%A8-%E5%85%B8%E5%9E%8B%E6%A8%A1%E5%9E%8B%E7%94%9F%E4%BA%A7%E6%B5%81%E7%A8%8B) - - [3.1. 标签管理](#31-%E6%A0%87%E7%AD%BE%E7%AE%A1%E7%90%86) - - [3.2. 项目管理](#32-项目管理) - - [3.2.1 迭代数据准备](#321-迭代数据准备) - - [3.2.2 初始模型准备](#322-初始模型准备) - - [3.2.3 挖掘数据准备](#323-挖掘数据准备) - - [3.2.4 数据挖掘](#324-数据挖掘) - - [3.2.5 数据标注](#325-数据标注) - - [3.2.6 更新训练集](#326-更新训练集) - - [3.2.7 合并训练](#327-合并训练) - - [3.2.8 模型验证](#328-模型验证) - - [3.2.9 模型下载](#329-模型下载) - [4. 进阶版:Ymir-CMD line使用指南](#4-%E8%BF%9B%E9%98%B6%E7%89%88ymir-cmd-line%E4%BD%BF%E7%94%A8%E6%8C%87%E5%8D%97) - [4.1 安装](#41-%E5%AE%89%E8%A3%85) - [方式一:通过pip安装](#%E6%96%B9%E5%BC%8F%E4%B8%80%E9%80%9A%E8%BF%87pip%E5%AE%89%E8%A3%85) - [方式二:通过源码安装](#%E6%96%B9%E5%BC%8F%E4%BA%8C%E9%80%9A%E8%BF%87%E6%BA%90%E7%A0%81%E5%AE%89%E8%A3%85) - [4.2 典型模型生产流程](#42-%E5%85%B8%E5%9E%8B%E6%A8%A1%E5%9E%8B%E7%94%9F%E4%BA%A7%E6%B5%81%E7%A8%8B) - - [4.2.1 准备外部数据](#421-%E5%87%86%E5%A4%87%E5%A4%96%E9%83%A8%E6%95%B0%E6%8D%AE) - - [4.2.2 建立本地repo并导入数据](#422-%E5%BB%BA%E7%AB%8B%E6%9C%AC%E5%9C%B0repo%E5%B9%B6%E5%AF%BC%E5%85%A5%E6%95%B0%E6%8D%AE) - - [4.2.3 合并及筛选](#423-%E5%90%88%E5%B9%B6%E5%8F%8A%E7%AD%9B%E9%80%89) - - [4.2.4 训练第一个模型](#424-%E8%AE%AD%E7%BB%83%E7%AC%AC%E4%B8%80%E4%B8%AA%E6%A8%A1%E5%9E%8B) - - [4.2.5 挖掘](#425-%E6%8C%96%E6%8E%98) - - [4.2.6 标注](#426-%E6%A0%87%E6%B3%A8) - - [4.2.7 合并](#427-%E5%90%88%E5%B9%B6) - - [4.2.8 训练第二个模型](#428-%E8%AE%AD%E7%BB%83%E7%AC%AC%E4%BA%8C%E4%B8%AA%E6%A8%A1%E5%9E%8B) - - [4.3. 命令参数手册](#43-%E5%91%BD%E4%BB%A4%E5%8F%82%E6%95%B0%E6%89%8B%E5%86%8C) - [5. 获取代码](#5--%E8%8E%B7%E5%8F%96%E4%BB%A3%E7%A0%81) - - [5.1 YMIR repos](#51--ymir-repos) - - [5.2 代码贡献](#52--%E4%BB%A3%E7%A0%81%E8%B4%A1%E7%8C%AE) - - [5.3 关于训练,推理与挖掘镜像](#53-%E5%85%B3%E4%BA%8E%E8%AE%AD%E7%BB%83%E6%8E%A8%E7%90%86%E4%B8%8E%E6%8C%96%E6%8E%98%E9%95%9C%E5%83%8F) + - [5.1 代码贡献](#51--%E4%BB%A3%E7%A0%81%E8%B4%A1%E7%8C%AE) + - [5.2 关于训练,推理与挖掘镜像](#52-%E5%85%B3%E4%BA%8E%E8%AE%AD%E7%BB%83%E6%8E%A8%E7%90%86%E4%B8%8E%E6%8C%96%E6%8E%98%E9%95%9C%E5%83%8F) - [6. 设计理念](#6-设计理念) - - [6.1. Life of a dataset](#61-life-of-a-dataset) - - [6.1.1 数据集介绍](#611-数据集介绍) - - [6.1.2 分支与数据集的管理](#612-分支与数据集的管理) - [7.MISC](#7misc) - [7.1 常见问题](#71-%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98) @@ -122,7 +139,7 @@ YMIR(You Mine In Recursion)是一个简化的模型开发产品,专注于AI SUITE开源系列中的数据集版本和模型迭代。
- +
 
 
目前人工智能商业化在算力、算法和技术方面达到阶段性成熟。AI应用在产品落地过程中经常会遇到无法大规模扩展的瓶颈,例如人才紧缺、开发成本高、迭代周期长等问题。 @@ -143,7 +160,7 @@ YMIR主要以数据为中心,提供高效模型开发迭代能力,集成了 YMIR平台主要满足用户规模化生产模型的需求,为用户提供良好、易用的展示界面,便于数据和模型的管理与查看。平台包含项目管理、标签管理、系统配置等主要功能模块,支持实现以下主要功能: -| 功能模块 | 一级功能 | 二级功能 | 功能说明 | +| 功能模块 | 一级功能 | 二级功能 | 功能说明 | |----------|-----------|------------|-----------------------------------------| |项目管理|项目管理|项目编辑|支持添加、删除、编辑项目及项目信息| |项目管理|迭代管理|迭代准备|支持设置迭代所需要的数据集和模型信息| @@ -161,6 +178,13 @@ YMIR平台主要满足用户规模化生产模型的需求,为用户提供良 |项目管理|模型管理|训练模型|支持自选数据集、标签,并根据需求调整训练参数来训练模型,完成后可查看对应的模型效果| |项目管理|模型管理|模型验证|支持上传单张图片,通过可视化的方式查看模型在真实图片中的表现,以校验模型的精确度| |标签管理|标签管理|新增标签|支持训练标签的主名和别名的添加| +|模型部署|算法管理|公共算法|支持算法定制化、查看公共算法并试用,支持添加到我的算法中| +|模型部署|算法管理|公共算法|支持发布我的算法到公共算法| +|模型部署|算法管理|我的算法|支持查看和编辑我发布的算法和已添加的算法| +|模型部署|算法管理|部署算法|支持部署我的算法到设备上、查看部署历史| +|模型部署|设备管理|查看设备|支持设备信息和部署历史的查看| +|模型部署|设备管理|编辑设备|支持设备的添加、部署、删除| +|模型部署|设备管理|支持设备|支持对支持设备的查看和购买| |系统配置|镜像管理|我的镜像|支持添加自定义镜像到系统中(仅管理员可用)| |系统配置|镜像管理|公共镜像|支持查看其他人上传的公共镜像并复制到自己的系统中| |系统配置|权限配置|权限管理|支持对用户的权限进行配置(仅管理员可用)| @@ -177,19 +201,50 @@ YMIR平台主要满足用户规模化生产模型的需求,为用户提供良 2.如需要修改系统默认的配置,推荐安装CMD; +3.如已经部署ymir的已有版本,请参考[升级说明](https://github.com/IndustryEssentials/ymir/blob/dev/ymir/updater/readme_zh-CN.md)。 + 本章节为YMIR-GUI的安装说明,如需使用CMD,请参考[Ymir-CMD line使用指南](#4-进阶版ymir-cmd-line使用指南)。 ## 2.1. 环境依赖 -1. GPU版本需要GPU,并安装nvidia驱动: [https://www.nvidia.cn/geforce/drivers/](https://www.nvidia.cn/geforce/drivers/) +1. GPU版本需要GPU,并安装nvidia驱动: [https://www.nvidia.cn/geforce/drivers/](https://www.nvidia.cn/geforce/drivers/) 2. 需要安装docker: -* Docker & Docker Compose 安装: [https://docs.docker.com/get-docker/](https://docs.docker.com/get-docker/) -* NVIDIA Docker安装: [https://github.com/NVIDIA/nvidia-docker](https://github.com/NVIDIA/nvidia-docker) - +* Docker & Docker Compose 安装: [https://docs.docker.com/get-docker/](https://docs.docker.com/get-docker/) +* `NVIDIA Docker`安装: [nvidia-docker install-guide](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#docker) + +```sh +## 通过nvidia-smi查看主机显卡驱动支持的最高cuda版本 +nvidia-smi +## 对支持CUDA11以上版本的主机, 检查nvidia-docker是否安装成功 +sudo docker run --rm --gpus all nvidia/cuda:11.0.3-base-ubuntu20.04 nvidia-smi +## 对支持CUDA10的主机, 检测nvidia-docker是否安装成功 +sudo docker run --rm --gpus all nvidia/cuda:10.2-base-ubuntu18.04 nvidia-smi +## 上述命令在终端应输出类似以下的结果 (最高支持cuda 11.6) ++-----------------------------------------------------------------------------+ +| NVIDIA-SMI 510.60.02 Driver Version: 510.60.02 CUDA Version: 11.6 | +|-------------------------------+----------------------+----------------------+ +| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC | +| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. | +| | | MIG M. | +|===============================+======================+======================| +| 0 Tesla P4 Off | 00000000:0B:00.0 Off | 0 | +| N/A 62C P0 55W / 75W | 4351MiB / 7680MiB | 94% Default | +| | | N/A | ++-------------------------------+----------------------+----------------------+ ++-----------------------------------------------------------------------------+ +| Processes: | +| GPU GI CI PID Type Process name GPU Memory | +| ID ID Usage | +|=============================================================================| +| 0 N/A N/A 8132 C 4349MiB | ++-----------------------------------------------------------------------------+ +``` + 3. 推荐服务器配置: * NVIDIA GeForce RTX 2080 Ti 12G * 显存最大值到达9974MiB +* 显卡驱动支持的最高CUDA 版本 >= 11.2 ## 2.2. 
安装 YMIR-GUI @@ -220,6 +275,14 @@ LABEL_TOOL_HOST_PORT=set_your_label_tool_HOST_PORT ``` +* YMIR的模型部署模块默认端口号为18801,如有冲突需要修改,则需要前往YMIR目录下修改.env文件,配置 ModelDeployment 端口和 MySQL 访问密码: + +``` +DEPLOY_MODULE_HOST_PORT=18801 +DEPLOY_MODULE_URL=${DEPLOY_MODULE_HOST_PORT} +DEPLOY_MODULE_MYSQL_ROOT_PASSWORD=deploy_db_passwd +``` + 修改完成后再执行启动命令:`bash ymir.sh start`。 4. 服务启动成功后,默认配置端口为12001,可以直接访问 [http://localhost:12001/](http://localhost:12001/) 显示登录界面即安装成功。如果需要**停止服务**,运行命令为:`bash ymir.sh stop` @@ -269,8 +332,6 @@ LABEL_TOOL_TOKEN="Token token_value" # 3. GUI使用-典型模型生产流程 -本次使用一次模型迭代的完整流程来说明YMIR平台的操作过程。 - ![YMIR-GUI process](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/YMIR-GUI-process.jpeg) 数据和标签是深度学习模型训练的必要条件,模型的训练需要大量带标签的数据。然而在实际情况下,现实中存在的是大量没有标签的数据,如果全部由标注人员手工打上标签,人力和时间成本过高。 @@ -278,157 +339,13 @@ LABEL_TOOL_TOKEN="Token token_value" 因此,YMIR平台通过主动学习的方法,首先通过本地导入或者少量数据来训练出一个初始模型,使用该初始模型,从海量数据中挖掘出对模型能力提高最有利的数据。挖掘完成后,仅针对这部分数据进行标注,对原本的训练数据集进行高效扩充。 使用更新后的数据集再次训练模型,以此来提高模型能力。相比于对全部数据标注后再训练,YMIR平台提供的方法更高效,减少了对低质量数据的标注成本。通过挖掘,标注,训练的循环,扩充高质量数据,提升模型能力。 - -## 3.1. 标签管理 - -当用户需要导入的数据集带有标注文件时,请确保标注类型属于系统已有的标签列表,否则需要用户前往标签管理界面新增自定义标签,以便导入数据。如下图所示: - -![标签管理](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/%E6%A0%87%E7%AD%BE%E7%AE%A1%E7%90%86.jpg) - -本次我们在标签列表中添加标签helmet_head、no_helmet_head,其中标签的主名与别名表示同一类标签,当某些数据集的标注包含别名时,会在导入时合并变更为主名。如,标签列表中包含标签bike(别名bicycle),导入某数据集A(仅包含标签bicycle),则导入后在数据集详情显示标注为bike。 - -## 3.2. 项目管理 - -用户根据自己的训练目标(helmet_head,no_helmet_head)创建项目,并设置目标的mAP值、迭代轮次等目标信息。如下图所示: - -## 3.2.1 迭代数据准备 - -用户准备好用于数据挖掘的挖掘集(可以不需要包含标注文件),以及带有训练目标的数据集(训练集,测试集),用于训练初始模型。针对本身带有标注文件的数据集,在导入之前,需要保证数据集的格式符合格式要求: - -* 数据集为.zip格式,其中包含两个文件夹,需分别命名为images和annotations; -* images文件夹存放数据的图片信息,图像格式限为jpg、jpeg、png; -* annotations文件夹存放数据的标注信息,标注文件格式为pascal voc(当无标注文件时,该文件夹为空); - -数据集导入支持四种形式:公共数据集导入、网络导入、本地导入和路径导入,如下图所示: - -![数据导入引导](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/%E5%AF%BC%E5%85%A51.jpg) - -(1) 公共数据集复制:导入公共用户内置的数据集,该数据集存储在公共用户上,以复制的形式导入到当前的操作用户上。如下图所示: - -![公共数据集导入](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/%E5%85%AC%E5%85%B1%E6%95%B0%E6%8D%AE%E9%9B%86%E5%AF%BC%E5%85%A5.jpg) - -选择数据集,可根据需求选择是否要同步导入公共数据集包含的标签,点击【确定】即可开始复制。 - -(2) 网络导入:输入数据集对应的url路径,如下图所示: - -![网络导入](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/%E7%BD%91%E7%BB%9C%E5%AF%BC%E5%85%A5.jpg) - -(3) 本地导入:上传本地数据集的压缩包,压缩包大小建议不超过200M。 - -用户可以下载示例**Sample.zip**作为参考,如下图所示: - -![本地导入](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/%E6%9C%AC%E5%9C%B0%E5%AF%BC%E5%85%A5.jpg) - -(4)路径导入:输入数据集在服务器中的绝对路径。如下图所示: - -![路径导入](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/%E8%B7%AF%E5%BE%84%E5%AF%BC%E5%85%A5.jpg) - -1.通过在网络中下载开源数据集VOC2012([点击下载VOC2012](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar)),解压缩后按要求修改文件夹名称,再分别压缩为符合导入要求的zip包; - -2.把VOC2012放到 `ymir-workplace/ymir-sharing` 下面; - -3.选择路径导入,填上路径地址`/ymir-sharing/voc2012`。 - -完成初始数据集的导入后,点击【迭代数据准备】,完成对应的数据集和挖掘策略设置。其中训练集已设置为创建项目时默认的系统训练集,不可变更。 - -![迭代数据准备](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/%E8%BF%AD%E4%BB%A3%E6%95%B0%E6%8D%AE%E5%87%86%E5%A4%87.jpg) - -## 3.2.2 初始模型准备 - -用户准备用于初始迭代的模型,可以通过本地导入和模型训练两种方式,本地导入需要保证模型的格式符合要求: - -* 仅支持YMIR系统产生的模型; -* 上传文件应小于1024MB; -* 上传的模型文件检测目标应该与项目目标保持一致; - -模型训练可以点击数据集列表界面的【训练】操作按钮,跳转至创建模型训练界面,如下图所示: - -![训练1](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/%E8%AE%AD%E7%BB%83%E6%A8%A1%E5%9E%8B1.jpg) - -选择训练集(train1 
V1),选择测试集(val V1),选择训练目标(helmet_head,no_helmet_head),选择前置预训练模型(非必填)、训练镜像、训练类型、算法框架、骨干网络结构、GPU个数以及配置训练参数(训练参数提供默认值,默认参数中key值不可修改,value值可修改,如需添加参数可以自行添加)。如下图所示,训练初始模型: - -![训练2](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/%E8%AE%AD%E7%BB%83%E6%A8%A1%E5%9E%8B2.jpg) - -训练成功后,跳转到模型列表界面,可以查看到相应的训练进度和信息,完成后可查看模型的效果(mAP值)。 - -完成初始模型的导入或者训练后,点击【迭代模型准备】,选择初始模型,完成设置。迭代数据和模型均准备完成后,开启迭代。 - -## 模型迭代(通过迭代提升模型精度) - -开启迭代后,YMIR提供标准化的模型迭代流程,并且会在每一步操作中帮助用户默认填入上一次的操作结果,普通用户按照既定步骤操作,即可完成完整的模型迭代流程。 - -## 3.2.3. 挖掘数据准备 - -由于在模型训练的初期,很难一次性找到大量的优质数据来进行训练,导致初始模型的精度不够。因此,寻找有利于模型训练的数据一直是人工智能算法开发的一大问题,在这个过程中,往往会对算法工程师的人力资源产生很大消耗。在此基础上,YMIR提供成熟的挖掘算法,支持百万级数据挖掘,在海量数据中快速寻找到对模型优化最有利的数据,降低标注成本,减少迭代时间,保障模型的持续迭代。 - -【挖掘数据准备】为用户提供待挖掘的数据,这里的原数据集默认为项目设置的挖掘集。操作流程如下图示: - -![挖掘数据准备1](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/%E5%87%86%E5%A4%87%E6%8C%96%E6%8E%98%E6%95%B0%E6%8D%AE.jpg) -![挖掘数据准备2](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/%E5%87%86%E5%A4%87%E6%8C%96%E6%8E%98%E6%95%B0%E6%8D%AE2.jpg) - -操作完成后点击【下一步】,开启【数据挖掘】流程。 - -## 3.2.4. 数据挖掘 - -接下来使用在迭代模型准备时选择的模型,对待挖掘的数据集进行数据挖掘。点击【数据挖掘】按钮,跳转至数据挖掘界面,如下图所示: - -![挖掘1](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/%E6%8C%96%E6%8E%981.jpg) -![挖掘2](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/%E6%8C%96%E6%8E%982.jpg) - -默认原数据集为上次挖掘数据准备的结果数据集,默认模型为迭代准备中设置的初始模型,输入筛选测试TOPK=500(前500张成功挖掘的图像)和设定自定义参数(自定义参数提供默认值,默认参数中key值不可修改,value值可修改,如需添加参数可以自行添加)。 - -创建成功后,跳转到数据集管理界面,可以查看到相应的挖掘进度和信息,挖掘完成后可查挖掘出的结果数据集。 - -## 3.2.5. 数据标注 - -如果上一步中挖掘出来的数据没有标签,则需要进行标注。点击【数据标注】按钮,跳转至数据标注界面,如下图所示: - -![标注1](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/%E6%A0%87%E6%B3%A81.jpg) - -默认原数据集为上次挖掘得到的结果数据集,输入标注人员邮箱(需要提前去标注系统注册,点击最下方“注册标注平台账号”即可跳转到Label Studio标注平台注册标注账号),选择标注目标(helmet_head, no_helmet_head),如需自行到标注平台查看,请勾选“到标注平台查看”,填写自己的标注平台账号(同样需要提前注册),如对标注有更详细的要求,则可以上传标注说明文档供标注人员参考。点击创建,如下图所示: - -![标注2](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/%E6%A0%87%E6%B3%A82.jpg) - -创建成功后,跳转到数据集管理界面,可以查看到相应的标注进度和信息,标注完成后,系统自动获取完成结果,生成带有新标注的数据集。 - -## 3.2.6. 更新训练集 - -标注完成后,将已标注好的数据集合并到训练集中,并将合并结果生成为一个新的训练集版本。如下图所示: - -![更新1](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/%E6%9B%B4%E6%96%B01.jpg) -![更新2](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/%E6%9B%B4%E6%96%B02.jpg) - -## 3.2.7. 合并训练 - -![合并训练](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/%E6%B5%81%E7%A8%8B-%E4%B8%AD%E6%96%87.jpeg) - -合并完成后,再次进行模型训练,生成新的模型版本,如下图所示: - -![合并训练1](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/%E5%90%88%E5%B9%B6%E8%AE%AD%E7%BB%831.jpg) -![合并训练2](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/%E5%90%88%E5%B9%B6%E8%AE%AD%E7%BB%832.jpg) - -用户可对达到预期的模型进行下载。或继续进入下一轮迭代,进一步优化模型。 - -## 3.2.8. 模型验证 - -每次训练模型后,可以对模型结果进行验证,即通过可视化的方式查看模型在真实图片中的表现。在【模型管理】页面,点击对应模型的【验证】按钮,跳转到【模型验证】页面,如下图所示: - -![模型验证1](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/%E6%A8%A1%E5%9E%8B%E9%AA%8C%E8%AF%811.jpg) - -选择验证镜像,调整参数,点击【上传图片】按钮,选择本地图片上传,点击【模型验证】,显示结果如下: - -![模型验证2](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/%E6%A8%A1%E5%9E%8B%E9%AA%8C%E8%AF%812.jpg) - -## 3.2.9. 
模型下载 - -在【模型列表】页面,点击【下载】按钮,下载文件格式为tar包,包含模型的网络结构、为网络权重、超参数配置文件、训练的环境参数及结果,如下图所示: - -![模型下载](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/%E6%A8%A1%E5%9E%8B%E4%B8%8B%E8%BD%BD.jpeg) + +本次使用一次模型迭代的完整流程来说明YMIR平台的操作过程。具体的操作流程请查看[操作说明](https://github.com/IndustryEssentials/ymir/wiki/%E6%93%8D%E4%BD%9C%E8%AF%B4%E6%98%8E)。 # 4. 进阶版:Ymir-CMD line使用指南 本章节为YMIR-CMD line的使用说明,如需安装和使用GUI,请参考[GUI安装说明](#2-安装)。 - + ## 4.1 安装 ### 方式一:通过pip安装 @@ -452,330 +369,11 @@ $ mir --version ![流程-中文](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/%E6%B5%81%E7%A8%8B-%E4%B8%AD%E6%96%87.jpeg) 上图所示的是模型训练的一个典型流程:用户准备好外部数据,导入本系统,对数据进行适当筛选,开始训练得到一个(可能是粗精度的)模型,并依据这个模型,在一个待挖掘数据集中挑选适合进一步训练的图片,将这些图片进行标注,标注完成的结果与原训练集合并,用合并以后的结果再次执行训练过程,得到效果更好的模型。 -在这一节里,我们需要使用命令行实现上图所示的流程,其他流程也可以类似实现。 -以下所有命令前面都加入了$(它也是普通用户下Linux提示符),在实际向控制台输入命令时,$不需要一起输入。 - -### 4.2.1 准备外部数据 - -本系统对外部数据有以下要求: - -1.标注为[VOC格式](https://towardsdatascience.com/coco-data-format-for-object-detection-a4c5eaf518c5); - -2. 所有图片(本系统中统称为assets或medias)的路径需要统一写入index.tsv文件,同时,所有标注文件都需要位于同一个目录中; - -3. 运行命令行的用户需要index.tsv,所有图片文件和所有标注文件的读权限。 - -我们以 pascal 2017 test 数据集为例,描述一下外部数据集的准备过程。 -在官网下载数据集VOC2012test.tar,使用以下命令解压: - -``` -$ tar -xvf VOC2012test.tar -``` -解压以后,可以得到以下目录结构(假设VOCdevkit位于/data目录下): - -``` -/data/VOCdevkit -`-- VOC2012 - |-- Annotations - |-- ImageSets - | |-- Action - | |-- Layout - | |-- Main - | `-- Segmentation - `-- JPEGImages -``` - -其中,所有标注都位于annotations目录,而所有图片都位于JPEGImages目录。 -使用下述命令生成index.tsv文件: - -``` -$ find /data/VOCdevkit/VOC2012/JPEGImages -type f > index.tsv -``` - -可以看到index.tsv中有如下内容: - -``` -/data/VOCdevkit/VOC2012/JPEGImages/2009_001200.jpg -/data/VOCdevkit/VOC2012/JPEGImages/2009_004006.jpg -/data/VOCdevkit/VOC2012/JPEGImages/2008_006022.jpg -/data/VOCdevkit/VOC2012/JPEGImages/2008_006931.jpg -/data/VOCdevkit/VOC2012/JPEGImages/2009_003016.jpg -... -``` - -这个index.tsv可用于下一步的数据导入。 - -另外,在Annotations文件夹中,每个标注都拥有和图片相同的主文件名。其中的xxx属性将被提取成为预定义关键字,被用于后面一步的数据筛选。 - -### 4.2.2 建立本地repo并导入数据 - -本系统的命令行采用和 git 类似的做法对用户的资源进行管理,用户建立自己的 mir repository,并在此 mir repo 中完成接下来的所有任务。 - -想要建立自己的 mir repo,用户只需要: - -``` -$ mkdir ~/mir-demo-repo && cd ~/mir-demo-repo # 建立目录并进入 -$ mir init # 将此目录初始化成一个mir repo -$ mkdir ~/ymir-assets ~/ymir-models # 建立资源和模型存储目录,所有的图像资源都会保存在此目录中,而在mir repo中只会保留对这些资源的引用 -``` - -mir repo 中的标签通过标签文件进行统一管理,打开标签文件 `~/mir-demo-repo/labels.csv`,可以看到以下内容: - -``` -# type_id, preserved, main type name, alias... -``` - -在这个文件中,每一行代表一个类别标签,依次是标签 id(从 0 开始增长),留空,主标签名,一个或多个标签别名,例如,如果想要导入的数据集中含有 person, cat 和 tv 这几个标签,可以编辑此文件为: - -``` -0,,person -1,,cat -2,,tv -``` - -一个类别标签可以指定一个或多个别名,例如,如果指定 television 作为 tv 的别名,则 `labels.csv` 文件可更改为: - -``` -0,,person -1,,cat -2,,tv,television -``` - -可以使用vi,或其他的编辑工具对此文件进行编辑,用户可以添加类别的别名,也可以增加新的类别,但不建议更改或删除已经有的类别的主名和id。 - -`labels.csv` 文件可以通过建立软链接的方式,在多个 mir repo 之间共享。 - -用户需要事先准备三个数据集: - -1. 训练集 dataset-training,带标注,用于初始模型的训练; - -2. 验证集 dataset-val,带标注,用于训练过程中模型的验证; - -3. 
挖掘集 dataset-mining,这是一个比较大的待挖掘的数据集。 - -用户通过下述命令导入这三个数据集: - -``` -$ cd ~/mir-demo-repo -$ mir import --index-file /path/to/training-dataset-index.tsv \ # 数据集index.tsv路径 - --annotation-dir /path/to/training-dataset-annotation-dir \ # 标注路径 - --gen-dir ~/ymir-assets \ # 资源存储路径 - --dataset-name 'dataset-training' \ # 数据集名称 - --dst-rev 'dataset-training@import' # 结果分支及操作任务名称 -$ mir checkout master -$ mir import --index-file /path/to/val-dataset-index.tsv \ - --annotation-dir /path/to/val-dataset-annotation-dir \ - --gen-dir ~/ymir-assets \ - --dataset-name 'dataset-val' \ - --dst-rev 'dataset-val@import' -$ mir checkout master -$ mir import --index-file /path/to/mining-dataset-index.tsv \ - --annotation-dir /path/to/mining-dataset-annotation-dir \ - --gen-dir ~/ymir-assets \ - --dataset-name 'dataset-mining' \ - --dst-rev 'dataset-mining@import' -``` - -任务全部执行成功以后,可以通过以下命令: - -``` -$ mir branch -``` - -查看当前 mir repo 的分支情况,现在用户应该可以看到此 repo 有四个分支:master, dataset-training, dataset-val, dataset-mining,并且当前 repo 位于分支dataset-mining上。 - -用户也可以通过以下命令查看任何一个分支的情况: - -``` -$ mir show --src-rev dataset-mining -``` - -系统会有以下输出: - -``` -person;cat;car;airplane - -metadatas.mir: 200 assets, tr: 0, va: 0, te: 0, unknown: 200 -annotations.mir: hid: import, 113 assets -tasks.mir: hid: import -``` - -第一行和第二行分别是预定义关键字和用户自定义关键字(在这个输出中,用户自定义关键字为空),后面几行分别是当前分支下的资源数量,标注数量以及任务情况。 - -### 4.2.3 合并及筛选 -训练模型需要训练集和验证集,通过以下命令将 dataset-training 和 dataset-val 合成一个: - -``` -$ mir merge --src-revs tr:dataset-training@import;va:dataset-val@import \ # 待合并分支 - --dst-rev tr-va@merged \ # 结果分支及操作任务名称 - -s host # 策略:依据主体分支解决冲突 -``` - -合并完成后,可以看到当前 repo 位于 tr-va 分支下,可以使用 mir show 命令查看合并以后的分支状态: - -``` -$ mir show --src-revs HEAD # HEAD指代当前分支,也可以用tr-va这个具体的分支名称代替 -``` - -系统会有以下输出: - -``` -person;cat;car;airplane - -metadatas.mir: 3510 assets, tr: 2000, va: 1510, te: 0, unknown: 0 -annotations.mir: hid: merged, 1515 assets -tasks.mir: hid: merged -``` - -假设合并之前的 dataset-training 和 dataset-val 分别有2000和1510张图像,可以看到合并后的分支中有2000张图像作为训练集,1510张图像作为验证集。 -假设我们只想训练识别人和猫的模型,我们首先从这个大数据集里面筛选出现人或猫的资源: - -``` -mir filter --src-revs tr-va@merged \ - --dst-rev tr-va@filtered \ - -p 'person;cat' -``` - -### 4.2.4 训练第一个模型 -首先从 dockerhub 上拉取训练镜像和挖掘镜像: - -``` -docker pull industryessentials/executor-det-yolov4-training:release-0.1.2 -docker pull industryessentials/executor-det-yolov4-mining:release-0.1.2 -``` - -并使用以下命令开始训练过程: - -``` -mir train -w /tmp/ymir/training/train-0 \ - --media-location ~/ymir-assets \ # import时的资源存储路径 - --model-location ~/ymir-models \ # 训练完成后的模型存储路径 - --task-config-file ~/training-config.yaml \ # 训练参数配置文件,到训练镜像中获取 - --src-revs tr-va@filtered \ - --dst-rev training-0@trained \ - --executor industryessentials/executor-det-yolov4-training:release-0.1.2 # 训练镜像 -``` - -模型训练完成后,系统会输出模型id,用户可以在~/ymir-models中看到本次训练好的模型打包文件。 - -### 4.2.5 挖掘 - -上述模型是基于一个小批量数据集训练得到的,通过挖掘,可以从一个大数据集中得到对于下一步训练模型最为有效的资源。 -用户使用下述命令完成挖掘过程: - -``` -mir mining --src-revs dataset-mining@import \ # 导入的挖掘分支 - --dst-rev mining-0@mining \ # 挖掘的结果分支 - -w /tmp/ymir/mining/mining-0 \ # 本次任务的临时工作目录 - --topk 200 \ # 挖掘结果的图片数量 - --model-location ~/ymir-models \ - --media-location ~/ymir-assets \ - --model-hash \ # 上一步训练出来的模型id - --cache /tmp/ymir/cache \ # 资源缓存 - --task-config-file ~/mining-config.yaml \ # 挖掘参数配置文件,到挖掘镜像中获取 - --executor industryessentials/executor-det-yolov4-mining:release-0.1.2 -``` - -### 4.2.6 标注 -现在,系统已经挖掘出了对于模型训练最有效的200张图像,这些图像被保存在分支mining中,接下来的任务是将这些资源导出,送给标注人员进行标注。 -用户可以通过下述命令完成导出过程: - -``` -mir export --asset-dir /tmp/ymir/export/export-0/assets \ # 
资源导出目录 - --annotation-dir /tmp/ymir/export/export-0/annotations \ # 导出标注目录 - --media-location ~/ymir-assets \ # 资源存储目录 - --src-revs mining-0@mining \ - --format none # 不导出标注 -find /tmp/ymir/export/export-0/assets > /tmp/ymir/export/export-0/index.tsv -``` - -导出完成后,可以在/tmp/ymir/export/export-0/assets位置看到导出的图片,用户可以将这些图片送去标注,标注需要按VOC格式保存,假设保存路径仍然为/tmp/ymir/export/export-0/annotations。 -标注完成后,用户可以使用与[4.2.2](#422-建立本地repo并导入数据)中的导入命令类似的方式导入数据: - -``` -$ mir import --index-file /tmp/ymir/export/export-0/index.tsv - --annotation-dir /tmp/ymir/export/export-0/annotations \ # 标注路径 - --gen-dir ~/ymir-assets \ # 资源存储路径 - --dataset-name 'dataset-mining' \ # 数据集名称 - --dst-rev 'labeled-0@import' # 结果分支及操作任务名称 -``` - -### 4.2.7 合并 -现在的工作空间的分支labeled-0中已经包含了挖掘出来的200张新的训练图像,可以通过前述的merge将其和原来的训练集合并在一起: - -``` -$ mir merge --src-revs tr-va@filtered;tr:labeled-0@import \ # 待合并分支 - --dst-rev tr-va-1@merged \ # 结果分支及操作任务名称 - -s host # 策略:依据主体分支解决冲突 -``` - -### 4.2.8 训练第二个模型 -现在在分支tr-va-1上,已经包含了前一次训练所用的训练集和验证集,也包含了后来通过数据挖掘得出的新的200张训练集加人工标注,可以通过以下命令在此集合上训练一个新的模型出来: - -``` -mir train -w /tmp/ymir/training/train-1 \ # 每个不同的训练和挖掘任务都用不同的工作目录 - --media-location ~/ymir-assets \ - --model-location ~/ymir-models \ - --task-config-file ~/training-config.yaml \ # 训练参数配置文件,到训练镜像中获取 - --src-revs tr-va-1@merged \ # 使用合成以后的分支 - --dst-rev training-1@trained \ - --executor industryessentials/executor-det-yolov4-training:release-0.1.2 -``` - -## 4.3. 命令参数手册 - -Ymir-command-api.211028 - -**通用参数格式与定义** - -| 参数名 | 变量名 | 类型与格式 | 定义 | -|----------------|----------|-------------|----------------------------------------------------------| -| --root / -r | mir_root | str | 需要初始化的路径,如果没有指定,则为当前路径 | -| --dst-rev | dst_rev | str | 1. 目标rev,仅限单个 | -| | | rev@tid | 2. 所有修改将保存在此rev的tid上 | -| | | | 3. 如果是一个新的rev则先checkout到第一个src-revs再创建 | -| | | | 4. tid必须,rev必须 | -| --src-revs | src_revs | str | 1. 数据来源rev,多个用分号隔开(仅merge支持,其他cmd仅支持单个) | -| | | typ:rev@bid | 2. typ可选,只在merge有效果,支持前置用途标识符(tr/va/te),为空,则表示使用原rev中的设置 | -| | | | 3. bid可选,若为空则读head task id | -| | | | 4. rev不能为空 | -| | | | 注意:当出现多个revs,例如a1@b1;a2@b2,需要用引号将其括起来,因为分号是Linux命令分隔符。 | - -**mir init** - -| DESCRIPTION | | | -| ------------------------------------------------------- | -------- | --------- | -| mir init [--root ] | | | -| 将当前路径,或者-root指定的路径初始化为一个mir root。 | | | -| ARGS(ARGS名称、run_with_args中的参数名称、类型、说明) | | | -| --root / -r | mir_root | str,可选 | -| RETURNS | | | -| 正常初始化:返回0 | | | -| 异常:其他error code | | | - -**mir branch** - -| DESCRIPTION | | | -| ------------------------------ | -------- | --------- | -| mir branch [--root ] | | | -| 列出当前本地或远程的所有分支 | | | -| ARGS | | | -| --root / -r | mir_root | str,可选 | -| RETURNS | | | +在这一节里,我们需要使用命令行实现上图所示的流程,其他流程也可以类似实现。具体操作请查看[命令行使用说明](https://github.com/IndustryEssentials/ymir/wiki/%E5%91%BD%E4%BB%A4%E8%A1%8C%E4%BD%BF%E7%94%A8%E8%AF%B4%E6%98%8E)。 # 5. 获取代码 -## 5.1 YMIR repos - -YMIR项目由三部分组成: - -1. 后端 [https://github.com/IndustryEssentials/ymir-backend](https://github.com/IndustryEssentials/ymir/tree/master/ymir/backend),负责任务分发与管理 - -2. 前端 [https://github.com/IndustryEssentials/ymir-web](https://github.com/IndustryEssentials/ymir/tree/master/ymir/web),交互界面 - -3. 
命令行 [https://github.com/IndustryEssentials/ymir-cmd](https://github.com/IndustryEssentials/ymir/tree/master/ymir/command),CLI界面,管理底层标注与图像数据 - -## 5.2 代码贡献 +## 5.1 代码贡献 YMIR repo中的任何代码都应遵循编码标准,并将在CI测试中进行检查。 @@ -787,78 +385,14 @@ YMIR repo中的任何代码都应遵循编码标准,并将在CI测试中进行 也可以查看 [MSFT编码风格](https://github.com/Microsoft/Recommenders/wiki/Coding-Guidelines) 来获取更多的建议。 -## 5.3 关于训练,推理与挖掘镜像 +## 5.2 关于训练,推理与挖掘镜像 -[查看这篇文档](docs/ymir-cmd-container.md)获取更多细节。 +[查看这篇文档](https://github.com/IndustryEssentials/ymir/blob/dev/dev_docs/ymir-cmd-container.md)获取更多细节。 # 6. 设计理念 - -## 6.1. Life of a dataset - -### 6.1.1 数据集介绍 - -数据集由Metadata(元数据)与媒体文件组成,元数据具有下述特征: - -* 它拥有唯一ID,系统有一个初始的默认Metadata状态,为空; -* 它拥有一个资源列表,列表中每个元素都指向一个实际的资源,Metadata不实际保存资源,只维护此资源列表; -* 它拥有若干keywords,用户可以通过这些keywords搜索到某个特定的Metadata状态; -* 用户可以为某个metadata新开分支,并在新开的分支上进行操作,在新分支上的操作不影响原metadata的状态,且原metadata仍可以被用户追溯,这些操作包括但不限于: - - (1)添加资源 - (2)添加或修改标注 - (3)添加或修改关键词 - (4)过滤资源 - (5)合并两个不同的metadatas - -* 用户可以在不同metadata之间自由跳转; -* 用户可以查询metadata的历史; -* 用户可以将metadata打上自己的tag,便于通过tag精确查找; -* 用户也可以向metadata添加keywords,便于keywords模糊搜索; -* 用户可以通过某种方式读取一个metadata中所包含的资源,并将这些资源用于浏览、训练等。 - -从以上描述可以看出,对于metadata的管理,类似于VCS(版本管理系统),用户可以有下面几种完全不同的使用方式与场景: - -**场景一**: 直接从最开始的metadata,进行筛选过程,选出并使用符合要求的数据,如下图所示: - -![场景一](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/%E5%9C%BA%E6%99%AF%E4%B8%80.jpeg) - -每当用户需要开始一项新任务时: -* 用户从当前的主分支内签出一个新的feature分支,得到处于feature#1状态的metadata; -* 用户在此新分支的metadata上进行数据筛选和其他任务,得到处于feature#2状态的metadata; -* 当确认这个metadata适合自己的训练任务,则可以使用这个数据开始训练; -* 此时,其他用户对master分支的metadata进行更改,也不会影响到用户正在使用的训练数据。 - -**场景二**:通过tag或keywords搜索到某个metadata,并开始筛选过程,直到得出符合要求的数据,然后使用该数据,如下图所示: - -![场景二](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/%E5%9C%BA%E6%99%AF%E4%BA%8C.jpeg) - -此时,每当用户需要开展一项新任务时: -* 通过keywords和tag等方式,搜索到一个基本符合自己要求的metadata -* 在此基础上,签出一个新分支 -* 在新分支上继续进行数据筛选或清洗,得到真正符合要求的数据 -* 利用此数据进行训练 - -**场景三**:增量合并。假设用户已经使用某个metadata完成了模型的训练任务,此时资源库与主分支的metadata有更新,用户希望将这一部分更新合并到当前使用的metadata中: - -![场景三](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/%E5%9C%BA%E6%99%AF%E4%B8%89.jpeg) - -假设用户现在在feature#2,用户需要进行如下操作: -* 切回主分支master -* 对master#2 - master#1这个增量部分,重复之前做过的任务,得到feature#2+ -* 切回feature#2,合并feature#2+,得到feature#3 - -### 6.1.2 分支与数据集的管理 - -本节的论述基于以下假设: -* 用户数据以数据集为单位分批导入 -* 每个数据集是一个独立分支; -* 对每个数据集的更改及维护都在本分支上进行; -* master分支始终为空。 -这种管理方式如下图所示: - -![分支及数据集管理](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/%E5%88%86%E6%94%AF%E5%8F%8A%E6%95%B0%E6%8D%AE%E9%9B%86%E7%AE%A1%E7%90%86.jpeg) - -我们使用Git中代码版本控制的概念来管理我们的数据和模型。我们使用分支的概念创建新项目,以便同一组映像上的不同任务可以并行运行。数据集的增加、检索、更新和删除以及基本操作都创建提交到分支。从逻辑上讲,每次提交都存储数据集或新模型的更新版本,以及导致此更改的操作的元数据。最后,只有数据更改被合并到主分支,这在概念上,聚合了该平台上许多项目注释的所有数据。 + +我们使用Git中代码版本控制的概念来管理我们的数据和模型。我们使用分支的概念创建新项目,以便同一组映像上的不同任务可以并行运行。数据集的增加、检索、更新和删除以及基本操作都创建提交到分支。从逻辑上讲,每次提交都存储数据集或新模型的更新版本,以及导致此更改的操作的元数据。最后,只有数据更改被合并到主分支,这在概念上,聚合了该平台上许多项目注释的所有数据。具体设计理念请查看 +[Life of a dataset](https://github.com/IndustryEssentials/ymir/wiki/%E6%95%B0%E6%8D%AE%E9%9B%86%E6%B5%81%E8%BD%AC%E8%BF%87%E7%A8%8B)。 # 7.MISC @@ -884,15 +418,20 @@ YMIR repo中的任何代码都应遵循编码标准,并将在CI测试中进行 尚未在Windows服务器完备测试,暂时无法提供服务支持。 +* 如何导入外部模型 + +参考[此文档](https://github.com/IndustryEssentials/ymir/blob/dev/dev_docs/import-extra-models.md)。 + -[![All Contributors](https://img.shields.io/badge/All%20Contributors-8-brightgreen)](#contributors-) +[![All Contributors](https://img.shields.io/badge/All%20Contributors-9-brightgreen)](#contributors-) - - - - - - - - + + + + + + + + + diff --git a/dev_docs/import-extra-models.md 
b/dev_docs/import-extra-models.md
new file mode 100644
index 0000000000..f0e20b1b6f
--- /dev/null
+++ b/dev_docs/import-extra-models.md
@@ -0,0 +1,71 @@
+# 如何导入外部模型
+
+## 外部模型的准备
+
+假设有一个在 ymir 系统外部进行的基于 mxnet 的训练过程，这个训练过程在第 1000 个 epoch 的时候产生了模型 `model-1000.params` 和 `model-symbol.json`，其 mAP 是 0.3，在第 2000 个 epoch 的时候产生了模型 `model-2000.params` 和 `model-symbol.json`，其 mAP 是 0.6，现在想要将这个过程中产生的模型导入 ymir 系统中。
+
+在导入外部模型之前，需要做如下准备：
+
+1. 将训练过程中产生的文件，如 `model-1000.params`, `model-2000.params`, `model-symbol.json`，拷贝到同一路径下；
+
+2. 在此路径下生成 `ymir-info.yaml` 文件，其格式如下：
+
+``` yaml
+executor_config:
+  class_names: # 训练目标，必要
+  - person
+  - cat
+stages: # 训练过程中产生的 model stages，必要
+  stage_1000: # 在 epoch 1000 时产生的 model stage name，必要
+    files: # 此 model stage 对应的文件，必要
+    - model-1000.params
+    - model-symbol.json
+    mAP: 0.3 # 此 model stage 的 mAP，必要
+    stage_name: stage_1000 # model stage name，必要
+    timestamp: 1655975204 # model stage 的创建时间，用 `ls --time-style=+%s -l` 取得文件创建时间，必要
+  stage_2000:
+    files:
+    - model-2000.params
+    - model-symbol.json
+    mAP: 0.6
+    stage_name: stage_2000
+    timestamp: 1655975205
+best_stage_name: stage_2000 # 最好的 model stage name，必要
+task_context:
+  executor: sample-executor # 训练模型时所用的镜像，非必要
+  mAP: 0.6 # 最好的 model stage 对应的 mAP
+  producer: fenrir-z # 模型作者
+  task_parameters: '{"keywords": ["person", "cat"]}' # keywords 与 class_names 一致, 注意引号
+```
+
+3. 将 `ymir-info.yaml` 和所有模型文件打包：
+
+``` bash
+tar -czvf model.tar.gz model-1000.params model-2000.params model-symbol.json ymir-info.yaml
+```
+
+## 如何导入
+
+可以使用 ymir 的模型导入功能，也可以使用以下命令行：
+
+``` bash
+mir models --package-path /path/to/model.tar.gz \
+    --root /path/to/mir/repo \
+    --dst-rev model-0@model-0 \ # xxx@xxx 格式，导入模型即意味着产生一个新的模型分支
+    --model-location /path/to/ymir-models \ # ymir 系统模型保存路径
+    -w /path/to/tmp/work/dir # 模型导入时的临时工作目录
+```
+
+如果导入成功，控制台会显示以下信息：
+
+``` plain
+pack success, model hash: xxxxxxxxxxxxxxxx, best_stage_name: stage_2000, mAP: 0.6
+```
+
+这里产生的 model hash 就是此模型在 ymir 中的 id，当使用命令行进行挖掘，推理时，可以提供此 model hash 和 model stage 给相应的命令。
+
+## 限制与注意事项
+
+1. 在 `外部模型的准备` 一节中的第 2 步中标明 `必要` 的字段都需要提供
+
+2. 模型的继续训练，推理，挖掘等任务需要配合特定镜像完成，所以需要在导入模型时，明确其使用的镜像，使其在后续流程中可用
diff --git a/docs/ymir-cmd-container.md b/dev_docs/ymir-cmd-container.md
similarity index 83%
rename from docs/ymir-cmd-container.md
rename to dev_docs/ymir-cmd-container.md
index 44aa51fa17..8899c56262 100644
--- a/docs/ymir-cmd-container.md
+++ b/dev_docs/ymir-cmd-container.md
@@ -1,5 +1,11 @@
 # ymir 与功能性 docker container 数据传输接口
 
+| 协议文档版本 | ymir 版本 | 说明 | 镜像适配方式 |
+| --- | --- | --- | --- |
+| [0.0.0](https://raw.githubusercontent.com/IndustryEssentials/ymir/release-1.1.0/docs/ymir-cmd-container.md) | 0.0.0 - 1.1.0 | 初始版本 | |
+| [1.0.0](https://raw.githubusercontent.com/IndustryEssentials/ymir/ymir-pa/docs/ymir-cmd-container.md) | 1.2.0 - 1.2.2 | 增加关于中间模型的描述 | 使用 `write_model_stage` 方法保存训练产出的中间模型 |
+| 1.1.0 | 2.0.0 - | 4.3.2 节，训练完成后，模型保存策略更改<br>4.4.2 节，推理完成后，推理结果保存的节点由 annotations 改为 boxes | 1. 保存中间模型时，将模型文件保存至以 `中间模型名称` 命名的子目录中<br>2. 保存推理结果时，将原来的 annotations 键改为 boxes |
+
 ## 1. 关于此文档
 
 此文档用于规定 ymir 与功能性 docker container 之间的数据交互方式，这些 docker container 用于模型训练，挖掘及推理任务。
@@ -207,6 +213,22 @@ task_0 1622552975 1 done
 
 * `pretrained_model_params`: 预训练模型文件的路径列表，如果留空，则从头开始训练，如果非空，则从这个列表中找到镜像支持的模型文件，并在此模型的基础上继续训练
 
+* `export_format`: 希望 ymir 向此镜像提供的数据格式，由 `标注格式` 与 `图像格式` 两部分组成，中间用英文冒号分隔，例如 `det-voc:raw` 表示导出原始图像，以及 voc 格式的检测标注，如果此项留空或者不存在，则只导出图像，不导出标注
+
+  * `标注格式` 可取以下值：
+
+    * `det-voc`: 导出 voc 格式的检测标注
+
+    * `det-ark`: 导出 csv 格式的检测标注(class id, x, y, w, h, annotation quality, rotate angle)
+
+    * `det-ls-json`: 导出适合 LabelStudio 使用的检测标注
+
+    * `seg-poly`: 导出 polygon 格式的分割标注
+
+    * `seg-mask`: 导出 mask 类型的分割标注
+
+  * `图像格式` 目前只能指定为 `raw`
+
 #### 4.3.2. 输出挂载点
 
 | 路径 | 说明 |
@@ -214,15 +236,27 @@
 | /out/log.txt | 参考共同部分 |
 | /out/monitor.txt | 参考共同部分 |
 | /out/monitor-log.txt | 参考共同部分 |
-| /out/models | 必要，最终生成的模型的输出目录，里面直接存放模型文件，没有下级子目录。<br>必须有一个 `result.yaml` 文件，格式参考注1 |
-
-注1. `result.yaml` 文件的格式如下，其中 model 节点填入 `/out/models` 下的模型文件名(不带目录名 `/out/models`)：
-
-```
-map: 1.000
-model:
- - 149_1.000-symbol.json
- - 149_1.000-0149.params
+| /out/models | 必要，最终生成的模型的输出目录，模型文件存放在以 stage_name 命名的子目录中。<br>/out/models 下必须有一个 `result.yaml` 文件，格式参考注1 |
+
+注1. `result.yaml` 文件的格式如下：
+
+``` yaml
+best_stage_name: epoch_50 # 最优的中间模型名称
+model_stages:
+  epoch_10: # 中间模型名称：epoch_10
+    files: # 中间模型对应的文件列表，这些文件在 /out/models 下面
+    - 149_1.000-symbol.json
+    - 149_1.000-0149.params
+    mAP: 0.6 # 中间模型对应的 mAP
+    stage_name: epoch_10
+    timestamp: 1663934682 # 创建时间对应的 timestamp
+  epoch_50:
+    files:
+    - 149_1.000-symbol.json
+    - 149_1.000-0149.params
+    mAP: 0.8
+    stage_name: epoch_50
+    timestamp: 1663934682
 ```
 
 ### 4.4. inference / mining 镜像输入/输出挂载格式
@@ -274,8 +308,8 @@
 
 ```
 {'detection': {
-    asset-name-0: {'annotations': [{'box': {'x': 30, 'y': 30, 'w': 50, 'h': 50}, 'class_name': 'cat','score': 0.8}, ...]},
-    asset-name-1: {'annotations': [...]},
+    asset-name-0: {'boxes': [{'box': {'x': 30, 'y': 30, 'w': 50, 'h': 50}, 'class_name': 'cat','score': 0.8}, ...]},
+    asset-name-1: {'boxes': [...]},
     ...
 }
}
diff --git a/dev_docs/ymir-dataset-zh-CN.md b/dev_docs/ymir-dataset-zh-CN.md
new file mode 100644
index 0000000000..994ee02e7f
--- /dev/null
+++ b/dev_docs/ymir-dataset-zh-CN.md
@@ -0,0 +1,139 @@
+# ymir-executor 使用说明
+
+本文档面向使用或定制[ymir-executor](https://github.com/IndustryEssentials/ymir-executor)的用户，在阅读本文档之前，建议阅读以下文档：
+
+- [ymir使用文档](https://github.com/IndustryEssentials/ymir/blob/master/README_zh-CN.md)
+
+- [sample-executor](https://github.com/IndustryEssentials/ymir/tree/master/docker_executor/sample_executor)
+
+
+## 外部数据集导入ymir-gui系统
+
+- `<1G` 的数据集可以直接`本地导入`，将本地数据集压缩包上传到ymir系统中，数据集具体格式与voc类似，参考[ymir-cmd 准备外部数据](https://github.com/IndustryEssentials/ymir/blob/master/README_zh-CN.md#421-%E5%87%86%E5%A4%87%E5%A4%96%E9%83%A8%E6%95%B0%E6%8D%AE)
+  - [sample导入数据集](https://github.com/yzbx/ymir-executor-fork/releases/download/dataset/import_sample_dataset.zip)
+
+  ![](images/ymir-local-import.png)
+
+- `>=1G` 的数据集可以通过`路径导入`，先将数据集复制到ymir工作目录下的子目录`ymir-sharing`，再输入相对路径导入
+  ![](images/ymir-path-import.png)
+
+- [其它数据集导入ymir-gui系统的方式](https://github.com/IndustryEssentials/ymir/blob/master/README_zh-CN.md#321-%E8%BF%AD%E4%BB%A3%E6%95%B0%E6%8D%AE%E5%87%86%E5%A4%87)
+
+
+## ymir系统与ymir-executor镜像的数据传输接口
+
+- 参考[ymir 与功能性 docker container 数据传输接口](https://github.com/IndustryEssentials/ymir/blob/master/docs/ymir-cmd-container.md)
+
+  - ymir会将`/in`与`/out`目录挂载到镜像中
+
+  - 镜像中需要自带`/img-man`目录，辅助ymir系统对镜像类型进行识别，并对超参数页面进行配置
+
+  - 镜像默认以`bash /usr/bin/start.sh`进行启动
+
+  - **注意所有 .tsv 和 .yaml 文件中出现的路径都是绝对路径**
+
+- [sample /in /out](https://github.com/yzbx/ymir-executor-fork/releases/download/dataset/sample_docker_input.zip)
+
+  ![](images/sample_docker_input.png)
+
+- [sample /img-man](https://github.com/IndustryEssentials/ymir/tree/master/docker_executor/sample_executor/app)
+
+  - 注意所有的`xxx-template.yaml`只能是一级`key:value`文件
+
+### 索引文件 train-index.tsv / val-index.tsv / candidate-index.tsv
+
+- 每行由`图像的绝对路径` + `制表符` + `标注的绝对路径`构成
+
+```
+{image_abs_path 1}\t{annotation_abs_path 1}
+{image_abs_path 2}\t{annotation_abs_path 2}
+... 
+``` + +- 注意 `candidate-index.tsv` 中只有`图像的绝对路径` + +- 图像为常见的jpg, png格式 + +- 默认标注为`txt`格式,其中`class_id, xmin, ymin, xmax, ymax`均为整数, 所有标注格式介绍见[ymir输入镜像的标注格式]() + +``` +class_id, xmin, ymin, xmax, ymax, bbox_quality +``` + + +### 超参数配置文件 config.yaml + +用户可以在超参数页面看到`xxx-template.yaml`的信息,而`config.yaml` 中的信息,是用户更改过后的。 + +- 对于训练任务,`config.yaml`提供training-template.yaml中的配置 + ymir-gui 用户自定义配置 + ymir默认配置 + +- 对于挖掘任务,`config.yaml`提供mining-template.yaml中的配置 + ymir-gui 用户自定义配置 + ymir默认配置 + +- 对于推理任务,`config.yaml`提供infer-template.yaml中的配置 + ymir-gui 用户自定义配置 + ymir默认配置 + +``` +class_names: # ymir默认配置 +- bowl +- cat +- bottle +- cup +- spoon +gpu_id: '0' # ymir默认配置 +pretrained_model_params: [] # ymir训练时可选默认配置 +model_params_path: [] # ymir推理/挖掘时默认配置 +task_id: t0000001000002ebb7f11653630774 # ymir默认配置 +img_size: 640 # 用户自定义配置 +model: yolov5n # 用户自定义配置 +batch_size: 16 # 用户自定义配置 +``` + +### ymir路径配置文件 env.yaml + +存放一些路径信息,以及当前进行的任务信息 + +- 是否进行训练任务: `run_training: true|false` + +- 是否进行推理任务:`run_infer: true|false` + +- 是否进行挖掘任务: `run_mining: true|false` + +``` +input: + annotations_dir: /in/annotations # 标注文件存放目录 + assets_dir: /in/assets # 图像文件存放目录 + candidate_index_file: '' # 挖掘索引文件 + config_file: /in/config.yaml # 超参配置文件 + models_dir: /in/models # 预训练模型存放目录 + root_dir: /in # 输入根目录 + training_index_file: /in/train-index.tsv # 训练索引文件 + val_index_file: /in/val-index.tsv # 验证索引文件 +output: + infer_result_file: /out/infer-result.json # 推理结果文件 + mining_result_file: /out/result.tsv # 挖掘结果文件 + models_dir: /out/models # 训练任务模型权重与信息等存放目录 + monitor_file: /out/monitor.txt # 任务进度文件 + root_dir: /out # 输出根目录 + tensorboard_dir: /out/tensorboard # tensorboard结果文件目录 + training_result_file: /out/models/result.yaml # 训练任务结果文件 +run_infer: false +run_mining: false +run_training: true +task_id: t0000001000002ebb7f11653630774 # 任务id +``` + +## ymir输入镜像的标注格式 + +常见的目标检测标注格式有 `voc` 与 `coco`, ymir 除自身格式, 目前还支持`voc`格式,可在超参数页面通过设置`export_format`对ymir导入镜像的数据格式进行修改。 + +### 默认数据格式 +- `export_format=ark:raw`, 标注文件为`xxx.txt` + +### voc 数据格式 + +- `export_format=voc:raw`, 标注文件为`xxx.xml` + + ![](images/ymir-export-format.png) + +- 导出的标注为voc的xml格式 + + ![](images/ymir-export-voc-sample.png) diff --git a/docker-compose.dev.yml b/docker-compose.dev.yml new file mode 100644 index 0000000000..4a94f7defa --- /dev/null +++ b/docker-compose.dev.yml @@ -0,0 +1,139 @@ +version: "3.3" + +services: + prometheus: + image: prom/prometheus:v2.17.1 + user: "0:0" + command: + - '--config.file=/etc/prometheus/prometheus.yml' + - '--storage.tsdb.path=/prometheus' + - '--web.console.libraries=/etc/prometheus/console_libraries' + - '--web.console.templates=/etc/prometheus/consoles' + - '--web.enable-lifecycle' + volumes: + - ./metrics/config/prometheus.yml:/etc/prometheus/prometheus.yml:ro + - ${YMIR_PATH}/metrics/prometheus:/prometheus + depends_on: + - cadvisor + healthcheck: + test: wget -q --tries=1 -O- http://localhost:9090/-/healthy | grep -q "Healthy" + interval: 60s + timeout: 6s + retries: 3 + start_period: 30s + networks: + - ymirnetwork + + + blackbox: + image: prom/blackbox-exporter:v0.22.0 + volumes: + - ./metrics/config/blackbox.yml:/etc/blackbox_exporter/config.yml:ro + healthcheck: + test: wget -q --tries=1 -O- http://localhost:9115/-/healthy | grep -q "Healthy" + interval: 60s + timeout: 6s + retries: 3 + start_period: 30s + networks: + - ymirnetwork + + cadvisor: + image: gcr.io/cadvisor/cadvisor:v0.44.1-test + volumes: + - /:/rootfs:ro + - /var/run:/var/run:rw + - /sys:/sys:ro + - /var/lib/docker/:/var/lib/docker:ro + - 
/var/run/docker.sock:/var/run/docker.sock:ro + - /dev/disk/:/dev/disk:ro + healthcheck: + test: wget -q --tries=1 -O- http://localhost:8080/healthz | grep -q "ok" + interval: 60s + timeout: 6s + retries: 3 + start_period: 30s + command: + - '-housekeeping_interval=10s' + - '-docker_only=true' + networks: + - ymirnetwork + + nodeexporter: + image: prom/node-exporter:v1.3.1 + restart: unless-stopped + volumes: + - /proc:/host/proc:ro + - /sys:/host/sys:ro + - /:/rootfs:ro + command: + - '--path.procfs=/host/proc' + - '--path.rootfs=/rootfs' + - '--path.sysfs=/host/sys' + - '--collector.filesystem.mount-points-exclude=^/(sys|proc|dev|host|etc)($$|/)' + healthcheck: + test: wget -q --tries=1 -O- http://localhost:9100/health | grep -q "Node Exporter" + interval: 60s + timeout: 6s + retries: 3 + start_period: 30s + networks: + - ymirnetwork + + loki: + image: grafana/loki:main-87e20d9 + user: "0:0" + volumes: + - ${YMIR_PATH}/metrics/loki/:/etc/loki/ + - ./metrics/config/loki.yml:/etc/loki/loki.yml:ro + command: -config.file=/etc/loki/loki.yml + restart: on-failure:10 + healthcheck: + test: wget -q --tries=1 -O- http://localhost:3100/ready | grep -q "ready" + interval: 60s + timeout: 6s + retries: 3 + start_period: 30s + networks: + - ymirnetwork + + promtail: + image: grafana/promtail:main-87e20d9 + volumes: + - ${YMIR_PATH}/metrics/promtail:/etc/promtail/ + - ${YMIR_PATH}/ymir-data/logs:/var/log/:ro + - ${TENSORBOARD_ROOT}:/var/dockerlog:ro + - "${BACKEND_SANDBOX_ROOT}:${BACKEND_SANDBOX_ROOT}" + - ./metrics/config/promtail.yml:/etc/promtail/promtail.yml:ro + command: -config.file=/etc/promtail/promtail.yml + restart: on-failure:10 + healthcheck: + test: promtails --version + interval: 60s + timeout: 6s + retries: 3 + start_period: 30s + networks: + - ymirnetwork + + grafana: + image: grafana/grafana:9.1.0 + environment: + - "GF_DASHBOARDS_DEFAULT_HOME_DASHBOARD_PATH=/etc/grafana/provisioning/dashboards/docker_containers.json" + restart: unless-stopped + user: "0:0" + ports: + - 13000:3000 + depends_on: + - prometheus + volumes: + - ${YMIR_PATH}/metrics/grafana:/var/lib/grafana + - ./metrics/grafana/provisioning:/etc/grafana/provisioning + healthcheck: + test: wget -q --tries=1 -O- http://localhost:3000/api/health | grep -q "ok" + interval: 60s + timeout: 6s + retries: 3 + start_period: 30s + networks: + - ymirnetwork diff --git a/docker-compose.label_studio.yml b/docker-compose.label_studio.yml index 435c483c32..7580af7fbb 100755 --- a/docker-compose.label_studio.yml +++ b/docker-compose.label_studio.yml @@ -1,6 +1,6 @@ version: "3.3" services: - label-studio: + labelstudio: image: heartexlabs/label-studio:latest env_file: - .env diff --git a/docker-compose.labelfree.yml b/docker-compose.labelfree.yml index 73bef0dd8c..0284fb88a5 100644 --- a/docker-compose.labelfree.yml +++ b/docker-compose.labelfree.yml @@ -1,7 +1,7 @@ version: "3.1" services: label_nginx: - image: labelfree/open_frontend:latest + image: labelfree/open_frontend:2.0.0 env_file: - .env ports: @@ -11,12 +11,13 @@ services: restart: always label_api: - image: labelfree/open:latest + image: labelfree/open:2.0.0 command: gunicorn --conf /code/config/gunicorn.py --log-config /code/config/logging.conf --chdir /code/labelfree app:app env_file: - .env volumes: - ${BACKEND_SANDBOX_ROOT}:${BACKEND_SANDBOX_ROOT} + - ${YMIR_PATH}/labelfree/backend_log:/var/log/gunicorn environment: SQLALCHEMY_DATABASE_URI: mysql+pymysql://root:${MYSQL_ROOT_PASSWORD}@label_mysql:3306/labelfree REDIS_HOST: label_redis @@ -32,10 +33,12 @@ services: restart: 
always label_celery_worker: - image: labelfree/open:latest - command: celery -A labelfree.app.celery worker -l info + image: labelfree/open:2.0.0 + command: celery -A labelfree.app.celery worker -l info -B env_file: - .env + volumes: + - ${BACKEND_SANDBOX_ROOT}:${BACKEND_SANDBOX_ROOT} environment: SQLALCHEMY_DATABASE_URI: mysql+pymysql://root:${MYSQL_ROOT_PASSWORD}@label_mysql:3306/labelfree REDIS_HOST: label_redis diff --git a/docker-compose.modeldeploy.yml b/docker-compose.modeldeploy.yml new file mode 100644 index 0000000000..4f06696425 --- /dev/null +++ b/docker-compose.modeldeploy.yml @@ -0,0 +1,46 @@ +version: "3.1" +services: + model_deploy_nginx: + image: pubalglib/model_deploy_nginx:1.0.0 + env_file: + - .env + ports: + - ${DEPLOY_MODULE_HOST_PORT}:80 + networks: + - model_deploy_network + restart: always + depends_on: + - model_deploy_api + + model_deploy_api: + image: pubalglib/model_deployment_server:1.0.0 + env_file: + - .env + volumes: + - ${YMIR_PATH}/deploy_module/model_deployment_server/media:/svc/app/model_deployment_server/media + - ${YMIR_PATH}/deploy_module/model_deployment_server/logs:/svc/app/model_deployment_server/logs + working_dir: /svc/app/model_deployment_server + environment: + TZ: Asia/Shanghai + networks: + - model_deploy_network + restart: always + depends_on: + - model_deploy_mysql + + model_deploy_mysql: + image: mysql:8 + env_file: + - .env + environment: + TZ: Asia/Shanghai + MYSQL_ROOT_PASSWORD: ${DEPLOY_MODULE_MYSQL_ROOT_PASSWORD} + MYSQL_DATABASE: model_deployment_db + volumes: + - ${YMIR_PATH}/deploy_module/mysql/data:/var/lib/mysql + networks: + - model_deploy_network + restart: always + +networks: + model_deploy_network: diff --git a/docker-compose.updater.yml b/docker-compose.updater.yml new file mode 100644 index 0000000000..3489e24180 --- /dev/null +++ b/docker-compose.updater.yml @@ -0,0 +1,14 @@ +version: "3.3" +services: + updater: + image: industryessentials/ymir-updater:1.1.0-2.0.0 + env_file: + - .env + environment: + - MODELS_PATH=${YMIR_PATH}/ymir-models + - ASSETS_PATH=${YMIR_PATH}/ymir-assets + volumes: + - "${BACKEND_SANDBOX_ROOT}:${BACKEND_SANDBOX_ROOT}" + - "${YMIR_PATH}/ymir-assets:${YMIR_PATH}/ymir-assets" + - "${YMIR_PATH}/ymir-models:${YMIR_PATH}/ymir-models" + command: python /app/start.py diff --git a/docker-compose.yml b/docker-compose.yml index fbef346caf..fe60d3ff8f 100755 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,7 +1,7 @@ version: "3.3" services: backend: - image: industryessentials/ymir-backend + image: industryessentials/ymir-backend:release-2.0.0 env_file: - .env runtime: ${SERVER_RUNTIME} @@ -12,12 +12,9 @@ services: - MODELS_PATH=${YMIR_PATH}/ymir-models - ASSETS_PATH=${YMIR_PATH}/ymir-assets - CONTROLLER_LOG_PATH=${YMIR_PATH}/ymir-data/logs - # viz - - VIZ_REDIS_URI=redis://:@viz-redis # app - DATABASE_URI=mysql+pymysql://${MYSQL_INITIAL_USER}:${MYSQL_INITIAL_PASSWORD}@db/ymir - GRPC_CHANNEL=127.0.0.1:50066 - - VIZ_HOST=127.0.0.1:9099 - SHARED_DATA_DIR=/ymir-sharing - NGINX_DATA_PATH=/ymir-storage # arq @@ -25,6 +22,14 @@ services: # monitor - MONITOR_URL=http://127.0.0.1:9098 - APP_API_HOST=127.0.0.1:80 + # OpenPai + - OPENPAI_HOST=${OPENPAI_HOST} + - OPENPAI_TOKEN=${OPENPAI_TOKEN} + - OPENPAI_STORAGE=${OPENPAI_STORAGE} + - OPENPAI_USER=${OPENPAI_USER} + # viewer + - VIEWER_HOST_PORT=9527 + - MONGODB_URI=mongodb://${MYSQL_INITIAL_USER}:${MYSQL_INITIAL_PASSWORD}@mongodb:${MONGODB_HOST_PORT} volumes: - "${BACKEND_SANDBOX_ROOT}:${BACKEND_SANDBOX_ROOT}" - "${YMIR_PATH}/ymir-data/logs:/app_logs" @@ -44,11 
+49,16 @@ services: depends_on: - db - redis - - viz-redis - tensorboard - - clickhouse + - mongodb command: bash -c 'cd /app && supervisord -nc supervisor/supervisord.conf' restart: on-failure:10 + healthcheck: + test: bash health_check.sh || exit 1 + interval: 60s + timeout: 6s + retries: 3 + start_period: 30s networks: - ymirnetwork @@ -59,28 +69,35 @@ services: - ${YMIR_PATH}/redis:/var/log/redis - ${YMIR_PATH}/redis:/data restart: on-failure:10 - networks: - - ymirnetwork - - viz-redis: - image: industryessentials/ymir-viz-redis - restart: on-failure:10 + healthcheck: + test: ["CMD", "redis-cli","ping"] + interval: 60s + timeout: 6s + retries: 3 + start_period: 30s networks: - ymirnetwork web: - image: industryessentials/ymir-web + image: industryessentials/ymir-web:release-2.0.0 volumes: - "${YMIR_PATH}/ymir-models:/data/ymir/ymir-models" - "${YMIR_PATH}/ymir-assets:/data/ymir/ymir-assets" - "${YMIR_PATH}/ymir-storage:/data/ymir/ymir-storage" environment: - LABEL_TOOL_HOST_URL=${LABEL_TOOL_HOST_URL} + - DEPLOY_MODULE_URL=${DEPLOY_MODULE_URL} ports: - ${NGINX_PORT}:80 restart: on-failure:10 depends_on: - backend + healthcheck: + test: curl --fail -s http://localhost:80/health || exit 1 + interval: 60s + timeout: 6s + retries: 3 + start_period: 30s networks: - ymirnetwork @@ -94,29 +111,47 @@ services: volumes: - ${YMIR_PATH}/mysql:/var/lib/mysql restart: on-failure:10 + healthcheck: + test: ["CMD", "mysqladmin" ,"ping", "-h", "localhost"] + interval: 60s + timeout: 6s + retries: 3 + start_period: 30s networks: - ymirnetwork tensorboard: - image: tensorflow/tensorflow:latest + image: tensorflow/tensorflow:2.9.1 env_file: - .env volumes: - "${TENSORBOARD_ROOT}:${TENSORBOARD_ROOT}" command: tensorboard --logdir ${TENSORBOARD_ROOT} --port 6006 --bind_all restart: on-failure:10 + healthcheck: + test: curl --fail -s http://localhost:6006 || exit 1 + interval: 60s + timeout: 6s + retries: 3 + start_period: 30s networks: - ymirnetwork - clickhouse: - image: clickhouse/clickhouse-server - ulimits: - nofile: - soft: 262144 - hard: 262144 - volumes: - - ${YMIR_PATH}/clickhouseDB:/var/lib/clickhouse + mongodb: + image: mongo:4.4 + environment: + - MONGO_INITDB_ROOT_USERNAME=${MYSQL_INITIAL_USER} + - MONGO_INITDB_ROOT_PASSWORD=${MYSQL_INITIAL_PASSWORD} restart: on-failure:10 + command: mongod --port ${MONGODB_HOST_PORT} --auth --nojournal + volumes: + - ${YMIR_PATH}/ymir-viewer/mongo_data:/data/db + healthcheck: + test: echo 'db.runCommand("ping").ok' | mongo mongodb:${MONGODB_HOST_PORT}/test --quiet + interval: 60s + timeout: 6s + retries: 3 + start_period: 30s networks: - ymirnetwork diff --git a/docker_executor/public_index.md b/docker_executor/public_index.md index 86adbc1611..40b48aa477 100644 --- a/docker_executor/public_index.md +++ b/docker_executor/public_index.md @@ -2,6 +2,8 @@ |docker_name|functions|contributor|organization|description| |--|--|--|--|--| -|industryessentials/executor-det-yolov4-training:release-1.1.0|training|alfrat|-|yolov4 detection model training| -|industryessentials/executor-det-yolov4-mining:release-1.1.0|mining inference|alfrat|-|yolov4 detection model mining & inference| - +|youdaoyzbx/ymir-executor:ymir2.0.0-yolov5-cu111-tmi|training mining inference|modelai|-|yolov5| +|youdaoyzbx/ymir-executor:ymir2.0.0-yolov7-cu111-tmi|training mining inference|modelai|-|yolov7| +|youdaoyzbx/ymir-executor:ymir2.0.0-mmdet-cu111-tmi|training mining inference|modelai|-|mmdetection| +|youdaoyzbx/ymir-executor:ymir2.0.0-detectron2-cu111-tmi|training mining 
inference|modelai|-|detectron2| +|youdaoyzbx/ymir-executor:ymir2.0.0-nanodet-cu111-tmi|training mining inference|modelai|-|nanodet| diff --git a/docker_executor/sample_executor/Dockerfile b/docker_executor/sample_executor/Dockerfile index 8a2453054a..f20ae989d9 100644 --- a/docker_executor/sample_executor/Dockerfile +++ b/docker_executor/sample_executor/Dockerfile @@ -2,18 +2,24 @@ FROM python:3.8.13-alpine -COPY ./executor/requirements.txt ./ +# Add bash +RUN apk add bash +# Required to build numpy wheel +RUN apk add g++ + +COPY ./ymir_exc/requirements.txt ./ RUN pip3 install -r requirements.txt # tmi framework and your app COPY app /app RUN mkdir /img-man COPY app/*-template.yaml /img-man/ -COPY executor /app/executor +COPY ymir_exc/ymir_exc /app/ymir_exc # dependencies: write other dependencies here (pytorch, mxnet, tensorboard-x, etc.) # entry point for your app # the whole docker image will be started with `nvidia-docker run ` # and this command will run automatically -CMD python /app/start.py +RUN echo "python /app/start.py" > /usr/bin/start.sh +CMD bash /usr/bin/start.sh diff --git a/docker_executor/sample_executor/README.md b/docker_executor/sample_executor/README.md index f90a0716d6..6b3803bacc 100644 --- a/docker_executor/sample_executor/README.md +++ b/docker_executor/sample_executor/README.md @@ -96,9 +96,13 @@ app/start.py 展示了一个简单的镜像执行部分,此文档也将基于 3. 模型的保存 - * 在 `EnvConfig.output.models_dir` 中提供了模型的保存目录,用户可以使用 pytorch, mxnet, darknet 等训练框架自带的保存方法将模型保存在此目录下 + * 模型按当前正在进行的 stage name,分目录保存 - * 之后,可以使用 `result_writer.write_training_result()` 方法保存训练结果的摘要,这些内容包括:不带目录的模型名称,mAP,每个类别的 APs + * 在 `EnvConfig.output.models_dir` 中提供了模型的保存目录,用户可以使用 pytorch, mxnet, darknet 等训练框架自带的保存方法将模型保存在此目录下的以当前 stage_name 命名的子目录中 + + * 例如,如果需要保存 stage_name 为 'epoch-5000' 的模型,则需要把这些模型文件保存到 `os.path.join(env.get_current_env().output.model_dir, 'epoch-5000')` 目录下 + + * 之后,可以使用 `result_writer.write_model_stage()` 方法保存训练结果的摘要,这些内容包括:不带目录的模型名称列表,mAP,每个类别的 APs 4. 进度的记录:使用 `monitor.write_monitor_logger(percent)` 方法记录任务当前的进度,实际使用时,可以每隔若干轮迭代,根据当前迭代次数和总迭代次数来估算当前进度(一个 0 到 1 之间的数),调用此方法记录 diff --git a/docker_executor/sample_executor/app/infer-template.yaml b/docker_executor/sample_executor/app/infer-template.yaml index 7474733dd4..b3d45dd422 100644 --- a/docker_executor/sample_executor/app/infer-template.yaml +++ b/docker_executor/sample_executor/app/infer-template.yaml @@ -9,3 +9,4 @@ class_names: [] # just for test, remove this key in your own docker image idle_seconds: 3 # idle seconds for each task +seed: 15 diff --git a/docker_executor/sample_executor/app/start.py b/docker_executor/sample_executor/app/start.py index ca66b90412..a447a81339 100644 --- a/docker_executor/sample_executor/app/start.py +++ b/docker_executor/sample_executor/app/start.py @@ -1,10 +1,16 @@ import logging import os +import random import sys import time from typing import List -from executor import dataset_reader as dr, env, monitor, result_writer as rw +# view https://github.com/protocolbuffers/protobuf/issues/10051 for detail +os.environ.setdefault('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION', 'python') +from tensorboardX import SummaryWriter +from ymir_exc import dataset_reader as dr +from ymir_exc import env, monitor +from ymir_exc import result_writer as rw def start() -> int: @@ -40,27 +46,41 @@ def _run_training(env_config: env.EnvConfig) -> None: #! use `dataset_reader.item_paths` to read training or validation dataset items #! 
note that `dataset_reader.item_paths` is a generator + absent_count = 0 for asset_path, annotation_path in dr.item_paths(dataset_type=env.DatasetType.TRAINING): - logging.info(f"asset: {asset_path}, annotation: {annotation_path}") + isfile = os.path.isfile(asset_path) + if not isfile: + absent_count += 1 + logging.info(f"asset: {asset_path}, is file: {isfile}; " + f"annotation: {annotation_path}, is file: {os.path.isfile(annotation_path)}") + logging.info(f"absent: {absent_count}") #! use `monitor.write_monitor_logger` to write write task process percent to monitor.txt monitor.write_monitor_logger(percent=0.5) # suppose we have a long time training, and have saved the final model - #! use `env_config.output.models_dir` to get model output dir - with open(os.path.join(env_config.output.models_dir, 'model-0000.params'), 'w') as f: - f.write('fake params') - with open(os.path.join(env_config.output.models_dir, 'model-symbols.json'), 'w') as f: - f.write('fake json') - - #! use `rw.write_training_result` to save training result - rw.write_training_result(model_names=['model-0000.params', 'model-symbols.json'], - mAP=expected_mAP, - classAPs={class_name: expected_mAP - for class_name in class_names}) + #! model output dir: os.path.join(env_config.output.models_dir, your_stage_name) + stage_dir = os.path.join(env_config.output.models_dir, 'stage_00') + os.makedirs(stage_dir, exist_ok=True) + with open(os.path.join(stage_dir, 'model-0000.params'), 'w') as f: + f.write('fake model-0000.params') + with open(os.path.join(stage_dir, 'model-symbols.json'), 'w') as f: + f.write('fake model-symbols.json') + #! use `rw.write_model_stage` to save training result + rw.write_model_stage(stage_name='stage_00', files=['model-0000.params', 'model-symbols.json'], mAP=expected_mAP / 2) _dummy_work(idle_seconds=idle_seconds, trigger_crash=trigger_crash) + write_tensorboard_log(env_config.output.tensorboard_dir) + + stage_dir = os.path.join(env_config.output.models_dir, 'stage_10') + os.makedirs(stage_dir, exist_ok=True) + with open(os.path.join(stage_dir, 'model-0010.params'), 'w') as f: + f.write('fake model-0010.params') + with open(os.path.join(stage_dir, 'model-symbols.json'), 'w') as f: + f.write('fake model-symbols.json') + rw.write_model_stage(stage_name='stage_10', files=['model-0010.params', 'model-symbols.json'], mAP=expected_mAP) + #! if task done, write 100% percent log logging.info('training done') monitor.write_monitor_logger(percent=1.0) @@ -78,15 +98,19 @@ def _run_mining(env_config: env.EnvConfig) -> None: #! use `dataset_reader.item_paths` to read candidate dataset items # note that annotations path will be empty str if there's no annotations in that dataset asset_paths = [] + absent_count = 0 for asset_path, _ in dr.item_paths(dataset_type=env.DatasetType.CANDIDATE): - logging.info(f"asset: {asset_path}") + isfile = os.path.isfile(asset_path) + if not isfile: + absent_count += 1 + logging.info(f"asset: {asset_path}, is file: {isfile}") asset_paths.append(asset_path) if len(asset_paths) == 0: raise ValueError('empty asset paths') #! 
     #! use `monitor.write_monitor_logger` to write task process to monitor.txt
-    logging.info(f"assets count: {len(asset_paths)}")
+    logging.info(f"assets count: {len(asset_paths)}, absent: {absent_count}")
     monitor.write_monitor_logger(percent=0.5)

     _dummy_work(idle_seconds=idle_seconds, trigger_crash=trigger_crash)

@@ -109,28 +133,43 @@ def _run_infer(env_config: env.EnvConfig) -> None:
     class_names = executor_config['class_names']
     idle_seconds: float = executor_config.get('idle_seconds', 60)
     trigger_crash: bool = executor_config.get('trigger_crash', False)
+    seed: int = executor_config.get('seed', 15)

     #! use `logging` or `print` to write log to console
     logging.info(f"infer config: {executor_config}")

     #! use `dataset_reader.item_paths` to read candidate dataset items
     # note that annotations path will be empty str if there's no annotations in that dataset
     asset_paths: List[str] = []
+    absent_count = 0
     for asset_path, _ in dr.item_paths(dataset_type=env.DatasetType.CANDIDATE):
-        logging.info(f"asset: {asset_path}")
+        isfile = os.path.isfile(asset_path)
+        if not isfile:
+            absent_count += 1
+        logging.info(f"asset: {asset_path}, is file: {isfile}")
         asset_paths.append(asset_path)

     if len(asset_paths) == 0 or len(class_names) == 0:
         raise ValueError('empty asset paths or class names')

     #! use `monitor.write_monitor_logger` to write log to console and write task process percent to monitor.txt
-    logging.info(f"assets count: {len(asset_paths)}")
+    logging.info(f"assets count: {len(asset_paths)}, absent: {absent_count}")
     monitor.write_monitor_logger(percent=0.5)

     _dummy_work(idle_seconds=idle_seconds, trigger_crash=trigger_crash)

     #! write infer result
-    fake_annotation = rw.Annotation(class_name=class_names[0], score=0.9, box=rw.Box(x=50, y=50, w=150, h=150))
-    infer_result = {asset_path: [fake_annotation] for asset_path in asset_paths}
+    fake_anns = []
+    random.seed(seed)
+    for class_name in class_names:
+        x = random.randint(0, 100)
+        y = random.randint(0, 100)
+        w = random.randint(50, 100)
+        h = random.randint(50, 100)
+        ann = rw.Annotation(class_name=class_name, score=random.random(), box=rw.Box(x=x, y=y, w=w, h=h))
+
+        fake_anns.append(ann)
+
+    infer_result = {asset_path: fake_anns for asset_path in asset_paths}
     rw.write_infer_result(infer_result=infer_result)
     #! if task done, write 100% percent log
@@ -145,6 +184,16 @@ def _dummy_work(idle_seconds: float, trigger_crash: bool = False, gpu_memory_siz
         raise RuntimeError('app crashed')


+def write_tensorboard_log(tensorboard_dir: str) -> None:
+    tb_log = SummaryWriter(tensorboard_dir)
+
+    total_epoch = 30
+    for e in range(total_epoch):
+        tb_log.add_scalar("fake_loss", 10 / (1 + e), e)
+        time.sleep(1)
+        monitor.write_monitor_logger(percent=e / total_epoch)
+
+
 if __name__ == '__main__':
     logging.basicConfig(stream=sys.stdout,
                         format='%(levelname)-8s: [%(asctime)s] %(message)s',
diff --git a/docker_executor/sample_executor/app/training-template.yaml b/docker_executor/sample_executor/app/training-template.yaml
index 72d934b160..f114648dcc 100644
--- a/docker_executor/sample_executor/app/training-template.yaml
+++ b/docker_executor/sample_executor/app/training-template.yaml
@@ -4,8 +4,9 @@ gpu_id: '0'
 task_id: 'default-training-task'
-pretrained_model_paths: []
+pretrained_model_params: []
 class_names: []
+export_format: 'det-voc:raw'

 # just for test, remove this key in your own docker image
 expected_map: 0.983  # expected map for training task
diff --git a/docker_executor/sample_executor/executor/monitor.py b/docker_executor/sample_executor/executor/monitor.py
deleted file mode 100644
index 4eeb9cb51d..0000000000
--- a/docker_executor/sample_executor/executor/monitor.py
+++ /dev/null
@@ -1,11 +0,0 @@
-import time
-
-from executor import env
-
-TASK_STATE_RUNNING = 2
-
-
-def write_monitor_logger(percent: float) -> None:
-    env_config = env.get_current_env()
-    with open(env_config.output.monitor_file, 'w') as f:
-        f.write(f"{env_config.task_id}\t{time.time()}\t{percent:.2f}\t{TASK_STATE_RUNNING}\n")
diff --git a/docker_executor/sample_executor/executor/requirements.txt b/docker_executor/sample_executor/executor/requirements.txt
deleted file mode 100644
index 903c7a5920..0000000000
--- a/docker_executor/sample_executor/executor/requirements.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-pydantic>=1.8.2
-pyyaml>=5.4.1
\ No newline at end of file
diff --git a/docker_executor/sample_executor/executor/result_writer.py b/docker_executor/sample_executor/executor/result_writer.py
deleted file mode 100644
index e7c1cf16da..0000000000
--- a/docker_executor/sample_executor/executor/result_writer.py
+++ /dev/null
@@ -1,56 +0,0 @@
-import json
-import os
-from typing import Dict, List, Tuple
-
-from pydantic import BaseModel
-import yaml
-
-from executor import env
-
-
-class Box(BaseModel):
-    x: int
-    y: int
-    w: int
-    h: int
-
-
-class Annotation(BaseModel):
-    class_name: str
-    score: float
-    box: Box
-
-
-def write_training_result(model_names: List[str], mAP: float, classAPs: Dict[str, float], **kwargs: dict) -> None:
-    training_result = {
-        'model': model_names,
-        'map': mAP,
-        'class_aps': classAPs,
-    }
-    training_result.update(kwargs)
-
-    env_config = env.get_current_env()
-    with open(env_config.output.training_result_file, 'w') as f:
-        yaml.safe_dump(training_result, f)
-
-
-def write_mining_result(mining_result: List[Tuple[str, float]]) -> None:
-    # sort desc by score
-    sorted_mining_result = sorted(mining_result, reverse=True, key=(lambda v: v[1]))
-
-    env_config = env.get_current_env()
-    with open(env_config.output.mining_result_file, 'w') as f:
-        for asset_id, score in sorted_mining_result:
-            f.write(f"{asset_id}\t{score}\n")
-
-
-def write_infer_result(infer_result: Dict[str, List[Annotation]]) -> None:
-    detection_result = {}
-    for asset_path, annotations in infer_result.items():
-        asset_basename = os.path.basename(asset_path)
-        detection_result[asset_basename] = {'annotations': [annotation.dict() for annotation in annotations]}
-
-    result = {'detection': detection_result}
-    env_config = env.get_current_env()
-    with open(env_config.output.infer_result_file, 'w') as f:
-        f.write(json.dumps(result))
diff --git a/docker_executor/sample_executor/tests/__main__.py b/docker_executor/sample_executor/tests/__main__.py
index 8d5a2307f9..298a6f007a 100644
--- a/docker_executor/sample_executor/tests/__main__.py
+++ b/docker_executor/sample_executor/tests/__main__.py
@@ -5,12 +5,13 @@

 def main(args: List[str]) -> int:
-    module_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
+    module_root = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'ymir_exc')

     # note that env.DEFAULT_ENV_FILE_PATH will change when test
     # so there should be only ONE process / thread when test
     cmd = [f"PYTHONPATH=$PYTHONPATH:{module_root}", 'pytest', '-vv', '-x', '--durations=0']
     cmd.extend(args)
+    print(cmd)
     subprocess.check_call(' '.join(cmd), shell=True)
diff --git a/docker_executor/sample_executor/tests/test_dataset_reader.py b/docker_executor/sample_executor/tests/test_dataset_reader.py
index b3dbd0afd7..302f8c7142 100644
--- a/docker_executor/sample_executor/tests/test_dataset_reader.py
+++ b/docker_executor/sample_executor/tests/test_dataset_reader.py
@@ -4,16 +4,18 @@
 import yaml

-from executor import dataset_reader as dr, env, settings
+from ymir_exc import dataset_reader as dr, env, settings


 class TestDatasetReader(unittest.TestCase):
     # life cycle
     def __init__(self, methodName: str = ...) -> None:
         super().__init__(methodName)
-        self._test_root = os.path.join('/tmp', 'test_tmi', *self.id().split(".")[-3:])
+        self._test_root = os.path.join(
+            '/tmp', 'test_tmi', *self.id().split(".")[-3:])
         self._custom_env_file = os.path.join(self._test_root, 'env.yml')
-        self._training_index_file = os.path.join(self._test_root, 'training-index.tsv')
+        self._training_index_file = os.path.join(
+            self._test_root, 'training-index.tsv')

     def setUp(self) -> None:
         settings.DEFAULT_ENV_FILE_PATH = self._custom_env_file
@@ -69,15 +71,17 @@ def _deprepare_dirs(self) -> None:
             shutil.rmtree(self._test_root)

     def test_00(self) -> None:
-        training_list = list(dr.item_paths(dataset_type=dr.DatasetType.TRAINING))
+        training_list = list(dr.item_paths(
+            dataset_type=env.DatasetType.TRAINING))
         self.assertEqual(len(training_list), 3)  # have 3 items
-        self.assertEqual(len(training_list[0]), 2)  # each item have asset and annotations
+        # each item has asset and annotations
+        self.assertEqual(len(training_list[0]), 2)
         try:
-            dr.item_paths(dataset_type=dr.DatasetType.VALIDATION)
+            dr.item_paths(dataset_type=env.DatasetType.VALIDATION)
         except Exception as e:
             self.assertTrue(isinstance(e, ValueError))
         try:
-            dr.item_paths(dataset_type=dr.DatasetType.CANDIDATE)
+            dr.item_paths(dataset_type=env.DatasetType.CANDIDATE)
         except Exception as e:
             self.assertTrue(isinstance(e, ValueError))
diff --git a/docker_executor/sample_executor/tests/test_env.py b/docker_executor/sample_executor/tests/test_env.py
index 108582e096..f570155e76 100644
--- a/docker_executor/sample_executor/tests/test_env.py
+++ b/docker_executor/sample_executor/tests/test_env.py
@@ -4,7 +4,7 @@
 import yaml

-from executor import env, settings
+from ymir_exc import env, settings


 class TestEnv(unittest.TestCase):
diff --git a/docker_executor/sample_executor/tests/test_monitor.py b/docker_executor/sample_executor/tests/test_monitor.py
index b0286f908d..dda337ca87 100644
--- a/docker_executor/sample_executor/tests/test_monitor.py
+++ b/docker_executor/sample_executor/tests/test_monitor.py
@@ -4,7 +4,7 @@
 import yaml

-from executor import env, monitor, settings
+from ymir_exc import env, monitor, settings


 class TestMonitor(unittest.TestCase):
diff --git a/docker_executor/sample_executor/tests/test_result_writer.py b/docker_executor/sample_executor/tests/test_result_writer.py
index 67311d300f..39f4d96f7d 100644
--- a/docker_executor/sample_executor/tests/test_result_writer.py
+++ b/docker_executor/sample_executor/tests/test_result_writer.py
@@ -6,7 +6,7 @@
 import yaml

-from executor import result_writer as rw, settings
+from ymir_exc import result_writer as rw, settings


 class TestResultWriter(unittest.TestCase):
@@ -57,14 +57,17 @@ def _deprepare_dirs(self) -> None:
         shutil.rmtree(self._test_root)

     # protected: check results
-    def _check_training_result(self, model_names: List[str], mAP: float, classAPs: Dict[str, float], **kwargs) -> None:
+    def _check_model_stages(self, best_stage_name: str, mAP: float, stage_names: List[str]) -> None:
         with open(self._training_result_file, 'r') as f:
-            result_obj = yaml.safe_load(f)
-        self.assertEqual(result_obj['model'], model_names)
-        self.assertEqual(result_obj['map'], mAP)
-        self.assertEqual(result_obj['class_aps'], classAPs)
-        for k, v in kwargs.items():
-            self.assertEqual(result_obj[k], v)
+            result_obj: dict = yaml.safe_load(f)
+        self.assertEqual(set(stage_names), set(result_obj['model_stages'].keys()))
+        self.assertEqual(best_stage_name, result_obj['best_stage_name'])
+        self.assertEqual(mAP, result_obj['map'])
+
+    def _check_training_attachments(self, attachments: Dict[str, List[str]]) -> None:
+        with open(self._training_result_file, 'r') as f:
+            result_obj: dict = yaml.safe_load(f)
+        self.assertEqual(result_obj['attachments'], attachments)

     def _check_mining_result(self, mining_result: List[Tuple[str, float]]) -> None:
         with open(self._mining_result_file, 'r') as f:
@@ -79,12 +82,27 @@ def _check_infer_result(self, infer_result: Dict[str, List[rw.Annotation]]) -> None:
             infer_result_obj = json.loads(f.read())
         self.assertEqual(set(infer_result_obj['detection'].keys()), set(infer_result.keys()))

+    # public: test cases
+    def test_write_model_stage_00(self) -> None:
+        stage_names = [f"epoch_{idx}" for idx in range(0, 12)]
+        for idx, stage_name in enumerate(stage_names):
+            rw.write_model_stage(stage_name=stage_name,
+                                 files=[f"model-{idx}.params", 'model-symbol.json'],
+                                 mAP=idx / 22,
+                                 timestamp=10 * idx + 1000000)
+        rw.write_model_stage(stage_name='best',
+                             files=[f"model-best.params", 'model-symbol.json'],
+                             mAP=1,
+                             timestamp=900000,
+                             attachments={'section_a': ['01', '02']})
+        expected_stage_names = [f"epoch_{idx}" for idx in range(2, 12)]
+        expected_stage_names.append('best')
+        self._check_model_stages(stage_names=expected_stage_names, best_stage_name='best', mAP=1.0)
+        self._check_training_attachments(attachments={'section_a': ['01', '02']})
+
     def test_write_training_result(self) -> None:
-        model_names = ['model-symbols.json', 'model-0000.params']
-        mAP = 0.86
-        classAPs = {'cat': 0.86, 'person': 0.86}
-        rw.write_training_result(model_names=model_names, mAP=mAP, classAPs=classAPs, author='fake author')
-        self._check_training_result(model_names=model_names, mAP=mAP, classAPs=classAPs, author='fake author')
+        rw.write_training_result(model_names=['fake.model'], mAP=0.9, classAPs={})
+        self._check_model_stages(stage_names=['default_best_stage'], best_stage_name='default_best_stage', mAP=0.9)
     def test_write_mining_result(self) -> None:
         mining_result = [('a', '0.1'), ('b', '0.3'), ('c', '0.2')]
diff --git a/docker_executor/sample_executor/executor/.flake8 b/docker_executor/sample_executor/ymir_exc/.flake8
similarity index 100%
rename from docker_executor/sample_executor/executor/.flake8
rename to docker_executor/sample_executor/ymir_exc/.flake8
diff --git a/docker_executor/sample_executor/executor/mypy.ini b/docker_executor/sample_executor/ymir_exc/mypy.ini
similarity index 100%
rename from docker_executor/sample_executor/executor/mypy.ini
rename to docker_executor/sample_executor/ymir_exc/mypy.ini
diff --git a/docker_executor/sample_executor/executor/requirements-ci.txt b/docker_executor/sample_executor/ymir_exc/requirements-ci.txt
similarity index 100%
rename from docker_executor/sample_executor/executor/requirements-ci.txt
rename to docker_executor/sample_executor/ymir_exc/requirements-ci.txt
diff --git a/docker_executor/sample_executor/ymir_exc/requirements.txt b/docker_executor/sample_executor/ymir_exc/requirements.txt
new file mode 100644
index 0000000000..a44ddffe4e
--- /dev/null
+++ b/docker_executor/sample_executor/ymir_exc/requirements.txt
@@ -0,0 +1,3 @@
+pydantic>=1.8.2
+pyyaml>=5.4.1
+tensorboardX>=2.4
\ No newline at end of file
diff --git a/docker_executor/sample_executor/ymir_exc/setup.py b/docker_executor/sample_executor/ymir_exc/setup.py
new file mode 100644
index 0000000000..4efcc067ae
--- /dev/null
+++ b/docker_executor/sample_executor/ymir_exc/setup.py
@@ -0,0 +1,21 @@
+from setuptools import setup, find_packages
+
+
+__version__ = '2.0.0.1019'
+
+requirements = []
+with open('requirements.txt') as f:
+    for line in f.read().splitlines():
+        requirements.append(line)
+
+setup(
+    name='ymir-exc',
+    version=__version__,
+    python_requires=">=3.7",
+    install_requires=requirements,
+    author_email="contact.viesc@gmail.com",
+    description="ymir executor SDK: SDK for developing ymir training, mining and inference docker images",
+    url="https://github.com/IndustryEssentials/ymir",
+    packages=find_packages(exclude=["*tests*"]),
+    include_package_data=True,
+)
diff --git a/docker_executor/sample_executor/executor/__init__.py b/docker_executor/sample_executor/ymir_exc/ymir_exc/__init__.py
similarity index 100%
rename from docker_executor/sample_executor/executor/__init__.py
rename to docker_executor/sample_executor/ymir_exc/ymir_exc/__init__.py
diff --git a/docker_executor/sample_executor/executor/dataset_reader.py b/docker_executor/sample_executor/ymir_exc/ymir_exc/dataset_reader.py
similarity index 52%
rename from docker_executor/sample_executor/executor/dataset_reader.py
rename to docker_executor/sample_executor/ymir_exc/ymir_exc/dataset_reader.py
index 79fcfce89b..e24129950d 100644
--- a/docker_executor/sample_executor/executor/dataset_reader.py
+++ b/docker_executor/sample_executor/ymir_exc/ymir_exc/dataset_reader.py
@@ -1,26 +1,18 @@
-from enum import IntEnum, auto
 from typing import Iterator, Tuple

-from executor import env
+from ymir_exc import env


-class DatasetType(IntEnum):
-    UNKNOWN = auto()
-    TRAINING = auto()
-    VALIDATION = auto()
-    CANDIDATE = auto()
-
-
-def _index_file_for_dataset_type(env_config: env.EnvConfig, dataset_type: DatasetType) -> str:
+def _index_file_for_dataset_type(env_config: env.EnvConfig, dataset_type: env.DatasetType) -> str:
     mapping = {
-        DatasetType.TRAINING: env_config.input.training_index_file,
-        DatasetType.VALIDATION: env_config.input.val_index_file,
-        DatasetType.CANDIDATE: env_config.input.candidate_index_file,
+        env.DatasetType.TRAINING: env_config.input.training_index_file,
+        env.DatasetType.VALIDATION: env_config.input.val_index_file,
+        env.DatasetType.CANDIDATE: env_config.input.candidate_index_file,
     }
     return mapping[dataset_type]


-def item_paths(dataset_type: DatasetType) -> Iterator[Tuple[str, str]]:
+def item_paths(dataset_type: env.DatasetType) -> Iterator[Tuple[str, str]]:
     file_path = _index_file_for_dataset_type(env.get_current_env(), dataset_type)
     if not file_path:
         raise ValueError(f"index file not set for dataset: {dataset_type}")
@@ -29,9 +21,14 @@ def item_paths(dataset_type: DatasetType) -> Iterator[Tuple[str, str]]:
         for line in f:
             # note: last char of line is \n
             components = line.strip().split('\t')
-            if len(components) == 2:
+            if len(components) >= 2:
                 yield (components[0], components[1])
             elif len(components) == 1:
                 yield (components[0], '')
             else:
-                raise ValueError(f"irregular index file: {file_path}")
+                # ignore empty lines
+                continue
+
+
+def items_count(dataset_type: env.DatasetType) -> int:
+    return len(list(item_paths(dataset_type=dataset_type)))
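After this change, `item_paths` yields (asset_path, annotation_path) pairs from the index file of the requested dataset split, and the new `items_count` simply drains that generator. A small consumption sketch, assuming a ymir-prepared input directory (the reporting interval is illustrative):

    from ymir_exc import dataset_reader as dr, env, monitor

    total = dr.items_count(dataset_type=env.DatasetType.TRAINING)
    for idx, (asset_path, annotation_path) in enumerate(dr.item_paths(dataset_type=env.DatasetType.TRAINING)):
        # annotation_path is an empty string when the dataset carries no annotations
        if idx % 1000 == 0:  # report progress every so often
            monitor.write_monitor_logger(percent=idx / max(total, 1))

Note that counting first means the index file is read twice; for very large datasets it may be cheaper to enumerate the generator only once.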
diff --git a/docker_executor/sample_executor/executor/env.py b/docker_executor/sample_executor/ymir_exc/ymir_exc/env.py
similarity index 95%
rename from docker_executor/sample_executor/executor/env.py
rename to docker_executor/sample_executor/ymir_exc/ymir_exc/env.py
index a8b5c42cfd..1a41430dac 100644
--- a/docker_executor/sample_executor/executor/env.py
+++ b/docker_executor/sample_executor/ymir_exc/ymir_exc/env.py
@@ -48,7 +48,7 @@
 from pydantic import BaseModel
 import yaml

-from executor import settings
+from ymir_exc import settings


 class DatasetType(IntEnum):
@@ -77,6 +77,7 @@ class EnvOutputConfig(BaseModel):
     mining_result_file: str = '/out/result.tsv'
     infer_result_file: str = '/out/infer-result.json'
     monitor_file: str = '/out/monitor.txt'
+    executor_log_file: str = '/out/ymir-executor-out.log'


 class EnvConfig(BaseModel):
@@ -84,6 +85,7 @@ class EnvConfig(BaseModel):
     run_training: bool = False
     run_mining: bool = False
     run_infer: bool = False
+    protocol_version: str = ''

     input: EnvInputConfig = EnvInputConfig()
     output: EnvOutputConfig = EnvOutputConfig()
diff --git a/docker_executor/sample_executor/ymir_exc/ymir_exc/monitor.py b/docker_executor/sample_executor/ymir_exc/ymir_exc/monitor.py
new file mode 100644
index 0000000000..bc54e5767e
--- /dev/null
+++ b/docker_executor/sample_executor/ymir_exc/ymir_exc/monitor.py
@@ -0,0 +1,34 @@
+import time
+
+from tensorboardX import SummaryWriter
+
+from ymir_exc import env
+
+TASK_STATE_RUNNING = 2
+
+
+def write_monitor_logger(percent: float) -> None:
+    env_config = env.get_current_env()
+    with open(env_config.output.monitor_file, 'w') as f:
+        f.write(f"{env_config.task_id}\t{time.time()}\t{percent:.2f}\t{TASK_STATE_RUNNING}\n")
+
+
+def write_tensorboard_text(text: str, tag: str = None) -> None:
+    """
+    do not call this function too often: tensorboard may
+    overwrite earlier log text that has the same `tag` and `global_step`
+    """
+    env_config = env.get_current_env()
+    tag = tag if tag else "default"
+
+    # show the raw text format instead of markdown
+    text = f"```\n {text} \n```"
+    with SummaryWriter(env_config.output.tensorboard_dir) as f:
+        f.add_text(tag=tag, text_string=text, global_step=round(time.time() * 1000))
+
+
+def write_final_executor_log(tag: str = None) -> None:
+    env_config = env.get_current_env()
+    exe_log_file = env_config.output.executor_log_file
+    with open(exe_log_file) as f:
+        write_tensorboard_text(f.read(), tag=tag)
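`write_final_executor_log` pushes the executor's whole console log into tensorboard as one text entry at the end of a run. A minimal sketch of how the three helpers fit together; note that mirroring logging into `executor_log_file` is this sketch's own assumption, monitor.py does not set that up for you:

    import logging
    import sys

    from ymir_exc import env, monitor

    # assumption: tee log output into the file that write_final_executor_log reads back
    log_file = env.get_current_env().output.executor_log_file
    logging.basicConfig(level=logging.INFO,
                        handlers=[logging.StreamHandler(sys.stdout), logging.FileHandler(log_file)])

    monitor.write_monitor_logger(percent=0.1)  # progress goes to /out/monitor.txt
    monitor.write_tensorboard_text('executor config dump ...', tag='config')  # occasional text log
    logging.info('task done')
    monitor.write_final_executor_log(tag='executor-log')  # call once, at the end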
diff --git a/docker_executor/sample_executor/ymir_exc/ymir_exc/result_writer.py b/docker_executor/sample_executor/ymir_exc/ymir_exc/result_writer.py
new file mode 100644
index 0000000000..e434dcbe24
--- /dev/null
+++ b/docker_executor/sample_executor/ymir_exc/ymir_exc/result_writer.py
@@ -0,0 +1,113 @@
+import json
+import logging
+import os
+import time
+from typing import Dict, List, Tuple
+
+from pydantic import BaseModel
+import yaml
+
+from ymir_exc import env
+
+_MAX_MODEL_STAGES_COUNT_ = 11  # 10 latest stages, 1 best stage
+
+
+class Box(BaseModel):
+    x: int
+    y: int
+    w: int
+    h: int
+
+
+class Annotation(BaseModel):
+    class_name: str
+    score: float
+    box: Box
+
+
+def write_model_stage(stage_name: str,
+                      files: List[str],
+                      mAP: float,
+                      timestamp: int = None,
+                      attachments: Dict[str, List[str]] = None) -> None:
+    """
+    Write model stage and model attachments
+
+    Args:
+        stage_name (str): name of this model stage
+        files (List[str]): model file names for this stage
+            All files should be under the directory: `/out/models`
+        mAP (float): mean average precision of this stage
+        timestamp (int): timestamp (in seconds)
+        attachments (Dict[str, List[str]]): attachment file names, grouped by section
+    """
+    if not stage_name or not files:
+        raise ValueError('empty stage_name or files')
+    if not stage_name.isidentifier():
+        raise ValueError(
+            f"invalid stage_name: {stage_name}, need letters, digits and underscores, not starting with a digit")
+
+    training_result: dict = {}  # key: stage name, value: stage name, files, timestamp, mAP
+
+    env_config = env.get_current_env()
+    try:
+        with open(env_config.output.training_result_file, 'r') as f:
+            training_result = yaml.safe_load(stream=f)
+    except FileNotFoundError:
+        pass  # a new result file will be created below, so ignore this exception
+
+    model_stages = training_result.get('model_stages', {})
+
+    model_stages[stage_name] = {
+        'stage_name': stage_name,
+        'files': files,
+        'timestamp': timestamp or int(time.time()),
+        'mAP': mAP
+    }
+
+    # best stage
+    sorted_model_stages = sorted(model_stages.values(), key=lambda x: (x.get('mAP', 0), x.get('timestamp', 0)))
+    training_result['best_stage_name'] = sorted_model_stages[-1]['stage_name']
+    training_result['map'] = sorted_model_stages[-1]['mAP']
+
+    # if there are too many stages, remove the earliest one (but never the best stage)
+    if len(model_stages) > _MAX_MODEL_STAGES_COUNT_:
+        sorted_model_stages = sorted(model_stages.values(), key=lambda x: x.get('timestamp', 0))
+        del_stage_name = sorted_model_stages[0]['stage_name']
+        if del_stage_name == training_result['best_stage_name']:
+            del_stage_name = sorted_model_stages[1]['stage_name']
+        del model_stages[del_stage_name]
+        logging.info(f"data_writer removed model stage: {del_stage_name}")
+    training_result['model_stages'] = model_stages

+    # attachments
+    training_result['attachments'] = attachments or {}
+
+    # save all
+    with open(env_config.output.training_result_file, 'w') as f:
+        yaml.safe_dump(data=training_result, stream=f)
+
+
+def write_training_result(model_names: List[str], mAP: float, classAPs: Dict[str, float], **kwargs: dict) -> None:
+    write_model_stage(stage_name='default_best_stage', files=model_names, mAP=mAP)
+
+
+def write_mining_result(mining_result: List[Tuple[str, float]]) -> None:
+    # sort desc by score
+    sorted_mining_result = sorted(mining_result, reverse=True, key=(lambda v: v[1]))
+
+    env_config = env.get_current_env()
+    with open(env_config.output.mining_result_file, 'w') as f:
+        for asset_id, score in sorted_mining_result:
+            f.write(f"{asset_id}\t{score}\n")
+
+
+def write_infer_result(infer_result: Dict[str, List[Annotation]]) -> None:
+    detection_result = {}
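Together these writers define the executor's output contract: mining produces a tab-separated score list sorted descending, inference a JSON document keyed by asset basename. A tiny usage sketch with illustrative assets and scores:

    from ymir_exc import result_writer as rw

    # mining: input order does not matter, write_mining_result sorts
    # descending by score and writes one `asset_id<TAB>score` line per asset
    rw.write_mining_result(mining_result=[('asset-a', 0.1), ('asset-b', 0.9)])

    # infer: keyed by asset path, written out as
    # {"detection": {"dog.jpg": {"boxes": [{"class_name": ..., "score": ..., "box": ...}]}}}
    ann = rw.Annotation(class_name='dog', score=0.75, box=rw.Box(x=10, y=20, w=100, h=80))
    rw.write_infer_result(infer_result={'/in/assets/dog.jpg': [ann]})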
+    for asset_path, annotations in infer_result.items():
+        asset_basename = os.path.basename(asset_path)
+        detection_result[asset_basename] = {'boxes': [annotation.dict() for annotation in annotations]}
+
+    result = {'detection': detection_result}
+    env_config = env.get_current_env()
+    with open(env_config.output.infer_result_file, 'w') as f:
+        f.write(json.dumps(result))
diff --git a/docker_executor/sample_executor/executor/settings.py b/docker_executor/sample_executor/ymir_exc/ymir_exc/settings.py
similarity index 100%
rename from docker_executor/sample_executor/executor/settings.py
rename to docker_executor/sample_executor/ymir_exc/ymir_exc/settings.py
diff --git a/metrics/config/blackbox.yml b/metrics/config/blackbox.yml
new file mode 100644
index 0000000000..f3d3497c0c
--- /dev/null
+++ b/metrics/config/blackbox.yml
@@ -0,0 +1,10 @@
+modules:
+  http_2xx:
+    prober: http
+    http:
+      method: GET
+  grpc:
+    prober: grpc
+    grpc:
+      tls: true
+      preferred_ip_protocol: "ip4"
diff --git a/metrics/config/loki.yml b/metrics/config/loki.yml
new file mode 100644
index 0000000000..d3678fea0f
--- /dev/null
+++ b/metrics/config/loki.yml
@@ -0,0 +1,67 @@
+auth_enabled: false
+
+server:
+  http_listen_port: 3100
+
+ingester:
+  wal:
+    enabled: true
+    dir: /etc/loki/wal
+  lifecycler:
+    address: 0.0.0.0
+    ring:
+      kvstore:
+        store: inmemory
+      replication_factor: 1
+    final_sleep: 0s
+  chunk_idle_period: 5m       # Any chunk not receiving new logs in this time will be flushed
+  max_chunk_age: 1h           # All chunks will be flushed when they hit this age, default is 1h
+  chunk_target_size: 1048576  # Loki will attempt to build chunks up to 1.5MB, flushing first if chunk_idle_period or max_chunk_age is reached first
+  chunk_retain_period: 30s    # Must be greater than index read cache TTL if using an index cache (Default index read cache TTL is 5m)
+  max_transfer_retries: 0     # Chunk transfers disabled
+
+schema_config:
+  configs:
+    - from: 2020-10-24
+      store: boltdb-shipper
+      object_store: filesystem
+      schema: v11
+      index:
+        prefix: index_
+        period: 24h
+
+storage_config:
+  boltdb_shipper:
+    active_index_directory: /etc/loki/boltdb-shipper-active
+    cache_location: /etc/loki/boltdb-shipper-cache
+    cache_ttl: 24h  # Can be increased for faster performance over longer query periods, uses more disk space
+    shared_store: filesystem
+  filesystem:
+    directory: /etc/loki/chunks
+
+compactor:
+  working_directory: /etc/loki/boltdb-shipper-compactor
+  shared_store: filesystem
+
+limits_config:
+  reject_old_samples: true
+  reject_old_samples_max_age: 168h
+
+chunk_store_config:
+  max_look_back_period: 0s
+
+table_manager:
+  retention_deletes_enabled: false
+  retention_period: 0s
+
+ruler:
+  storage:
+    type: local
+    local:
+      directory: /etc/loki/rules
+  rule_path: /etc/loki/rules-temp
+  alertmanager_url: http://localhost:9093
+  ring:
+    kvstore:
+      store: inmemory
+  enable_api: true
diff --git a/metrics/config/prometheus.yml b/metrics/config/prometheus.yml
new file mode 100644
index 0000000000..b6c2dcb7f6
--- /dev/null
+++ b/metrics/config/prometheus.yml
@@ -0,0 +1,51 @@
+# my global config
+global:
+  scrape_interval: 15s      # By default, scrape targets every 15 seconds.
+  evaluation_interval: 15s  # Evaluate rules every 15 seconds.
+  # scrape_timeout is set to the global default (10s).
+
+  # Attach these labels to any time series or alerts when communicating with
+  # external systems (federation, remote storage, Alertmanager).
+ external_labels: + monitor: 'ymir-project' + +# Load and evaluate rules in this file every 'evaluation_interval' seconds. +rule_files: + # - "alert.rules" + # - "first.rules" + # - "second.rules" + +scrape_configs: + - job_name: "prometheus" + static_configs: + - targets: ["localhost:9090"] + + - job_name: "nodeexporter" + static_configs: + - targets: ["nodeexporter:9100"] + + - job_name: "cadvisor" + static_configs: + - targets: ["cadvisor:8080"] + + - job_name: "ymir-backend" + static_configs: + - targets: ["backend:80", "backend:9527"] + + - job_name: 'blackbox' + metrics_path: /probe + params: + module: [http_2xx] + static_configs: + - targets: + - http://web:80/health # YMIR-web + - http://backend:80/health # YMIR-backend App. + - http://backend:9527/health # YMIR-backend Viewer. + - http://baidu.com # Network check. + relabel_configs: + - source_labels: [__address__] + target_label: __param_target + - source_labels: [__param_target] + target_label: instance + - target_label: __address__ + replacement: "blackbox:9115" diff --git a/metrics/config/promtail.yml b/metrics/config/promtail.yml new file mode 100644 index 0000000000..1b121b6387 --- /dev/null +++ b/metrics/config/promtail.yml @@ -0,0 +1,23 @@ +server: + http_listen_port: 9080 + grpc_listen_port: 0 + +positions: + filename: /etc/promtail/positions.yaml + +clients: + - url: http://loki:3100/loki/api/v1/push + +scrape_configs: +- job_name: system + static_configs: + - targets: + - localhost + labels: + job: varlogs + __path__: /var/log/*log + - targets: + - localhost + labels: + job: dockerlogs + __path__: /var/dockerlog/*/*/executor.log diff --git a/metrics/grafana/provisioning/dashboards/dashboard.yml b/metrics/grafana/provisioning/dashboards/dashboard.yml new file mode 100644 index 0000000000..4c387ae1a8 --- /dev/null +++ b/metrics/grafana/provisioning/dashboards/dashboard.yml @@ -0,0 +1,12 @@ +apiVersion: 1 + +providers: + - name: 'Prometheus' + orgId: 1 + folder: '' + type: file + disableDeletion: false + editable: true + allowUiUpdates: true + options: + path: /etc/grafana/provisioning/dashboards diff --git a/metrics/grafana/provisioning/dashboards/docker_containers.json b/metrics/grafana/provisioning/dashboards/docker_containers.json new file mode 100644 index 0000000000..788ec1fca4 --- /dev/null +++ b/metrics/grafana/provisioning/dashboards/docker_containers.json @@ -0,0 +1,2639 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "description": "Containers metrics", + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 1, + "id": null, + "links": [ + { + "asDropdown": false, + "icon": "bolt", + "includeVars": true, + "keepTime": true, + "tags": [], + "targetBlank": true, + "title": "View In Explore", + "tooltip": "", + "type": "link", + "url": "/explore?orgId=1&left=[\"now-1h\",\"now\",\"Loki\",{\"expr\":\"{filename=\\\"$filename\\\"}\"},{\"ui\":[true,true,true,\"none\"]}]" + }, + { + "asDropdown": false, + "icon": "external link", + "includeVars": false, + "keepTime": false, + "tags": [], + "targetBlank": true, + "title": "Learn LogQL", + "tooltip": "", + "type": "link", + "url": "https://grafana.com/docs/loki/latest/logql/" + } + ], + "liveNow": false, + "panels": [ + { + "gridPos": { + "h": 1, + "w": 
24, + "x": 0, + "y": 0 + }, + "id": 1, + "title": "YMIR-logs", + "type": "row" + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "loki", + "uid": "ymir_metrics_loki" + }, + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 3, + "w": 24, + "x": 0, + "y": 1 + }, + "hiddenSeries": false, + "id": 2, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": false, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.1.0", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "loki", + "uid": "ymir_metrics_loki" + }, + "editorMode": "code", + "expr": "sum(count_over_time({filename=\"$filename\"} |= \"$search\" [$__interval]))", + "queryType": "range", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:168", + "format": "short", + "logBase": 1, + "show": false + }, + { + "$$hashKey": "object:169", + "format": "short", + "logBase": 1, + "show": false + } + ], + "yaxis": { + "align": false + } + }, + { + "datasource": { + "type": "loki", + "uid": "ymir_metrics_loki" + }, + "gridPos": { + "h": 16, + "w": 22, + "x": 1, + "y": 4 + }, + "id": 3, + "maxDataPoints": "", + "options": { + "dedupStrategy": "none", + "enableLogDetails": false, + "prettifyLogMessage": false, + "showCommonLabels": false, + "showLabels": false, + "showTime": false, + "sortOrder": "Ascending", + "wrapLogMessage": true + }, + "targets": [ + { + "datasource": { + "type": "loki", + "uid": "ymir_metrics_loki" + }, + "editorMode": "code", + "expr": "{filename=\"$filename\"} |= \"$search\"", + "queryType": "range", + "refId": "A" + } + ], + "transparent": true, + "type": "logs" + }, + { + "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 20 + }, + "id": 4, + "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "refId": "A" + } + ], + "title": "YMIR-Services", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "red", + "value": null + }, + { + "color": "orange", + "value": 0.6 + }, + { + "color": "green", + "value": 0.85 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 15, + "x": 0, + "y": 21 + }, + "id": 5, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "9.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "editorMode": "code", + "expr": "sum(rate(container_last_seen{container_label_com_docker_compose_service=\"web\"}[15m]))", + 
"legendFormat": "web", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "editorMode": "code", + "expr": "sum(rate(container_last_seen{container_label_com_docker_compose_service=\"backend\"}[15m]))", + "hide": false, + "legendFormat": "backend", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "editorMode": "code", + "expr": "sum(rate(container_last_seen{container_label_com_docker_compose_service=\"db\"}[15m]))", + "hide": false, + "legendFormat": "db", + "range": true, + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "editorMode": "code", + "expr": "sum(rate(container_last_seen{container_label_com_docker_compose_service=\"mongodb\"}[15m]))", + "hide": false, + "legendFormat": "mongodb", + "range": true, + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "editorMode": "code", + "expr": "sum(rate(container_last_seen{container_label_com_docker_compose_service=\"redis\"}[15m]))", + "hide": false, + "legendFormat": "redis", + "range": true, + "refId": "F" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "editorMode": "code", + "expr": "sum(rate(container_last_seen{container_label_com_docker_compose_service=\"tensorboard\"}[15m]))", + "hide": false, + "legendFormat": "tensorboard", + "range": true, + "refId": "H" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "editorMode": "code", + "expr": "sum(rate(container_last_seen{container_label_com_docker_compose_service=\"labelstudio\"}[15m]))", + "hide": false, + "legendFormat": "labelstudio", + "range": true, + "refId": "P" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "editorMode": "code", + "expr": "sum(rate(container_last_seen{container_label_com_docker_compose_service=\"blackbox\"}[15m]))", + "hide": false, + "legendFormat": "blackbox", + "range": true, + "refId": "I" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "editorMode": "code", + "expr": "sum(rate(container_last_seen{container_label_com_docker_compose_service=\"cadvisor\"}[15m]))", + "hide": false, + "legendFormat": "cadvisor", + "range": true, + "refId": "J" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "editorMode": "code", + "expr": "sum(rate(container_last_seen{container_label_com_docker_compose_service=\"grafana\"}[15m]))", + "hide": false, + "legendFormat": "grafana", + "range": true, + "refId": "K" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "editorMode": "code", + "expr": "sum(rate(container_last_seen{container_label_com_docker_compose_service=\"nodeexporter\"}[15m]))", + "hide": false, + "legendFormat": "nodeexporter", + "range": true, + "refId": "L" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "editorMode": "code", + "expr": "sum(rate(container_last_seen{container_label_com_docker_compose_service=\"prometheus\"}[15m]))", + "hide": false, + "legendFormat": "prometheus", + "range": true, + "refId": "M" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "editorMode": "code", + "expr": 
"sum(rate(container_last_seen{container_label_com_docker_compose_service=\"loki\"}[15m]))", + "hide": false, + "legendFormat": "loki", + "range": true, + "refId": "N" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "editorMode": "code", + "expr": "sum(rate(container_last_seen{container_label_com_docker_compose_service=\"promtail\"}[15m]))", + "hide": false, + "legendFormat": "promtail", + "range": true, + "refId": "O" + } + ], + "title": "Health Rate (last 15m)", + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "fillOpacity": 70, + "lineWidth": 0, + "spanNulls": false + }, + "mappings": [], + "min": 0, + "noValue": "0", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 1 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 9, + "x": 15, + "y": 21 + }, + "id": 6, + "links": [], + "options": { + "alignValue": "left", + "legend": { + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "mergeValues": true, + "rowHeight": 0.9, + "showValue": "never", + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "9.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "editorMode": "code", + "expr": "absent(container_last_seen{container_label_com_docker_compose_service=\"web\"})", + "legendFormat": "web", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "editorMode": "code", + "expr": "absent(container_last_seen{container_label_com_docker_compose_service=\"backend\"})", + "hide": false, + "legendFormat": "backend", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "editorMode": "code", + "exemplar": false, + "expr": "absent(container_last_seen{container_label_com_docker_compose_service=\"db\"})", + "hide": false, + "instant": false, + "legendFormat": "db", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "editorMode": "code", + "expr": "absent(container_last_seen{container_label_com_docker_compose_service=\"mongodb\"})", + "hide": false, + "legendFormat": "mongodb", + "range": true, + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "editorMode": "code", + "expr": "absent(container_last_seen{container_label_com_docker_compose_service=\"redis\"})", + "hide": false, + "legendFormat": "redis", + "range": true, + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "editorMode": "code", + "exemplar": false, + "expr": "absent(container_last_seen{container_label_com_docker_compose_service=\"tensorboard\"})", + "hide": false, + "legendFormat": "tensorboard", + "range": true, + "refId": "F" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "editorMode": "code", + "expr": "absent(container_last_seen{container_label_com_docker_compose_service=\"labelstudio\"})", + "hide": false, + "legendFormat": "labelstudio", + "range": true, + "refId": "I" + } + ], + "title": "Core Service Alert", + 
"type": "state-timeline" + }, + { + "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 29 + }, + "id": 7, + "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "refId": "A" + } + ], + "title": "YMIR-App", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [] + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 8, + "x": 0, + "y": 30 + }, + "id": 8, + "links": [], + "options": { + "displayLabels": [ + "percent" + ], + "legend": { + "displayMode": "list", + "placement": "right", + "showLegend": true, + "values": [] + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "9.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "editorMode": "code", + "expr": "sum by(status_code, path, job) (starlette_requests_total)", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{job}}-{{path}}-{{status_code}}", + "refId": "A" + } + ], + "title": "App-Requests", + "type": "piechart" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "fieldConfig": { + "defaults": { + "unit": "s" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 4, + "w": 8, + "x": 8, + "y": 30 + }, + "hiddenSeries": false, + "id": 9, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.1.0", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "editorMode": "code", + "expr": "histogram_quantile(0.95, sum (rate(starlette_request_duration_seconds_bucket[5m])) by (le, instance))", + "interval": "", + "legendFormat": "p95", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "editorMode": "code", + "expr": "histogram_quantile(0.99, sum (rate(starlette_request_duration_seconds_bucket[5m])) by (le, instance))", + "interval": "", + "legendFormat": "p99", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "editorMode": "code", + "expr": "sum (starlette_request_duration_seconds_count) / sum(starlette_request_duration_seconds_sum)", + "interval": "", + "legendFormat": "avg", + "range": true, + "refId": "C" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Request Duration", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": 
[] + }, + "yaxes": [ + { + "format": "s", + "logBase": 1, + "show": true + }, + { + "format": "bytes", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 8, + "x": 16, + "y": 30 + }, + "id": 10, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "9.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "editorMode": "code", + "expr": "histogram_quantile(1, sum (rate(starlette_request_duration_seconds_bucket[5m])) by (le, instance, path, status_code))", + "interval": "", + "legendFormat": "{{ instance }}-{{path}}-{{status_code}}", + "range": true, + "refId": "A" + } + ], + "title": "Slow Response", + "type": "timeseries" + }, + { + "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 34 + }, + "id": 11, + "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "refId": "A" + } + ], + "title": "YMIR-Viewer", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [] + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 8, + "x": 0, + "y": 35 + }, + "id": 12, + "legend": { + "show": true, + "values": true + }, + "legendType": "Right side", + "links": [], + "nullPointMode": "connected", + "options": { + "displayLabels": [ + "percent" + ], + "legend": { + "displayMode": "list", + "placement": "right", + "showLegend": true, + "values": [] + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pieType": "pie", + "pluginVersion": "9.1.0", + "strokeWidth": 1, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "expr": "sum by(code, uri, instance) (gin_uri_request_total)", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{instance}}-{{uri}}-{{code}}", + "refId": "A" + } + ], + "title": "Viewer-Requests", + "type": "piechart" + }, + { + "aliasColors": {}, + "bars": false, + 
"dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "fieldConfig": { + "defaults": { + "unit": "s" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 4, + "w": 8, + "x": 8, + "y": 35 + }, + "hiddenSeries": false, + "id": 13, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true, + "displayMode": "gradient", + "minVizHeight": 10, + "minVizWidth": 0, + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true + }, + "percentage": false, + "pluginVersion": "9.1.0", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "expr": "histogram_quantile(0.95, sum (rate(gin_request_duration_bucket[5m])) by (le, instance))", + "interval": "", + "legendFormat": "p95", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "expr": "histogram_quantile(0.99, sum (rate(gin_request_duration_bucket[5m])) by (le, instance))", + "interval": "", + "legendFormat": "p99", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "expr": "sum (gin_request_duration_sum) / sum(gin_request_duration_count)", + "interval": "", + "legendFormat": "avg", + "refId": "C" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Request Duration", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "logBase": 1, + "show": true + }, + { + "format": "bytes", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "fieldConfig": { + "defaults": { + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "blue", + "value": null + }, + { + "color": "green", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 8, + "x": 16, + "y": 35 + }, + "id": 14, + "links": [], + "options": { + "displayMode": "basic", + "minVizHeight": 10, + "minVizWidth": 0, + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "showUnfilled": true + }, + "pluginVersion": "9.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "expr": "sum by(uri, instance) (gin_slow_request_total)", + "format": "time_series", + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{instance}}-{{uri}}", + "refId": "A" + } + ], + "title": "Slow Request(> 10s)", + "type": "bargauge" + }, + { + "collapsed": false, + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 39 + }, + "id": 15, + "panels": [], + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "refId": "A" + } + ], + "title": 
"Server", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 2, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "rgba(50, 172, 45, 0.97)", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 65 + }, + { + "color": "rgba(245, 54, 54, 0.9)", + "value": 90 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 0, + "y": 40 + }, + "hideTimeOverride": true, + "id": 16, + "links": [], + "maxDataPoints": 100, + "options": { + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "9.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "expr": "sum(rate(container_cpu_user_seconds_total{image!=\"\"}[5m])) / count(node_cpu_seconds_total{mode=\"user\"}) * 100", + "interval": "10s", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A", + "step": 10 + } + ], + "timeFrom": "10s", + "title": "CPU Load", + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 4, + "y": 40 + }, + "id": 17, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "9.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "expr": "machine_cpu_cores", + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "metric": "machine_cpu_cores", + "refId": "A", + "step": 20 + } + ], + "title": "CPU Cores", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "rgba(50, 172, 45, 0.97)", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 65 + }, + { + "color": "rgba(245, 54, 54, 0.9)", + "value": 90 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 8, + "y": 40 + }, + "hideTimeOverride": true, + "id": 18, + "links": [], + "maxDataPoints": 100, + "options": { + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "9.1.0", + 
"targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "expr": "(sum(node_memory_MemTotal_bytes) - sum(node_memory_MemFree_bytes+node_memory_Buffers_bytes+node_memory_Cached_bytes) ) / sum(node_memory_MemTotal_bytes) * 100", + "interval": "10s", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 20 + } + ], + "timeFrom": "10s", + "title": "Memory Load", + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 2, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 12, + "y": 40 + }, + "hideTimeOverride": true, + "id": 19, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "9.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "expr": "sum(container_memory_usage_bytes{image!=\"\"})", + "interval": "10s", + "intervalFactor": 2, + "legendFormat": "", + "refId": "A", + "step": 20 + } + ], + "timeFrom": "10s", + "title": "Used Memory", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "rgba(50, 172, 45, 0.97)", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 65 + }, + { + "color": "rgba(245, 54, 54, 0.9)", + "value": 90 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 16, + "y": 40 + }, + "hideTimeOverride": true, + "id": 20, + "links": [], + "maxDataPoints": 100, + "options": { + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "9.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "expr": "(node_filesystem_size_bytes{fstype=~\"aufs|ext4\"} - node_filesystem_free_bytes{fstype=~\"aufs|ext4\"}) / node_filesystem_size_bytes{fstype=~\"aufs|ext4\"} * 100", + "interval": "30s", + "intervalFactor": 1, + "legendFormat": "", + "refId": "A", + "step": 30 + } + ], + "timeFrom": "10s", + "title": "Storage Load", + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 2, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": 
"bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 20, + "y": 40 + }, + "hideTimeOverride": true, + "id": 21, + "links": [], + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "9.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "expr": "sum(container_fs_usage_bytes)", + "interval": "30s", + "intervalFactor": 2, + "refId": "A", + "step": 60 + } + ], + "timeFrom": "10s", + "title": "Used Storage", + "type": "stat" + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "decimals": 0, + "editable": true, + "error": false, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 4, + "w": 8, + "x": 0, + "y": 44 + }, + "hiddenSeries": false, + "id": 22, + "isNew": true, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": false, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.1.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "expr": "scalar(count(container_memory_usage_bytes{image!=\"\"}) > 0)", + "interval": "", + "intervalFactor": 2, + "legendFormat": "containers", + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Running Containers", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": "", + "logBase": 1, + "min": 0, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": false + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "decimals": 2, + "editable": true, + "error": false, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 4, + "w": 8, + "x": 8, + "y": 44 + }, + "hiddenSeries": false, + "id": 23, + "isNew": true, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": false, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.1.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "load 1m", + "color": "#BF1B00" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "expr": "node_load1", + "interval": "", + "intervalFactor": 2, + "legendFormat": "load 1m", + "metric": "node_load1", + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeRegions": [], + "title": "System 
Load", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "logBase": 1, + "min": 0, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": false + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "editable": true, + "error": false, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 4, + "w": 8, + "x": 16, + "y": 44 + }, + "hiddenSeries": false, + "id": 24, + "isNew": true, + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "rightSide": true, + "show": false, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.1.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "read", + "yaxis": 1 + }, + { + "alias": "written", + "yaxis": 1 + }, + { + "alias": "io time", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "expr": "sum(irate(node_disk_read_bytes_total[5m]))", + "interval": "2s", + "intervalFactor": 4, + "legendFormat": "read", + "metric": "", + "refId": "A", + "step": 8 + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "expr": "sum(irate(node_disk_written_bytes_total[5m]))", + "interval": "2s", + "intervalFactor": 4, + "legendFormat": "written", + "metric": "", + "refId": "B", + "step": 8 + }, + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "expr": "sum(irate(node_disk_io_time_seconds_total[5m]))", + "interval": "2s", + "intervalFactor": 4, + "legendFormat": "io time", + "metric": "", + "refId": "C", + "step": 8 + } + ], + "thresholds": [], + "timeRegions": [], + "title": "I/O Usage", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "logBase": 1, + "show": true + }, + { + "format": "ms", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "decimals": 2, + "editable": true, + "error": false, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 48 + }, + "hiddenSeries": false, + "id": 25, + "isNew": true, + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.1.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + 
"datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "expr": "sum by (name) (rate(container_cpu_usage_seconds_total{image!=\"\"}[5m])) / scalar(count(node_cpu_seconds_total{mode=\"user\"})) * 100", + "intervalFactor": 10, + "legendFormat": "{{ name }}", + "metric": "container_cpu_user_seconds_total", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Container CPU Usage", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "percent", + "logBase": 1, + "min": 0, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": false + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "decimals": 2, + "editable": true, + "error": false, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 55 + }, + "hiddenSeries": false, + "id": 26, + "isNew": true, + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.1.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "expr": "sum by (name)(container_memory_usage_bytes{image!=\"\"})", + "intervalFactor": 1, + "legendFormat": "{{ name }}", + "metric": "container_memory_usage", + "refId": "A", + "step": 1 + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Container Memory Usage", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "logBase": 1, + "min": 0, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": false + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "decimals": 2, + "editable": true, + "error": false, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 62 + }, + "hiddenSeries": false, + "id": 27, + "isNew": true, + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.1.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "expr": "sum by (name) (container_memory_cache{image!=\"\"})", + "intervalFactor": 2, + 
"legendFormat": "{{name}}", + "metric": "container_memory_cache", + "refId": "A", + "step": 2 + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Container Cached Memory Usage", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "logBase": 1, + "min": 0, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": false + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "decimals": 2, + "editable": true, + "error": false, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 69 + }, + "hiddenSeries": false, + "id": 28, + "isNew": true, + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.1.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "expr": "sum by (name) (rate(container_network_receive_bytes_total{image!=\"\"}[5m]))", + "intervalFactor": 10, + "legendFormat": "{{ name }}", + "metric": "container_network_receive_bytes_total", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Container Network Input", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "logBase": 1, + "min": 0, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": false + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "decimals": 2, + "editable": true, + "error": false, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 24, + "x": 0, + "y": 76 + }, + "hiddenSeries": false, + "id": 29, + "isNew": true, + "legend": { + "alignAsTable": true, + "avg": true, + "current": false, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "9.1.0", + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ymir_metrics_prometheus" + }, + "expr": "sum by (name) (rate(container_network_transmit_bytes_total{image!=\"\"}[5m]))", + "intervalFactor": 10, + "legendFormat": "{{ name }}", + "metric": "container_network_transmit_bytes_total", + "refId": "A", + "step": 10 + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Container Network 
Output", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 2, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "logBase": 1, + "min": 0, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": false + } + ], + "yaxis": { + "align": false + } + } + ], + "refresh": "10s", + "schemaVersion": 37, + "style": "dark", + "tags": [ + "docker" + ], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "/var/log/ymir_app.log", + "value": "/var/log/ymir_app.log" + }, + "datasource": { + "type": "loki", + "uid": "ymir_metrics_loki" + }, + "definition": "label_values(filename)", + "hide": 0, + "includeAll": false, + "label": "Filename", + "multi": false, + "name": "filename", + "options": [], + "query": "label_values(filename)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "current": { + "selected": false, + "text": "", + "value": "" + }, + "hide": 0, + "label": "String Match", + "name": "search", + "options": [ + { + "selected": true, + "text": "", + "value": "" + } + ], + "query": "", + "skipUrlSync": false, + "type": "textbox" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "browser", + "title": "Docker Containers", + "uid": "RiZOV3mVz", + "version": 1, + "weekStart": "" +} diff --git a/metrics/grafana/provisioning/datasources/datasource.yml b/metrics/grafana/provisioning/datasources/datasource.yml new file mode 100644 index 0000000000..f3f7b89b73 --- /dev/null +++ b/metrics/grafana/provisioning/datasources/datasource.yml @@ -0,0 +1,18 @@ +apiVersion: 1 + +datasources: + - name: Prometheus + type: prometheus + access: proxy + uid: ymir_metrics_prometheus + orgId: 1 + url: http://prometheus:9090 + basicAuth: false + isDefault: true + editable: true + - name: Loki + type: loki + access: proxy + uid: ymir_metrics_loki + orgId: 1 + url: http://loki:3100 diff --git a/ymir.sh b/ymir.sh index 0f3045f005..97fab9baa0 100755 --- a/ymir.sh +++ b/ymir.sh @@ -20,10 +20,11 @@ FIELD_LABEL_TOOL_LS='label_studio' FIELD_LABEL_TOOL_LF='label_free' ENV_FILE='.env' +FIELD_DEPLOY_MODULE_HOST_PORT='DEPLOY_MODULE_HOST_PORT' + stop() { -docker-compose down -docker-compose -f docker-compose.label_studio.yml down -docker-compose -f docker-compose.labelfree.yml down +docker-compose -f docker-compose.yml -f docker-compose.dev.yml -f docker-compose.label_studio.yml \ +-f docker-compose.labelfree.yml -f docker-compose.modeldeploy.yml down } pre_start() { @@ -125,12 +126,29 @@ else fi } +start_deploy_module() { + if cat ${ENV_FILE} | grep -oE "^${FIELD_DEPLOY_MODULE_HOST_PORT}=$"; then + echo "DEPLOY_MODULE_HOST_PORT not set, skip deploy module startup" + return + fi + + if ! cat ${ENV_FILE} | grep -oE "^${FIELD_DEPLOY_MODULE_HOST_PORT}=[0-9]{1,5}$"; then + echo "DEPLOY_MODULE_HOST_PORT is invalid" + exit + fi + + echo "deploy module, starting..." 
+    docker-compose -f docker-compose.modeldeploy.yml up -d
+}
+
 start() {
 check_permission
 pre_start
 start_label_tool
+start_deploy_module
+
 if [[ $1 == 'dev' ]]; then
     printf '\nin dev mode, building images.\n'
     docker build \
@@ -145,11 +163,29 @@ if [[ $1 == 'dev' ]]; then
 else
     printf '\nin prod mode, starting service.\n'
 fi
+
 docker-compose up -d
 }

+update() {
+stop
+
+cat <<- EOF
+Before proceeding, make sure to BACK UP your YMIR-workplace folder.
+Only upgrading from 1.1.0 (22-May) to 2.0.0 (22-Oct) is supported; other upgrade paths may cause data damage.
+EOF
+
+while true; do
+    read -p "Continue (y/n)?" yn
+    case $yn in
+        [Yy]* ) docker-compose -f docker-compose.updater.yml up; break;;
+        * ) break;;
+    esac
+done
+}
+
 print_help() {
-    printf '\nUsage: \n bash ymir.sh start/stop.\n'
+    printf '\nUsage: \n bash ymir.sh start/stop/update.\n'
 }

 # main
diff --git a/ymir/Dockerfile.backend b/ymir/Dockerfile.backend
index 08d89b741d..509f9482f0 100644
--- a/ymir/Dockerfile.backend
+++ b/ymir/Dockerfile.backend
@@ -1,38 +1,53 @@
+FROM golang:1.18.5 AS build
+
+# --build-arg GO_SOURCE='https://mirrors.aliyun.com/goproxy/,direct'
+ARG GO_SOURCE=https://proxy.golang.org,direct
+
+RUN go env -w GO111MODULE=on
+RUN go env -w GOPROXY=${GO_SOURCE}
+
+WORKDIR /app
+COPY ./backend/src/ymir_hel/go.mod .
+COPY ./backend/src/ymir_hel/go.sum .
+RUN go mod download -x
+COPY ./backend/src/ymir_hel/ /ymir_hel
+WORKDIR /ymir_hel
+RUN go build -mod=readonly -v -o hel_server
+
 FROM ubuntu:20.04

 ENV DEBIAN_FRONTEND=noninteractive
+# --build-arg PIP_SOURCE=https://pypi.mirrors.ustc.edu.cn/simple
 ARG PIP_SOURCE=https://pypi.org/simple

 RUN apt-get update \
-    && apt-get install -y git \
-    && apt-get install -y vim \
-    && apt-get install -y --no-install-recommends python3-pip python3-dev \
+    && apt-get install -y curl git vim \
+    && apt-get install -y --no-install-recommends python3-pip python3-dev mysql-client \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/* \
     && cd /usr/local/bin \
     && ln -s /usr/bin/python3 python \
     && pip3 install -i ${PIP_SOURCE} --upgrade pip

-RUN git config --global core.fileMode false
-
-RUN pip install --no-cache-dir "uvicorn[standard]" gunicorn -i ${PIP_SOURCE}
-
 RUN mkdir -p /data/sharing/
 RUN mkdir -p /app_logs/

-COPY ./backend/requirements.txt ./
-RUN pip3 install -r requirements.txt -i ${PIP_SOURCE}
+COPY ./backend/requirements.txt ./backend-requirements.txt
+RUN pip3 install -r backend-requirements.txt -i ${PIP_SOURCE}
+
+COPY ./command/requirements.txt ./command-requirements.txt
+RUN pip3 install -r command-requirements.txt -i ${PIP_SOURCE}

 COPY ./command /command
 RUN pip3 install -U /command -i ${PIP_SOURCE}

 COPY ./backend/src /app
-WORKDIR /app
-
-COPY ./backend/deploy/git.config /root/.gitconfig
+COPY --from=build /ymir_hel/hel_server /app/ymir_hel/hel_server
+WORKDIR /app
+COPY ./backend/deploy/git.config /root/.gitconfig
 COPY ./backend/deploy/supervisor /app/supervisor

 ENV PYTHONPATH=/app/ymir_app:/app/ymir_controller:/app/ymir_viz:/app/common:/app/ymir_monitor:/app/ymir_postman
diff --git a/ymir/Dockerfile.updater b/ymir/Dockerfile.updater
new file mode 100644
index 0000000000..48c0be417d
--- /dev/null
+++ b/ymir/Dockerfile.updater
@@ -0,0 +1,22 @@
+FROM python:3.8.13-alpine
+
+RUN apk update \
+    && apk add git
+
+# requirements
+COPY ./updater/app/requirements.txt /app/requirements.txt
+ARG PIP_SOURCE=https://pypi.org/simple
+RUN pip3 install -r /app/requirements.txt -i ${PIP_SOURCE}
+
+# git config
+COPY ./backend/deploy/git.config /root/.gitconfig
+
+# ymir controller and command components
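[editor's note] The comment above heads the COPY lines that resume right after this aside: the updater image bundles common_utils, id_definition and the mir package next to the updater app. The image's entrypoint, updater/app/start.py, is not included in this diff; the following is a hypothetical minimal sketch of what such an entrypoint does with these pieces, with the environment variable names being assumptions rather than taken from the compose files:

```python
# Hypothetical sketch of updater/app/start.py (not part of this diff):
# drive common_utils.sandbox_updater.update across the whole sandbox.
# Assumed: BACKEND_SANDBOX_ROOT / ASSETS_PATH / MODELS_PATH env names.
import logging
import os
import sys

from common_utils.sandbox_updater import update
from common_utils.sandbox_util import detect_sandbox_src_versions
from common_utils.version import YMIR_VERSION


def main() -> int:
    sandbox_root = os.environ["BACKEND_SANDBOX_ROOT"]
    assets_root = os.environ["ASSETS_PATH"]
    models_root = os.environ["MODELS_PATH"]

    src_versions = detect_sandbox_src_versions(sandbox_root)
    if len(src_versions) != 1:
        logging.error(f"expected exactly one sandbox version, got: {src_versions}")
        return 1

    # update() checks the sandbox, backs it up, converts every user repo,
    # and rolls the backup back in on failure.
    update(sandbox_root=sandbox_root,
           assets_root=assets_root,
           models_root=models_root,
           src_ver=src_versions[0],
           dst_ver=YMIR_VERSION)
    return 0


if __name__ == "__main__":
    sys.exit(main())
```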
+COPY ./backend/src/common/common_utils/ /app/common_utils/ +COPY ./backend/src/common/id_definition/ /app/id_definition/ +COPY ./command/mir/ /app/mir/ + +# updater app +COPY ./updater/app /app + +CMD python /app/start.py diff --git a/ymir/backend/.bumpversion.cfg b/ymir/backend/.bumpversion.cfg index af6d29ae8d..b6b0759f38 100644 --- a/ymir/backend/.bumpversion.cfg +++ b/ymir/backend/.bumpversion.cfg @@ -1,3 +1,3 @@ [bumpversion] -current_version = 1.1.0 +current_version = 1.2.1 commit = False diff --git a/ymir/backend/.gitignore b/ymir/backend/.gitignore index 2b0e3f1e84..8e4f9c382c 100644 --- a/ymir/backend/.gitignore +++ b/ymir/backend/.gitignore @@ -15,7 +15,6 @@ dist .vscode .pytest_cache server_config.server -protos mir .env .tox diff --git a/ymir/backend/deploy/git.config b/ymir/backend/deploy/git.config index 50325391b2..da63444899 100644 --- a/ymir/backend/deploy/git.config +++ b/ymir/backend/deploy/git.config @@ -1,3 +1,5 @@ +[core] + filemode = false [user] name = name email = name@ymir.ai diff --git a/ymir/backend/deploy/supervisor/conf.d/ymir_service.ini b/ymir/backend/deploy/supervisor/conf.d/ymir_service.ini index 4d55c92e0f..b2db41040d 100644 --- a/ymir/backend/deploy/supervisor/conf.d/ymir_service.ini +++ b/ymir/backend/deploy/supervisor/conf.d/ymir_service.ini @@ -19,18 +19,18 @@ stopasgroup=true startsecs=50 -[program:viz_service] -command=/bin/bash -c "cd ymir_viz && gunicorn -k gevent -c gunicorn_conf.py wsgi:connexion_app" +[program:viewer_service] +command=/bin/bash -c "cd ymir_hel && ./hel_server viewer" numprocs=1 autostart=true autorestart=true redirect_stderr=true ; redirect proc stderr to stdout (default false) -stdout_logfile=/app_logs/ymir_viz.log ; stdout log path, NONE for none; default AUTO +stdout_logfile=/app_logs/ymir_viewer.log ; stdout log path, NONE for none; default AUTO stdout_logfile_maxbytes=50MB ; max # logfile bytes b4 rotation (default 50MB) stdout_logfile_backups=10 ; # of stdout logfile backups (0 means none, default 10)stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0) stdout_events_enabled=false ; emit events on stdout writes (default false) stdout_syslog=false ; send stdout to syslog with process name (default false) -stderr_logfile=/app_logs/ymir_viz.log ; stderr log path, NONE for none; default AUTO +stderr_logfile=/app_logs/ymir_viewer_err.log ; stderr log path, NONE for none; default AUTO stderr_logfile_maxbytes=50MB ; max # logfile bytes b4 rotation (default 50MB) stderr_logfile_backups=10 ; # of stderr logfile backups (0 means none, default 10) stderr_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0) @@ -39,10 +39,10 @@ stderr_syslog=false ; send stderr to syslog with process name (default stopasgroup=true startsecs=50 - [program:controller_server] -command=python ymir_controller/controller/server.py -d -f ymir_controller/controller/server_prd_config.yaml +command=python ymir_controller/controller/server.py -f ymir_controller/controller/server_prd_config.yaml numprocs=1 +priority=1 autostart=true autorestart=true redirect_stderr=true ; redirect proc stderr to stdout (default false) @@ -83,8 +83,9 @@ startsecs=50 [program:app] -command=/bin/bash -c "cd ymir_app && sh prestart.sh && sh start.sh" +command=/bin/bash -c "sleep 1 && cd ymir_app && sh prestart.sh && sh start.sh" numprocs=1 +priority=99 autostart=true autorestart=true redirect_stderr=true ; redirect proc stderr to stdout (default false) diff --git a/ymir/backend/docs/img/ymir.png b/ymir/backend/docs/img/ymir.png deleted file mode 
100644 index 4ffcf6c12c..0000000000 Binary files a/ymir/backend/docs/img/ymir.png and /dev/null differ diff --git a/ymir/backend/requirements-dev.txt b/ymir/backend/requirements-dev.txt index df4b8adaba..ca8ab50248 100644 --- a/ymir/backend/requirements-dev.txt +++ b/ymir/backend/requirements-dev.txt @@ -14,3 +14,5 @@ sqlalchemy-stubs==0.4 tox==3.24.0 types-protobuf==3.18.2 types-PyYAML==6.0.1 +types-redis==3.5.7 +types-requests==2.25.2 diff --git a/ymir/backend/requirements.txt b/ymir/backend/requirements.txt index a690cb03c4..9f7dea9c3c 100644 --- a/ymir/backend/requirements.txt +++ b/ymir/backend/requirements.txt @@ -1,45 +1,35 @@ aiofiles==0.7.0 -aiohttp==3.7.4.post0 -aioredis==2.0.1 +aiohttp==3.8.3 alembic==1.6.5 apscheduler==3.7.0 -arrow==1.1.1 bcrypt==3.2.0 -clickhouse-driver==0.2.2 -connexion==2.6.0 -connexion[swagger-ui] >= 2.6.0 cryptography==3.4.7 dependency-injector==4.37.0 email-validator==1.1.2 emails==0.6 fastapi-cache2[redis]==0.1.8 +fastapi-health==0.4.0 fastapi-login==1.6.0 fastapi-socketio==0.0.9 -fastapi==0.65.1 -gevent==21.1.2 +fastapi==0.65.2 +grpcio-health-checking==1.39.0 grpcio-tools==1.39.0 grpcio==1.39.0 grpclib==0.4.2 gunicorn==20.1.0 Jinja2==3.0.1 -jsonschema==3.2.0 -Pillow>=8.2.0 PyMySQL==1.0.2 pynvml==11.0.0 -python-dotenv==0.17.1 -python-jose==3.2.0 +python-jose==3.3.0 python-multipart==0.0.5 python_dateutil==2.7.0 python-socketio==5.0.4 -pyyaml>=5.4.1 -redis==3.5.3 -redisgraph==2.4.0 -requests>=2.25.1 +redis==4.3.4 sentry-sdk==1.1.0 sentry-sdk[flask]==1.1.0 setuptools>=21.0.0 sqlalchemy==1.4.20 +starlette-exporter==0.14.0 statsd==3.3.0 supervisor==4.2.2 -types-redis==3.5.7 -types-requests==2.25.2 +websockets==10.3 diff --git a/ymir/backend/src/common/common_utils/labels.py b/ymir/backend/src/common/common_utils/labels.py index 55425889c0..13b54bcff7 100644 --- a/ymir/backend/src/common/common_utils/labels.py +++ b/ymir/backend/src/common/common_utils/labels.py @@ -1,167 +1,14 @@ -from datetime import datetime -import logging -import os -from typing import Any, Dict, Iterator, List, Set, Union - from google.protobuf import json_format -from pydantic import BaseModel, root_validator, validator -import yaml +from mir.tools.class_ids import LabelStorage, SingleLabel, UserLabels # noqa from proto import backend_pb2 -EXPECTED_FILE_VERSION = 1 - - -class SingleLabel(BaseModel): - id: int = -1 - name: str - create_time: datetime = datetime(year=2022, month=1, day=1) - update_time: datetime = datetime(year=2022, month=1, day=1) - aliases: List[str] = [] - - @validator('name') - def _strip_and_lower_name(cls, v: str) -> str: - return v.strip().lower() - - @validator('aliases', each_item=True) - def _strip_and_lower_alias(cls, v: str) -> str: - return v.strip().lower() - - -class LabelStorage(BaseModel): - version: int = EXPECTED_FILE_VERSION - labels: List[SingleLabel] = [] - - @validator('version') - def _check_version(cls, v: int) -> int: - if v != EXPECTED_FILE_VERSION: - raise ValueError(f"incorrect version: {v}, needed {EXPECTED_FILE_VERSION}") - return v - - @validator('labels') - def _check_labels(cls, labels: List[SingleLabel]) -> List[SingleLabel]: - label_names_set: Set[str] = set() - for idx, label in enumerate(labels): - if label.id < 0: - label.id = idx - if label.id != idx: - raise ValueError(f"invalid label id: {label.id}, expected {idx}") - - # all label names and aliases should have no dumplicate - name_and_aliases = label.aliases + [label.name] - name_and_aliases_set = set(name_and_aliases) - if len(name_and_aliases) != len(name_and_aliases_set): - 
raise ValueError(f"duplicated inline label: {name_and_aliases}") - duplicated = set.intersection(name_and_aliases_set, label_names_set) - if duplicated: - raise ValueError(f"duplicated: {duplicated}") - label_names_set.update(name_and_aliases_set) - return labels - - -class UserLabels(LabelStorage): - id_to_name: Dict[int, str] = {} - name_to_id: Dict[str, int] = {} - name_aliases_to_id: Dict[str, int] = {} - - @root_validator() - def fill_lookup_tables(cls, values: Any): - for label in values["labels"]: - values["id_to_name"][label.id] = label.name - values["name_to_id"][label.name] = label.id - values["name_aliases_to_id"][label.name] = label.id - for alias in label.aliases: - values["name_aliases_to_id"][alias] = label.id - return values - - class Config: - fields = {'labels': {'include': True}} - - def get_class_ids(self, names_or_aliases: Union[str, List[str]]) -> List[int]: - if isinstance(names_or_aliases, str): - return [self.name_aliases_to_id[names_or_aliases]] - elif isinstance(names_or_aliases, list): - return [self.name_aliases_to_id[name_or_aliaes] for name_or_aliaes in names_or_aliases] - else: - raise ValueError(f"unsupported type: {type(names_or_aliases)}") - - def get_main_names(self, class_ids: Union[int, List[int]]) -> List[str]: - if isinstance(class_ids, int): - return [self.id_to_name[class_ids]] - elif isinstance(class_ids, list): - return [self.id_to_name[class_id] for class_id in class_ids] - else: - raise ValueError(f"unsupported type: {type(class_ids)}") - - # keyword: {"name": "dog", "aliases": ["puppy", "pup", "canine"]} - def filter_labels( - self, - required_name_aliaes: List[str] = None, - required_ids: List[int] = None, - ) -> Iterator[SingleLabel]: - if required_name_aliaes and required_ids: - raise ValueError("required_name_alias and required_ids cannot be both set.") - if required_name_aliaes: - required_ids = self.get_class_ids(names_or_aliases=required_name_aliaes) +# indirect imports so that ymir_app does not need to import mir-cmd package. +from mir.tools.class_ids import ids_file_name, load_or_create_userlabels # type: ignore # noqa - for label in self.labels: - if required_ids is None or label.id in required_ids: - yield label - def find_dups(self, new_labels: Any) -> List[str]: - if isinstance(new_labels, str): - new_set = {new_labels} - elif isinstance(new_labels, list): - new_set = set(new_labels) - elif isinstance(new_labels, type(self)): - new_set = set(new_labels.name_aliases_to_id.keys()) - else: - raise ValueError(f"unsupported type: {type(new_labels)}") - return list(set(self.name_aliases_to_id.keys()) & new_set) - - def to_proto(self) -> backend_pb2.LabelCollection: - return json_format.Parse(self.json(), backend_pb2.LabelCollection(), ignore_unknown_fields=False) - - -def merge_labels(label_storage_file: str, - new_labels: UserLabels, - check_only: bool = False) -> UserLabels: - current_labels = get_user_labels_from_storage(label_storage_file) - current_time = datetime.now() - - conflict_labels = [] - for label in new_labels.labels: - new_label = SingleLabel.parse_obj(label.dict()) - idx = current_labels.name_to_id.get(label.name, None) - - # in case any alias is in other labels. - conflict_alias = [] - for alias in label.aliases: - alias_idx = current_labels.name_aliases_to_id.get(alias, idx) - if alias_idx != idx: - conflict_alias.append(alias) - if conflict_alias: - new_label.id = -1 - conflict_labels.append(new_label) - continue - - new_label.update_time = current_time - if idx is not None: # update alias. 
- new_label.id = idx - new_label.create_time = current_labels.labels[idx].create_time - current_labels.labels[idx] = new_label - else: # insert new record. - new_label.id = len(current_labels.labels) - new_label.create_time = current_time - current_labels.labels.append(new_label) - - if not (check_only or conflict_labels): - label_storage = LabelStorage(labels=current_labels.labels) - with open(label_storage_file, 'w') as f: - yaml.safe_dump(label_storage.dict(), f) - - logging.info(f"conflict labels: {conflict_labels}") - return UserLabels(labels=conflict_labels) +def userlabels_to_proto(user_labels: UserLabels) -> backend_pb2.LabelCollection: + return json_format.Parse(user_labels.json(), backend_pb2.LabelCollection(), ignore_unknown_fields=False) def parse_labels_from_proto(label_collection: backend_pb2.LabelCollection) -> UserLabels: @@ -170,34 +17,3 @@ def parse_labels_from_proto(label_collection: backend_pb2.LabelCollection) -> Us use_integers_for_enums=True) return UserLabels.parse_obj(label_dict) - - -def default_labels_file_name() -> str: - return 'labels.yaml' - - -def get_user_labels_from_storage(label_storage_file: str) -> UserLabels: - """ - get all labels from label storage file - - Returns: - UserLabels: all labels - - Raises: - FileNotFoundError: if label storage file not found - ValueError: if version mismatch - Error: if parse failed or other error occured - """ - obj = {} - if os.path.isfile(label_storage_file): - with open(label_storage_file, 'r') as f: - obj = yaml.safe_load(f) - return UserLabels(**obj) - - -def create_empty(label_storage_file: str) -> None: - if os.path.isfile(label_storage_file): - raise FileExistsError(f"already exists: {label_storage_file}") - - with open(label_storage_file, 'w') as f: - yaml.safe_dump(LabelStorage().dict(), f) diff --git a/ymir/backend/src/common/common_utils/percent_log_util.py b/ymir/backend/src/common/common_utils/percent_log_util.py index 9ce17dea9c..a288c4ce91 100644 --- a/ymir/backend/src/common/common_utils/percent_log_util.py +++ b/ymir/backend/src/common/common_utils/percent_log_util.py @@ -30,8 +30,10 @@ class PercentLogHandler: def parse_percent_log(log_file: str) -> PercentResult: with open(log_file, "r") as f: monitor_file_lines = f.readlines() + if not monitor_file_lines: + raise EOFError(f"empty percent log file: {log_file}") content_row_one = monitor_file_lines[0].strip().split("\t") - if not monitor_file_lines or len(content_row_one) < 4: + if len(content_row_one) < 4: raise ValueError(f"invalid percent log file: {log_file}") task_id, timestamp, percent, state, *_ = content_row_one diff --git a/ymir/backend/src/common/common_utils/sandbox_updater.py b/ymir/backend/src/common/common_utils/sandbox_updater.py new file mode 100644 index 0000000000..5402449cd9 --- /dev/null +++ b/ymir/backend/src/common/common_utils/sandbox_updater.py @@ -0,0 +1,105 @@ +import logging +import os +import shutil +from typing import Callable, List, Optional, Tuple + +import yaml + +from common_utils.sandbox_util import check_sandbox, detect_users_and_repos +from common_utils.version import ymir_salient_version + +from update_1_1_0_to_2_0_0.step_updater import update_models as update_models_110_200 +from update_1_1_0_to_2_0_0.step_updater import update_repo as update_repo_110_200 + + +_RepoUpdaterType = Callable[[str, str, str], None] +_ModelsUpdaterType = Callable[[str], None] +_StepUpdaterType = Tuple[Optional[_RepoUpdaterType], Optional[_ModelsUpdaterType]] + + +def update(sandbox_root: str, assets_root: str, models_root: str, src_ver: 
str, dst_ver: str) -> None: + steps = _get_update_steps(src_ver=src_ver, dst_ver=dst_ver) + if not steps: + logging.info(f"nothing to update {src_ver} -> {dst_ver}") + return + + check_sandbox(sandbox_root) + _backup(sandbox_root=sandbox_root, models_root=models_root) + + # update + user_to_repos = detect_users_and_repos(sandbox_root) + try: + for repo_func, models_func in steps: + # update user repos + if repo_func: + for user_id, repo_ids in user_to_repos.items(): + for repo_id in repo_ids: + repo_func(os.path.join(sandbox_root, user_id, repo_id), assets_root, models_root) + # update models + if models_func: + models_func(models_root) + + for user_id in user_to_repos: + _update_user_labels(label_path=os.path.join(sandbox_root, user_id, 'labels.yaml'), dst_ver=dst_ver) + except Exception as e: + _roll_back(sandbox_root=sandbox_root, models_root=models_root) + raise e + + # cleanup + shutil.rmtree(os.path.join(sandbox_root, 'sandbox-bk')) + shutil.rmtree(os.path.join(sandbox_root, 'ymir-models-bk')) + + +def _backup(sandbox_root: str, models_root: str) -> None: + # user dirs in sandbox_root + sandbox_backup_dir = os.path.join(sandbox_root, 'sandbox-bk') + os.makedirs(sandbox_backup_dir, exist_ok=False) + for user_id in detect_users_and_repos(sandbox_root): + shutil.copytree(src=os.path.join(sandbox_root, user_id), + dst=os.path.join(sandbox_backup_dir, user_id), + symlinks=True) + + models_backup_dir = os.path.join(sandbox_root, 'ymir-models-bk') + shutil.copytree(src=models_root, dst=models_backup_dir) + + +def _roll_back(sandbox_root: str, models_root: str) -> None: + sandbox_backup_dir = os.path.join(sandbox_root, 'sandbox-bk') + for user_id in detect_users_and_repos(sandbox_root): + src_user_dir = os.path.join(sandbox_backup_dir, user_id) + dst_user_dir = os.path.join(sandbox_root, user_id) + shutil.rmtree(dst_user_dir) + shutil.move(src=src_user_dir, dst=dst_user_dir) + + # models_root + models_backup_dir = os.path.join(sandbox_root, 'ymir-models-bk') + for model_hash in os.listdir(models_backup_dir): + src_model_path = os.path.join(models_backup_dir, model_hash) + if not os.path.isfile(src_model_path): + continue + dst_model_path = os.path.join(models_root, model_hash) + os.remove(dst_model_path) + shutil.move(src=src_model_path, dst=dst_model_path) + + shutil.rmtree(sandbox_backup_dir) + shutil.rmtree(models_backup_dir) + logging.info('roll back done') + + +def _get_update_steps(src_ver: str, dst_ver: str) -> List[_StepUpdaterType]: + eq_src_ver = ymir_salient_version(src_ver) + eq_dst_ver = ymir_salient_version(dst_ver) + + _UPDATE_NODES: List[str] = ['1.1.0', '2.0.0'] + _UPDATE_FUNCS: List[_StepUpdaterType] = [(update_repo_110_200, update_models_110_200)] + return _UPDATE_FUNCS[_UPDATE_NODES.index(eq_src_ver):_UPDATE_NODES.index(eq_dst_ver)] + + +def _update_user_labels(label_path: str, dst_ver: str) -> None: + logging.info(f"updating user labels: {label_path}, 110 -> 200") + + with open(label_path, 'r') as f: + label_contents = yaml.safe_load(f) + label_contents['ymir_version'] = dst_ver + with open(label_path, 'w') as f: + yaml.safe_dump(label_contents, f) diff --git a/ymir/backend/src/common/common_utils/sandbox_util.py b/ymir/backend/src/common/common_utils/sandbox_util.py new file mode 100644 index 0000000000..9555ef639e --- /dev/null +++ b/ymir/backend/src/common/common_utils/sandbox_util.py @@ -0,0 +1,108 @@ +from collections import defaultdict +import logging +import os +import re +from typing import List, Dict, Set + +import yaml + +from common_utils.version import 
DEFAULT_YMIR_SRC_VERSION
+from id_definition.error_codes import UpdaterErrorCode
+from id_definition.task_id import IDProto
+
+
+class SandboxError(Exception):
+    def __init__(self, error_code: int, error_message: str) -> None:
+        super().__init__()
+        self.error_code = error_code
+        self.error_message = error_message
+
+    def __str__(self) -> str:
+        return f"code: {self.error_code}, message: {self.error_message}"
+
+
+def detect_sandbox_src_versions(sandbox_root: str) -> List[str]:
+    """
+    detect user space versions in this sandbox
+
+    Args:
+        sandbox_root (str): root of this sandbox
+
+    Returns:
+        List[str]: all distinct user space versions found in this sandbox
+        (an empty list when no users or repos are found; multiple versions
+        are only logged, not treated as an error here)
+
+    Raises:
+        SandboxError: if a user's labels.yaml is missing or can not be parsed as yaml
+    """
+    user_to_repos = detect_users_and_repos(sandbox_root)
+    if not user_to_repos:
+        logging.warning(f"can not find user and repos in sandbox: {sandbox_root}")
+        return []
+
+    ver_to_users: Dict[str, List[str]] = defaultdict(list)
+    for user_id in user_to_repos:
+        user_label_file = os.path.join(sandbox_root, user_id, 'labels.yaml')
+        try:
+            with open(user_label_file, 'r') as f:
+                user_label_dict = yaml.safe_load(f)
+        except (FileNotFoundError, yaml.YAMLError) as e:
+            raise SandboxError(error_code=UpdaterErrorCode.INVALID_USER_LABEL_FILE,
+                               error_message=f"invalid label file: {user_label_file}") from e
+
+        ver_to_users[user_label_dict.get('ymir_version', DEFAULT_YMIR_SRC_VERSION)].append(user_id)
+
+    if len(ver_to_users) > 1:
+        logging.info(f"[detect_sandbox_src_versions]: multiple sandbox versions detected: {ver_to_users}")
+
+    return list(ver_to_users.keys())
+
+
+def detect_users_and_repos(sandbox_root: str) -> Dict[str, Set[str]]:
+    """
+    detect user and repo directories in this sandbox
+
+    Args:
+        sandbox_root (str): root of this sandbox
+
+    Returns:
+        Dict[str, Set[str]]: key: user id, value: repo ids
+    """
+    if not os.path.isdir(sandbox_root):
+        logging.warning(f"sandbox does not exist: {sandbox_root}")
+        return {}
+
+    user_to_repos = defaultdict(set)
+    for user_id in os.listdir(sandbox_root):
+        match_result = re.match(f"^\\d{{{IDProto.ID_LEN_USER_ID}}}$", user_id)
+        if not match_result:
+            continue
+        user_dir = os.path.join(sandbox_root, user_id)
+        user_to_repos[user_id].update([
+            repo_id for repo_id in os.listdir(user_dir) if re.match(f"^\\d{{{IDProto.ID_LEN_REPO_ID}}}$", repo_id)
+            and os.path.isdir(os.path.join(user_dir, repo_id, '.git'))
+        ])
+    return user_to_repos
+
+
+def check_sandbox(sandbox_root: str) -> None:
+    user_to_repos = detect_users_and_repos(sandbox_root)
+    for user_id, repo_ids in user_to_repos.items():
+        user_labels_path = os.path.join(sandbox_root, user_id, 'labels.yaml')
+        if not os.path.isfile(user_labels_path):
+            raise SandboxError(error_code=UpdaterErrorCode.INVALID_USER_LABEL_FILE,
+                               error_message=f"Invalid user labels: {user_labels_path} is not a file")
+
+        user_labels_inode = os.stat(user_labels_path).st_ino
+        for repo_id in repo_ids:
+            repo_labels_path = os.path.join(sandbox_root, user_id, repo_id, '.mir', 'labels.yaml')
+            if os.path.islink(repo_labels_path):
+                if os.path.realpath(repo_labels_path) != user_labels_path:
+                    raise SandboxError(
+                        error_code=UpdaterErrorCode.INVALID_USER_LABEL_FILE,
+                        error_message=f"Invalid user labels: {repo_labels_path} not symlinked to user labels")
+            else:
+                if os.stat(repo_labels_path).st_ino != user_labels_inode:
+                    raise SandboxError(
+                        error_code=UpdaterErrorCode.INVALID_USER_LABEL_FILE,
+                        error_message=f"Invalid user labels: {repo_labels_path} not hardlinked to user labels")
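[editor's note] A quick smoke test of the two helpers above: fabricate a one-user, one-repo sandbox in a temp directory, then run detection plus the link check. The only assumption is that IDProto's ID_LEN_USER_ID / ID_LEN_REPO_ID are plain digit counts for the directory names:

```python
# Build a throwaway sandbox so detect_users_and_repos / check_sandbox
# can run end to end; layout mirrors what the code above expects.
import os
import tempfile

from common_utils.sandbox_util import check_sandbox, detect_users_and_repos
from id_definition.task_id import IDProto

with tempfile.TemporaryDirectory() as sandbox_root:
    user_id = "1".zfill(IDProto.ID_LEN_USER_ID)
    repo_id = "1".zfill(IDProto.ID_LEN_REPO_ID)
    os.makedirs(os.path.join(sandbox_root, user_id, repo_id, ".git"))

    # user-level labels.yaml, hard-linked into the repo's .mir directory
    user_labels = os.path.join(sandbox_root, user_id, "labels.yaml")
    open(user_labels, "w").close()
    mir_dir = os.path.join(sandbox_root, user_id, repo_id, ".mir")
    os.makedirs(mir_dir)
    os.link(user_labels, os.path.join(mir_dir, "labels.yaml"))

    assert detect_users_and_repos(sandbox_root) == {user_id: {repo_id}}
    check_sandbox(sandbox_root)  # passes: repo labels share the user-file inode
```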
user labels") diff --git a/ymir/backend/src/common/common_utils/version.py b/ymir/backend/src/common/common_utils/version.py new file mode 100644 index 0000000000..06bc7aab02 --- /dev/null +++ b/ymir/backend/src/common/common_utils/version.py @@ -0,0 +1 @@ +from mir.version import DEFAULT_YMIR_SRC_VERSION, ymir_salient_version, YMIR_VERSION # type: ignore # noqa diff --git a/ymir/backend/src/common/id_definition/error_codes.py b/ymir/backend/src/common/id_definition/error_codes.py index 2d3b1051ee..1d40c62cbe 100644 --- a/ymir/backend/src/common/id_definition/error_codes.py +++ b/ymir/backend/src/common/id_definition/error_codes.py @@ -1,6 +1,32 @@ from enum import IntEnum, unique +@unique +class CMDResponseCode(IntEnum): + """ + duplicated from `mir.tools.code.MirCode` + """ + + RC_OK = 0 # everything is ok, command finished without any errors or warnings + RC_CMD_INVALID_ARGS = 160001 # lack of necessary args, or unexpected args + RC_CMD_EMPTY_TRAIN_SET = 160002 # no training set when training + RC_CMD_EMPTY_VAL_SET = 160003 # no validation set when training + RC_CMD_CONTAINER_ERROR = 160004 # container errors + RC_CMD_UNKNOWN_TYPES = 160005 # unknown types found, and can not be ignored when mir import + RC_CMD_INVALID_BRANCH_OR_TAG = 160006 # invalid branch name or tag name + RC_CMD_DIRTY_REPO = 160007 # repo is dirty when mir commit + RC_CMD_MERGE_ERROR = 160008 # error occured when mir merge + RC_CMD_INVALID_MIR_REPO = 160009 + RC_CMD_INVALID_FILE = 160010 + RC_CMD_NO_RESULT = 160011 # no result for training, mining and infer + RC_CMD_OPENPAI_ERROR = 160012 + RC_CMD_NO_ANNOTATIONS = 160013 + RC_CMD_CAN_NOT_CALC_CONFUSION_MATRIX = 160014 + RC_CMD_INVALID_MODEL_PACKAGE_VERSION = 160015 + RC_CMD_INVALID_META_YAML_FILE = 160016 + RC_CMD_ERROR_UNKNOWN = 169999 + + @unique class CTLResponseCode(IntEnum): CTR_OK = 0 @@ -19,6 +45,7 @@ class CTLResponseCode(IntEnum): INVOKER_LABEL_TASK_NETWORK_ERROR = 130603 INVOKER_HTTP_ERROR = 130604 INVOKER_UNKNOWN_ERROR = 130605 + INVOKER_INVALID_ARGS = 130606 @unique @@ -27,7 +54,10 @@ class VizErrorCode(IntEnum): BRANCH_NOT_EXISTS = 140401 MODEL_NOT_EXISTS = 140402 DATASET_EVALUATION_NOT_EXISTS = 140403 + NO_CLASS_IDS = 140404 + INVALID_ANNO_TYPE = 140405 INTERNAL_ERROR = 140500 + TOO_MANY_DATASETS_TO_CHECK = 140600 @unique @@ -53,6 +83,7 @@ class APIErrorCode(IntEnum): INVALID_CONFIGURATION = 110109 INVALID_SCOPE = 110110 FAILED_TO_PROCESS_PROTECTED_RESOURCES = 110111 + SYSTEM_VERSION_CONFLICT = 110112 USER_NOT_FOUND = 110201 USER_DUPLICATED_NAME = 110202 @@ -69,6 +100,10 @@ class APIErrorCode(IntEnum): DATASET_FAILED_TO_CREATE = 110404 DATASET_PROTECTED_TO_DELETE = 110405 DATASETS_NOT_IN_SAME_GROUP = 110406 + INVALID_DATASET_STRUCTURE = 110407 + DATASET_FAILED_TO_IMPORT = 110408 + INVALID_DATASET_ZIP_FILE = 110409 + DATASET_INDEX_NOT_READY = 110410 ASSET_NOT_FOUND = 110501 @@ -106,6 +141,7 @@ class APIErrorCode(IntEnum): PROJECT_NOT_FOUND = 111401 PROJECT_DUPLICATED_NAME = 111402 PROJECT_FAILED_TO_CREATE = 111403 + INVALID_PROJECT = 111404 DATASET_GROUP_NOT_FOUND = 111501 DATASET_GROUP_DUPLICATED_NAME = 111502 @@ -118,6 +154,8 @@ class APIErrorCode(IntEnum): ITERATION_NOT_FOUND = 111701 ITERATION_FAILED_TO_CREATE = 111703 ITERATION_COULD_NOT_UPDATE_STAGE = 111704 + ITERATION_STEP_NOT_FOUND = 111705 + ITERATION_STEP_ALREADY_FINISHED = 111706 FAILED_TO_IMPORT_MODEL = 111801 @@ -125,3 +163,19 @@ class APIErrorCode(IntEnum): FAILED_TO_EVALUATE = 111902 DATASET_EVALUATION_NOT_FOUND = 111903 MISSING_OPERATIONS = 111904 + DATASET_EVALUATION_NO_ANNOTATIONS = 
111905 + PREMATURE_DATASETS = 111906 + + MODEL_STAGE_NOT_FOUND = 112001 + INVALID_MODEL_STAGE_NAME = 112002 + + VIZ_ERROR = 112101 + FAILED_TO_PARSE_VIZ_RESP = 112102 + VIZ_TIMEOUT = 112103 + + VISUALIZATION_NOT_FOUND = 112201 + + +class UpdaterErrorCode(IntEnum): + INVALID_USER_LABEL_FILE = 170001 + SANDBOX_VERSION_NOT_SUPPORTED = 170002 diff --git a/ymir/backend/src/common/proto/backend.proto b/ymir/backend/src/common/proto/backend.proto index 41c8db259d..bfcf4447c4 100644 --- a/ymir/backend/src/common/proto/backend.proto +++ b/ymir/backend/src/common/proto/backend.proto @@ -2,53 +2,20 @@ syntax = "proto3"; package ymir.backend; -/// assertion type: training, validation or test -enum TvtType { - TvtTypeUnknown = 0; - TvtTypeTraining = 1; - TvtTypeValidation = 2; - TvtTypeTest = 3; -}; - -/// task type -enum TaskType { - TaskTypeUnknown = 0; - TaskTypeTraining = 1; - TaskTypeMining = 2; - TaskTypeLabel = 3; - TaskTypeFilter = 4; - TaskTypeImportData = 5; - TaskTypeExportData = 6; - TaskTypeCopyData = 7; - TaskTypeMerge = 8; - TaskTypeInfer = 9; - TaskTypeSampling = 10; - /// merge -> filter -> sampling - TaskTypeFusion = 11; - TaskTypeImportModel = 13; - TaskTypeCopyModel = 14; - TaskTypeDatasetInfer = 15; - - reserved 12, 16; -}; +import "mir_command.proto"; -enum LabelFormat { - NO_ANNOTATION = 0; - PASCAL_VOC = 1; - IF_ARK = 2; - LABEL_STUDIO_JSON = 3; -}; - -enum MirStorage { - MIR_METADATAS = 0; - MIR_ANNOTATIONS = 1; - MIR_KEYWORDS = 2; - MIR_TASKS = 3; -} +option go_package = "/protos"; enum MergeStrategy { STOP = 0; HOST = 1; + GUEST = 2; +} + +enum UnknownTypesStrategy { + UTS_STOP = 0; + UTS_IGNORE = 1; + UTS_ADD = 2; } enum RequestType { @@ -74,6 +41,7 @@ enum RequestType { CMD_EVALUATE = 19; CMD_REPO_CHECK = 20; CMD_REPO_CLEAR = 21; + CMD_VERSIONS_GET = 22; // Sandbox path operation USER_LIST = 101; @@ -86,7 +54,7 @@ enum RequestType { // Long task TASK_CREATE = 1001; - reserved 15, 1002; + reserved 15; } message GeneralReq { @@ -116,17 +84,18 @@ message GeneralReq { bool check_only=18; string executant_name = 19; MergeStrategy merge_strategy = 20; - TaskType terminated_task_type = 21; + mir.command.TaskType terminated_task_type = 21; oneof sampling { int32 sampling_count = 22; float sampling_rate = 23; } string task_parameters = 24; LabelCollection label_collection = 25; - EvaluateConfig evaluate_config = 26; + mir.command.EvaluateConfig evaluate_config = 26; + string model_stage = 27; ReqCreateTask req_create_task = 1001; - reserved 17, 1002; + reserved 17; } message GeneralResp { @@ -139,134 +108,95 @@ message GeneralResp { int32 available_gpu_counts = 8; LabelCollection label_collection = 9; bool ops_ret = 10; - RespCMDInference detection = 1001; + repeated string sandbox_versions = 11; + RespCMDInference detection = 1000; + bool enable_livecode = 1001; + mir.command.Evaluation evaluation = 1002; - reserved 5, 1000; + reserved 5; } // base args for create task request message ReqCreateTask { // task type - TaskType task_type = 1; - float sampling_rate = 2; - bool no_task_monitor = 3; - - TaskReqFilter filter = 101 [deprecated=true]; - TaskReqTraining training = 102; - TaskReqMining mining = 103; - TaskReqImporting importing = 104; - TaskReqExporting exporting = 105; - TaskReqInference inference = 106; - TaskReqCopyData copy = 107; - TaskReqLabeling labeling = 108; - TaskReqFusion fusion = 109; - TaskReqModelImporting model_importing = 110; -} + mir.command.TaskType task_type = 1; + bool no_task_monitor = 2; -message TaskReqFilter { - repeated string in_dataset_ids = 1; - 
repeated int32 in_class_ids = 2; - repeated int32 ex_class_ids = 3; + TaskReqTraining training = 101; + TaskReqMining mining = 102; + TaskReqImportDataset import_dataset = 103; + TaskReqExporting exporting = 104; + TaskReqCopyData copy = 105; + TaskReqLabeling labeling = 106; + TaskReqImportModel import_model = 107; } message TaskReqTraining { message TrainingDatasetType { string dataset_id = 1; - TvtType dataset_type = 2; + mir.command.TvtType dataset_type = 2; } repeated TrainingDatasetType in_dataset_types = 1; - repeated int32 in_class_ids = 2; - - reserved 3; + string preprocess_config = 2; } message TaskReqMining { - repeated string in_dataset_ids = 1; - repeated string ex_dataset_ids = 2; - int32 top_k = 4; // > 0, will keep all if set to 0. - bool generate_annotations = 6; - - reserved 3, 5; + int32 top_k = 1; // > 0, will keep all if set to 0. + bool generate_annotations = 2; } -message TaskReqImporting { +message TaskReqImportDataset { // store media files string asset_dir = 1; // single pascal xml per asset, same base_filename as in asset-folder - string annotation_dir = 2; - bool name_strategy_ignore = 3; + string pred_dir = 2; + string gt_dir = 3; + // strategy for unknown class types: stop, ignore or add + UnknownTypesStrategy unknown_types_strategy = 4; + bool clean_dirs = 5; } message TaskReqExporting { string dataset_id = 1; - LabelFormat format = 2; + mir.command.AnnoFormat format = 2; string asset_dir = 3; - string annotation_dir = 4; -} - -message TaskReqInference { + string pred_dir = 4; + string gt_dir = 5; } message TaskReqCopyData { string src_user_id = 1; string src_repo_id = 2; - string src_dataset_id = 3; - bool name_strategy_ignore = 4; - bool drop_annotations = 5; + bool name_strategy_ignore = 3; + bool drop_annotations = 4; } -message TaskReqLabeling { - string dataset_id = 1; - repeated string labeler_accounts = 2; - repeated int32 in_class_ids = 3; - string expert_instruction_url = 4; - string project_name = 5; - bool export_annotation = 6; +enum AnnotationType { + NOT_SET = 0; + GT = 1; + PRED = 2; } -message TaskReqFusion { - repeated string in_dataset_ids = 1; - repeated string ex_dataset_ids = 2; - MergeStrategy merge_strategy = 3; - repeated int32 in_class_ids = 4; - repeated int32 ex_class_ids = 5; - oneof sampling { - int32 count = 6; - float rate = 7; - } +message TaskReqLabeling { + repeated string labeler_accounts = 1; + string expert_instruction_url = 2; + string project_name = 3; + bool export_annotation = 4; + AnnotationType annotation_type = 5; } -message TaskReqModelImporting { +message TaskReqImportModel { string model_package_path = 1; } message RespCMDInference { /// key: image id, value: annotations of that single image - map image_annotations = 1; -}; - -message SingleImageAnnotations { - repeated Annotation annotations = 2; -}; - -message Annotation { - // Index of this annotation in current single image, may be different from the index in repeated field. 
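[editor's note] With the revised messages above, an import-dataset task is now expressed through ReqCreateTask.import_dataset with the new UnknownTypesStrategy knob. A minimal sketch of composing such a request follows; field names come straight from this .proto, while the TaskTypeImportData member of mir.command.TaskType, the id formats, and the paths are assumptions (mir_command.proto is not shown in this diff):

```python
# Minimal sketch of building an import-dataset GeneralReq with the new schema.
# Assumed: mir_command.proto keeps a TaskTypeImportData member; ids/paths are
# placeholders only.
from mir.protos import mir_command_pb2
from proto import backend_pb2

req = backend_pb2.GeneralReq(
    user_id="0001",                       # placeholder ids
    repo_id="000001",
    req_type=backend_pb2.TASK_CREATE,
    req_create_task=backend_pb2.ReqCreateTask(
        task_type=mir_command_pb2.TaskTypeImportData,
        import_dataset=backend_pb2.TaskReqImportDataset(
            asset_dir="/data/assets",
            pred_dir="/data/predictions",
            gt_dir="/data/ground_truth",
            unknown_types_strategy=backend_pb2.UTS_IGNORE,
        ),
    ),
)
print(req)
```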
- int32 index = 1; - Rect box = 2; - int32 class_id = 3; - double score = 4; - string class_name = 5; -}; - -message Rect { - int32 x = 1; - int32 y = 2; - int32 w = 3; - int32 h = 4; + map image_annotations = 1; }; message LabelCollection { repeated Label labels = 1; + string ymir_version = 2; } message Label { @@ -277,282 +207,7 @@ message Label { string update_time = 5; // RFC 3339 date strings } -message EvaluateConfig { - // confidence threshold, 0 to 1 - float conf_thr = 1; - // from:to:step, to value is excluded (same as python range) - string iou_thrs_interval = 2; - // need pr curve in evaluation result, default is false - bool need_pr_curve = 3; -} - service mir_controller_service { - /* - APIS FOR DATA MANAGEMENT - KEY CONCEPTS - sandbox: sandbox = sandbox_dir + docker_container - sandbox_dir = sandbox_root + user_name - docker_container = container of docker_image - where sandbox_root and docker_image are get from cli args - one user should have only one sandbox - but can have multiple mir repos in this sandbox - - CREATE_SANDBOX - creates a sandbox for a single user - Args: - GeneralReq.user: user name for this sandbox - Returns: - 0: success - errors when: - sandbox already exists - other system errors occurred - - REMOVE_SANDBOX - removes a sandbox for a single user - it also removes all contents in the sandbox - Args: - GeneralReq.user: user name for this sandbox - Returns: - 0: success - errors when: - sandbox not exists - other system errors occurred - - START_SANDBOX - starts a sandbox for a single user - Args: - GeneralReq.user: user name for this sandbox - Returns: - 0: success - errors when: - sandbox not exists - sandbox already started - other docker errors occurred - - STOP_SANDBOX - stops a sandbox for a single user - Args: - GeneralReq.user: user name for this sandbox - Returns: - 0: success - errors when: - sandbox not exists - sandbox already stopped - other docker errors occurred - - INIT - init a new mir repo in a running sandbox - Args: - GeneralReq.user: user name for this sandbox - GeneralReq.repo: repo name - Returns: - 0: success - errors when: - sandbox not exists - sandbox not running - already have mir repo with the same name - other docker errors occurred - other mir errors occurred - - BRANCH_LIST - list all branches in running sandbox for user - Args: - GeneralReq.user: user name for this sandbox - GeneralReq.repo: repo name - GeneralReq.ext_bool: if true, lists remote branches - if false, lists local branches - Returns: - 0: success - ext_strs: branches - errors when: - sandbox not exists - sandbox not running - repo not found - other docker errors occurred - other mir errors occurred - - BRANCH_DEL - remove one branch in running sandbox for user - Args: - GeneralReq.user: user name for this sandbox - GeneralReq.repo: repo name - GeneralReq.ext_str: branch to be deleted - GeneralReq.ext_bool: force delete even if this branch has not been merged yet - Returns: - 0: success - errors when: - sandbox not exists - sandbox not running - repo not found - branch not found - branch not merged if ext_bool is false - other docker errors occurred - other mir errors occurred - - CHECKOUT_COMMIT - checkout to another commit, or to another branch, or to another tag - Args: - GeneralReq.user: user name for this sandbox - GeneralReq.repo: repo name - GeneralReq.ext_str: branch name, tag name or commit id - Returns: - 0: success - errors when: - sandbox not exists - sandbox not running - repo not found - branch, tag or commit not found - other docker errors 
occurred - other mir errors occurred - - CHECKOUT_BRANCH - create a new branch in a running sandbox for user - Args: - GeneralReq.user: user name for this sandbox - GeneralReq.repo: repo name - GeneralReq.ext_str: new branch name - Returns: - 0: success - errors when: - sandbox not exists - sandbox not running - repo not found - new branch name already exists - other docker errors occurred - other mir errors occurred - - CLONE - clones a mir repo from server - Args: - GeneralReq.user: user name for this sandbox - GeneralReq.repo: repo url - Returns: - 0: success - errors when: - sandbox not exists - sandbox not running - repo url not available - other docker errors occurred - other mir errors occurred - - COMMIT - commit changes for mir repo - Args: - GeneralReq.user: user name for this sandbox - GeneralReq.repo: repo name - GeneralReq.ext_str: commit messages, multi lines enabled - Returns: - 0: success - errors when: - sandbox not exists - sandbox not running - repo not found - commit messages empty, or contains only spaces, tabs or line breaks - other docker errors occurred - other mir errors occurred - - FILTER - filter assets (currently by asset keywords) in mir repo - Args: - GeneralReq.user: user name for this sandbox - GeneralReq.repo: repo name - GeneralReq.ext_str: predicates, keywords separated by commas or semicolons - comma means AND - semicolon means OR - for example: `person; cat, dog` means to filter assets which - have person, or have both cat and dog in asset keywords list - attention that comma (means AND) has higher priority - Returns: - 0: success - errors when: - sandbox not exists - sandbox not running - repo not found - predicate empty - other docker errors occurred - other mir errors occurred - - LOG - get log from repo - Args: - GeneralReq.user: user name for this sandbox - GeneralReq.repo: repo name - Returns: - 0: success - GeneralResp.ext_strs: log infos - errors when: - sandbox not exists - sandbox not running - repo not found - other docker errors occurred - other mir errors occurred - - MERGE - merges current repo with another - Args: - GeneralReq.user: user name for this sandbox - GeneralReq.repo: repo name - GeneralReq.ext_int32: merge stragety, 0: MIX, 1: GUEST - GeneralReq.ext_str: guest branch name - Returns: - 0: success - errors when: - sandbox not exists - sandbox not running - repo not found - other docker errors occurred - other mir errors occurred - - PULL - pulls (updates) contents from server - Args: - GeneralReq.user: user name for this sandbox - GeneralReq.repo: repo name - Returns: - 0: success - errors when: - sandbox not exists - sandbox not running - repo not found - other docker errors occurred - other mir errors occurred - - PUSH - pushes local commits to server - Args: - GeneralReq.user: user name for this sandbox - GeneralReq.repo: repo name - GeneralReq.ext_bool: creates new branch on server - Returns: - 0: success - errors when: - sandbox not exists - sandbox not running - repo not found - other docker errors occurred - other mir errors occurred - - RESET: currently not available - - STATUS - shows status of current repo - Args: - GeneralReq.user: user name for this sandbox - GeneralReq.repo: repo name - Returns: - 0: success - GeneralResp.message: summary of current repo - errors when: - sandbox not exists - sandbox not running - repo not found - other docker errors occurred - other mir errors occurred - */ rpc data_manage_request(GeneralReq) returns (GeneralResp) {} // rpc view_request(GeneralReq) returns (GeneralResp) {} } // 
mcs_dm - -// service mcs_view -// service scm_tvt -// service mcs_auto_mining diff --git a/ymir/backend/src/common/proto/backend_pb2.py b/ymir/backend/src/common/proto/backend_pb2.py index e1da373ed3..40eafe00fe 100644 --- a/ymir/backend/src/common/proto/backend_pb2.py +++ b/ymir/backend/src/common/proto/backend_pb2.py @@ -12,242 +12,81 @@ _sym_db = _symbol_database.Default() +from mir.protos import mir_command_pb2 as mir__command__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='backend.proto', package='ymir.backend', syntax='proto3', - serialized_options=None, - create_key=_descriptor._internal_create_key, - serialized_pb=b'\n\rbackend.proto\x12\x0cymir.backend\"\x9a\x06\n\nGeneralReq\x12\x0f\n\x07user_id\x18\x01 \x01(\t\x12\x0f\n\x07repo_id\x18\x02 \x01(\t\x12+\n\x08req_type\x18\x03 \x01(\x0e\x32\x19.ymir.backend.RequestType\x12\x0f\n\x07task_id\x18\x04 \x01(\t\x12\x14\n\x0csingleton_op\x18\x05 \x01(\t\x12\x13\n\x0bhis_task_id\x18\x06 \x01(\t\x12\x16\n\x0e\x64st_dataset_id\x18\x07 \x01(\t\x12\x16\n\x0ein_dataset_ids\x18\x08 \x03(\t\x12\x16\n\x0e\x65x_dataset_ids\x18\t \x03(\t\x12\x14\n\x0cin_class_ids\x18\n \x03(\x05\x12\x14\n\x0c\x65x_class_ids\x18\x0b \x03(\x05\x12\r\n\x05\x66orce\x18\x0c \x01(\x08\x12\x16\n\x0e\x63ommit_message\x18\r \x01(\t\x12\x12\n\nmodel_hash\x18\x0e \x01(\t\x12\x11\n\tasset_dir\x18\x0f \x01(\t\x12\x1b\n\x13\x64ocker_image_config\x18\x10 \x01(\t\x12\x12\n\ncheck_only\x18\x12 \x01(\x08\x12\x16\n\x0e\x65xecutant_name\x18\x13 \x01(\t\x12\x33\n\x0emerge_strategy\x18\x14 \x01(\x0e\x32\x1b.ymir.backend.MergeStrategy\x12\x34\n\x14terminated_task_type\x18\x15 \x01(\x0e\x32\x16.ymir.backend.TaskType\x12\x18\n\x0esampling_count\x18\x16 \x01(\x05H\x00\x12\x17\n\rsampling_rate\x18\x17 \x01(\x02H\x00\x12\x17\n\x0ftask_parameters\x18\x18 \x01(\t\x12\x37\n\x10label_collection\x18\x19 \x01(\x0b\x32\x1d.ymir.backend.LabelCollection\x12\x35\n\x0f\x65valuate_config\x18\x1a \x01(\x0b\x32\x1c.ymir.backend.EvaluateConfig\x12\x35\n\x0freq_create_task\x18\xe9\x07 \x01(\x0b\x32\x1b.ymir.backend.ReqCreateTaskB\n\n\x08samplingJ\x04\x08\x11\x10\x12J\x06\x08\xea\x07\x10\xeb\x07\"\x97\x03\n\x0bGeneralResp\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0breq_task_id\x18\x02 \x01(\t\x12\x0f\n\x07message\x18\x03 \x01(\t\x12\x10\n\x08\x65xt_strs\x18\x04 \x03(\t\x12\x0f\n\x07hash_id\x18\x06 \x01(\t\x12M\n\x13\x64ocker_image_config\x18\x07 \x03(\x0b\x32\x30.ymir.backend.GeneralResp.DockerImageConfigEntry\x12\x1c\n\x14\x61vailable_gpu_counts\x18\x08 \x01(\x05\x12\x37\n\x10label_collection\x18\t \x01(\x0b\x32\x1d.ymir.backend.LabelCollection\x12\x0f\n\x07ops_ret\x18\n \x01(\x08\x12\x32\n\tdetection\x18\xe9\x07 \x01(\x0b\x32\x1e.ymir.backend.RespCMDInference\x1a\x38\n\x16\x44ockerImageConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01J\x04\x08\x05\x10\x06J\x06\x08\xe8\x07\x10\xe9\x07\"\xdb\x04\n\rReqCreateTask\x12)\n\ttask_type\x18\x01 \x01(\x0e\x32\x16.ymir.backend.TaskType\x12\x15\n\rsampling_rate\x18\x02 \x01(\x02\x12\x17\n\x0fno_task_monitor\x18\x03 \x01(\x08\x12/\n\x06\x66ilter\x18\x65 \x01(\x0b\x32\x1b.ymir.backend.TaskReqFilterB\x02\x18\x01\x12/\n\x08training\x18\x66 \x01(\x0b\x32\x1d.ymir.backend.TaskReqTraining\x12+\n\x06mining\x18g \x01(\x0b\x32\x1b.ymir.backend.TaskReqMining\x12\x31\n\timporting\x18h \x01(\x0b\x32\x1e.ymir.backend.TaskReqImporting\x12\x31\n\texporting\x18i \x01(\x0b\x32\x1e.ymir.backend.TaskReqExporting\x12\x31\n\tinference\x18j \x01(\x0b\x32\x1e.ymir.backend.TaskReqInference\x12+\n\x04\x63opy\x18k 
\x01(\x0b\x32\x1d.ymir.backend.TaskReqCopyData\x12/\n\x08labeling\x18l \x01(\x0b\x32\x1d.ymir.backend.TaskReqLabeling\x12+\n\x06\x66usion\x18m \x01(\x0b\x32\x1b.ymir.backend.TaskReqFusion\x12<\n\x0fmodel_importing\x18n \x01(\x0b\x32#.ymir.backend.TaskReqModelImporting\"S\n\rTaskReqFilter\x12\x16\n\x0ein_dataset_ids\x18\x01 \x03(\t\x12\x14\n\x0cin_class_ids\x18\x02 \x03(\x05\x12\x14\n\x0c\x65x_class_ids\x18\x03 \x03(\x05\"\xd2\x01\n\x0fTaskReqTraining\x12K\n\x10in_dataset_types\x18\x01 \x03(\x0b\x32\x31.ymir.backend.TaskReqTraining.TrainingDatasetType\x12\x14\n\x0cin_class_ids\x18\x02 \x03(\x05\x1aV\n\x13TrainingDatasetType\x12\x12\n\ndataset_id\x18\x01 \x01(\t\x12+\n\x0c\x64\x61taset_type\x18\x02 \x01(\x0e\x32\x15.ymir.backend.TvtTypeJ\x04\x08\x03\x10\x04\"x\n\rTaskReqMining\x12\x16\n\x0ein_dataset_ids\x18\x01 \x03(\t\x12\x16\n\x0e\x65x_dataset_ids\x18\x02 \x03(\t\x12\r\n\x05top_k\x18\x04 \x01(\x05\x12\x1c\n\x14generate_annotations\x18\x06 \x01(\x08J\x04\x08\x03\x10\x04J\x04\x08\x05\x10\x06\"[\n\x10TaskReqImporting\x12\x11\n\tasset_dir\x18\x01 \x01(\t\x12\x16\n\x0e\x61nnotation_dir\x18\x02 \x01(\t\x12\x1c\n\x14name_strategy_ignore\x18\x03 \x01(\x08\"|\n\x10TaskReqExporting\x12\x12\n\ndataset_id\x18\x01 \x01(\t\x12)\n\x06\x66ormat\x18\x02 \x01(\x0e\x32\x19.ymir.backend.LabelFormat\x12\x11\n\tasset_dir\x18\x03 \x01(\t\x12\x16\n\x0e\x61nnotation_dir\x18\x04 \x01(\t\"\x12\n\x10TaskReqInference\"\x8b\x01\n\x0fTaskReqCopyData\x12\x13\n\x0bsrc_user_id\x18\x01 \x01(\t\x12\x13\n\x0bsrc_repo_id\x18\x02 \x01(\t\x12\x16\n\x0esrc_dataset_id\x18\x03 \x01(\t\x12\x1c\n\x14name_strategy_ignore\x18\x04 \x01(\x08\x12\x18\n\x10\x64rop_annotations\x18\x05 \x01(\x08\"\xa6\x01\n\x0fTaskReqLabeling\x12\x12\n\ndataset_id\x18\x01 \x01(\t\x12\x18\n\x10labeler_accounts\x18\x02 \x03(\t\x12\x14\n\x0cin_class_ids\x18\x03 \x03(\x05\x12\x1e\n\x16\x65xpert_instruction_url\x18\x04 \x01(\t\x12\x14\n\x0cproject_name\x18\x05 \x01(\t\x12\x19\n\x11\x65xport_annotation\x18\x06 \x01(\x08\"\xcd\x01\n\rTaskReqFusion\x12\x16\n\x0ein_dataset_ids\x18\x01 \x03(\t\x12\x16\n\x0e\x65x_dataset_ids\x18\x02 \x03(\t\x12\x33\n\x0emerge_strategy\x18\x03 \x01(\x0e\x32\x1b.ymir.backend.MergeStrategy\x12\x14\n\x0cin_class_ids\x18\x04 \x03(\x05\x12\x14\n\x0c\x65x_class_ids\x18\x05 \x03(\x05\x12\x0f\n\x05\x63ount\x18\x06 \x01(\x05H\x00\x12\x0e\n\x04rate\x18\x07 \x01(\x02H\x00\x42\n\n\x08sampling\"3\n\x15TaskReqModelImporting\x12\x1a\n\x12model_package_path\x18\x01 \x01(\t\"\xc2\x01\n\x10RespCMDInference\x12O\n\x11image_annotations\x18\x01 \x03(\x0b\x32\x34.ymir.backend.RespCMDInference.ImageAnnotationsEntry\x1a]\n\x15ImageAnnotationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x33\n\x05value\x18\x02 \x01(\x0b\x32$.ymir.backend.SingleImageAnnotations:\x02\x38\x01\"G\n\x16SingleImageAnnotations\x12-\n\x0b\x61nnotations\x18\x02 \x03(\x0b\x32\x18.ymir.backend.Annotation\"q\n\nAnnotation\x12\r\n\x05index\x18\x01 \x01(\x05\x12\x1f\n\x03\x62ox\x18\x02 \x01(\x0b\x32\x12.ymir.backend.Rect\x12\x10\n\x08\x63lass_id\x18\x03 \x01(\x05\x12\r\n\x05score\x18\x04 \x01(\x01\x12\x12\n\nclass_name\x18\x05 \x01(\t\"2\n\x04Rect\x12\t\n\x01x\x18\x01 \x01(\x05\x12\t\n\x01y\x18\x02 \x01(\x05\x12\t\n\x01w\x18\x03 \x01(\x05\x12\t\n\x01h\x18\x04 \x01(\x05\"6\n\x0fLabelCollection\x12#\n\x06labels\x18\x01 \x03(\x0b\x32\x13.ymir.backend.Label\"\\\n\x05Label\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0f\n\x07\x61liases\x18\x03 \x03(\t\x12\x13\n\x0b\x63reate_time\x18\x04 \x01(\t\x12\x13\n\x0bupdate_time\x18\x05 
\x01(\t\"T\n\x0e\x45valuateConfig\x12\x10\n\x08\x63onf_thr\x18\x01 \x01(\x02\x12\x19\n\x11iou_thrs_interval\x18\x02 \x01(\t\x12\x15\n\rneed_pr_curve\x18\x03 \x01(\x08*Z\n\x07TvtType\x12\x12\n\x0eTvtTypeUnknown\x10\x00\x12\x13\n\x0fTvtTypeTraining\x10\x01\x12\x15\n\x11TvtTypeValidation\x10\x02\x12\x0f\n\x0bTvtTypeTest\x10\x03*\xdc\x02\n\x08TaskType\x12\x13\n\x0fTaskTypeUnknown\x10\x00\x12\x14\n\x10TaskTypeTraining\x10\x01\x12\x12\n\x0eTaskTypeMining\x10\x02\x12\x11\n\rTaskTypeLabel\x10\x03\x12\x12\n\x0eTaskTypeFilter\x10\x04\x12\x16\n\x12TaskTypeImportData\x10\x05\x12\x16\n\x12TaskTypeExportData\x10\x06\x12\x14\n\x10TaskTypeCopyData\x10\x07\x12\x11\n\rTaskTypeMerge\x10\x08\x12\x11\n\rTaskTypeInfer\x10\t\x12\x14\n\x10TaskTypeSampling\x10\n\x12\x12\n\x0eTaskTypeFusion\x10\x0b\x12\x17\n\x13TaskTypeImportModel\x10\r\x12\x15\n\x11TaskTypeCopyModel\x10\x0e\x12\x18\n\x14TaskTypeDatasetInfer\x10\x0f\"\x04\x08\x0c\x10\x0c\"\x04\x08\x10\x10\x10*S\n\x0bLabelFormat\x12\x11\n\rNO_ANNOTATION\x10\x00\x12\x0e\n\nPASCAL_VOC\x10\x01\x12\n\n\x06IF_ARK\x10\x02\x12\x15\n\x11LABEL_STUDIO_JSON\x10\x03*U\n\nMirStorage\x12\x11\n\rMIR_METADATAS\x10\x00\x12\x13\n\x0fMIR_ANNOTATIONS\x10\x01\x12\x10\n\x0cMIR_KEYWORDS\x10\x02\x12\r\n\tMIR_TASKS\x10\x03*#\n\rMergeStrategy\x12\x08\n\x04STOP\x10\x00\x12\x08\n\x04HOST\x10\x01*\x90\x04\n\x0bRequestType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x12\n\x0e\x43MD_BRANCH_DEL\x10\x01\x12\x13\n\x0f\x43MD_BRANCH_LIST\x10\x02\x12\x17\n\x13\x43MD_BRANCH_CHECKOUT\x10\x03\x12\x15\n\x11\x43MD_BRANCH_CREATE\x10\x04\x12\r\n\tCMD_CLONE\x10\x05\x12\x0e\n\nCMD_COMMIT\x10\x06\x12\x0e\n\nCMD_FILTER\x10\x07\x12\x0c\n\x08\x43MD_INIT\x10\x08\x12\x0b\n\x07\x43MD_LOG\x10\t\x12\r\n\tCMD_MERGE\x10\n\x12\x11\n\rCMD_INFERENCE\x10\x0b\x12\x11\n\rCMD_LABEL_ADD\x10\x0c\x12\x11\n\rCMD_LABEL_GET\x10\r\x12\x11\n\rCMD_TERMINATE\x10\x0e\x12\x12\n\x0e\x43MD_PULL_IMAGE\x10\x10\x12\x14\n\x10\x43MD_GPU_INFO_GET\x10\x11\x12\x10\n\x0c\x43MD_SAMPLING\x10\x12\x12\x10\n\x0c\x43MD_EVALUATE\x10\x13\x12\x12\n\x0e\x43MD_REPO_CHECK\x10\x14\x12\x12\n\x0e\x43MD_REPO_CLEAR\x10\x15\x12\r\n\tUSER_LIST\x10\x65\x12\x0f\n\x0bUSER_CREATE\x10\x66\x12\x0f\n\x0bUSER_REMOVE\x10g\x12\r\n\tREPO_LIST\x10h\x12\x0f\n\x0bREPO_CREATE\x10i\x12\x0f\n\x0bREPO_REMOVE\x10j\x12\x10\n\x0bTASK_CREATE\x10\xe9\x07\"\x04\x08\x0f\x10\x0f\"\x06\x08\xea\x07\x10\xea\x07\x32\x66\n\x16mir_controller_service\x12L\n\x13\x64\x61ta_manage_request\x12\x18.ymir.backend.GeneralReq\x1a\x19.ymir.backend.GeneralResp\"\x00\x62\x06proto3' -) - -_TVTTYPE = _descriptor.EnumDescriptor( - name='TvtType', - full_name='ymir.backend.TvtType', - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name='TvtTypeUnknown', index=0, number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TvtTypeTraining', index=1, number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TvtTypeValidation', index=2, number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TvtTypeTest', index=3, number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - ], - containing_type=None, - serialized_options=None, - serialized_start=3748, - serialized_end=3838, -) -_sym_db.RegisterEnumDescriptor(_TVTTYPE) - -TvtType = 
enum_type_wrapper.EnumTypeWrapper(_TVTTYPE) -_TASKTYPE = _descriptor.EnumDescriptor( - name='TaskType', - full_name='ymir.backend.TaskType', - filename=None, - file=DESCRIPTOR, + serialized_options=b'Z\007/protos', create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name='TaskTypeUnknown', index=0, number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TaskTypeTraining', index=1, number=1, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TaskTypeMining', index=2, number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TaskTypeLabel', index=3, number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TaskTypeFilter', index=4, number=4, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TaskTypeImportData', index=5, number=5, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TaskTypeExportData', index=6, number=6, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TaskTypeCopyData', index=7, number=7, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TaskTypeMerge', index=8, number=8, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TaskTypeInfer', index=9, number=9, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TaskTypeSampling', index=10, number=10, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TaskTypeFusion', index=11, number=11, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TaskTypeImportModel', index=12, number=13, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TaskTypeCopyModel', index=13, number=14, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='TaskTypeDatasetInfer', index=14, number=15, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - ], - containing_type=None, - serialized_options=None, - serialized_start=3841, - serialized_end=4189, -) -_sym_db.RegisterEnumDescriptor(_TASKTYPE) + serialized_pb=b'\n\rbackend.proto\x12\x0cymir.backend\x1a\x11mir_command.proto\"\xa5\x06\n\nGeneralReq\x12\x0f\n\x07user_id\x18\x01 \x01(\t\x12\x0f\n\x07repo_id\x18\x02 \x01(\t\x12+\n\x08req_type\x18\x03 \x01(\x0e\x32\x19.ymir.backend.RequestType\x12\x0f\n\x07task_id\x18\x04 \x01(\t\x12\x14\n\x0csingleton_op\x18\x05 \x01(\t\x12\x13\n\x0bhis_task_id\x18\x06 \x01(\t\x12\x16\n\x0e\x64st_dataset_id\x18\x07 \x01(\t\x12\x16\n\x0ein_dataset_ids\x18\x08 \x03(\t\x12\x16\n\x0e\x65x_dataset_ids\x18\t \x03(\t\x12\x14\n\x0cin_class_ids\x18\n \x03(\x05\x12\x14\n\x0c\x65x_class_ids\x18\x0b 
\x03(\x05\x12\r\n\x05\x66orce\x18\x0c \x01(\x08\x12\x16\n\x0e\x63ommit_message\x18\r \x01(\t\x12\x12\n\nmodel_hash\x18\x0e \x01(\t\x12\x11\n\tasset_dir\x18\x0f \x01(\t\x12\x1b\n\x13\x64ocker_image_config\x18\x10 \x01(\t\x12\x12\n\ncheck_only\x18\x12 \x01(\x08\x12\x16\n\x0e\x65xecutant_name\x18\x13 \x01(\t\x12\x33\n\x0emerge_strategy\x18\x14 \x01(\x0e\x32\x1b.ymir.backend.MergeStrategy\x12\x33\n\x14terminated_task_type\x18\x15 \x01(\x0e\x32\x15.mir.command.TaskType\x12\x18\n\x0esampling_count\x18\x16 \x01(\x05H\x00\x12\x17\n\rsampling_rate\x18\x17 \x01(\x02H\x00\x12\x17\n\x0ftask_parameters\x18\x18 \x01(\t\x12\x37\n\x10label_collection\x18\x19 \x01(\x0b\x32\x1d.ymir.backend.LabelCollection\x12\x34\n\x0f\x65valuate_config\x18\x1a \x01(\x0b\x32\x1b.mir.command.EvaluateConfig\x12\x13\n\x0bmodel_stage\x18\x1b \x01(\t\x12\x35\n\x0freq_create_task\x18\xe9\x07 \x01(\x0b\x32\x1b.ymir.backend.ReqCreateTaskB\n\n\x08samplingJ\x04\x08\x11\x10\x12\"\xf1\x03\n\x0bGeneralResp\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0breq_task_id\x18\x02 \x01(\t\x12\x0f\n\x07message\x18\x03 \x01(\t\x12\x10\n\x08\x65xt_strs\x18\x04 \x03(\t\x12\x0f\n\x07hash_id\x18\x06 \x01(\t\x12M\n\x13\x64ocker_image_config\x18\x07 \x03(\x0b\x32\x30.ymir.backend.GeneralResp.DockerImageConfigEntry\x12\x1c\n\x14\x61vailable_gpu_counts\x18\x08 \x01(\x05\x12\x37\n\x10label_collection\x18\t \x01(\x0b\x32\x1d.ymir.backend.LabelCollection\x12\x0f\n\x07ops_ret\x18\n \x01(\x08\x12\x18\n\x10sandbox_versions\x18\x0b \x03(\t\x12\x32\n\tdetection\x18\xe8\x07 \x01(\x0b\x32\x1e.ymir.backend.RespCMDInference\x12\x18\n\x0f\x65nable_livecode\x18\xe9\x07 \x01(\x08\x12,\n\nevaluation\x18\xea\x07 \x01(\x0b\x32\x17.mir.command.Evaluation\x1a\x38\n\x16\x44ockerImageConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01J\x04\x08\x05\x10\x06\"\xb5\x03\n\rReqCreateTask\x12(\n\ttask_type\x18\x01 \x01(\x0e\x32\x15.mir.command.TaskType\x12\x17\n\x0fno_task_monitor\x18\x02 \x01(\x08\x12/\n\x08training\x18\x65 \x01(\x0b\x32\x1d.ymir.backend.TaskReqTraining\x12+\n\x06mining\x18\x66 \x01(\x0b\x32\x1b.ymir.backend.TaskReqMining\x12:\n\x0eimport_dataset\x18g \x01(\x0b\x32\".ymir.backend.TaskReqImportDataset\x12\x31\n\texporting\x18h \x01(\x0b\x32\x1e.ymir.backend.TaskReqExporting\x12+\n\x04\x63opy\x18i \x01(\x0b\x32\x1d.ymir.backend.TaskReqCopyData\x12/\n\x08labeling\x18j \x01(\x0b\x32\x1d.ymir.backend.TaskReqLabeling\x12\x36\n\x0cimport_model\x18k \x01(\x0b\x32 .ymir.backend.TaskReqImportModel\"\xd0\x01\n\x0fTaskReqTraining\x12K\n\x10in_dataset_types\x18\x01 \x03(\x0b\x32\x31.ymir.backend.TaskReqTraining.TrainingDatasetType\x12\x19\n\x11preprocess_config\x18\x02 \x01(\t\x1aU\n\x13TrainingDatasetType\x12\x12\n\ndataset_id\x18\x01 \x01(\t\x12*\n\x0c\x64\x61taset_type\x18\x02 \x01(\x0e\x32\x14.mir.command.TvtType\"<\n\rTaskReqMining\x12\r\n\x05top_k\x18\x01 \x01(\x05\x12\x1c\n\x14generate_annotations\x18\x02 \x01(\x08\"\xa3\x01\n\x14TaskReqImportDataset\x12\x11\n\tasset_dir\x18\x01 \x01(\t\x12\x10\n\x08pred_dir\x18\x02 \x01(\t\x12\x0e\n\x06gt_dir\x18\x03 \x01(\t\x12\x42\n\x16unknown_types_strategy\x18\x04 \x01(\x0e\x32\".ymir.backend.UnknownTypesStrategy\x12\x12\n\nclean_dirs\x18\x05 \x01(\x08\"\x84\x01\n\x10TaskReqExporting\x12\x12\n\ndataset_id\x18\x01 \x01(\t\x12\'\n\x06\x66ormat\x18\x02 \x01(\x0e\x32\x17.mir.command.AnnoFormat\x12\x11\n\tasset_dir\x18\x03 \x01(\t\x12\x10\n\x08pred_dir\x18\x04 \x01(\t\x12\x0e\n\x06gt_dir\x18\x05 \x01(\t\"s\n\x0fTaskReqCopyData\x12\x13\n\x0bsrc_user_id\x18\x01 
\x01(\t\x12\x13\n\x0bsrc_repo_id\x18\x02 \x01(\t\x12\x1c\n\x14name_strategy_ignore\x18\x03 \x01(\x08\x12\x18\n\x10\x64rop_annotations\x18\x04 \x01(\x08\"\xb3\x01\n\x0fTaskReqLabeling\x12\x18\n\x10labeler_accounts\x18\x01 \x03(\t\x12\x1e\n\x16\x65xpert_instruction_url\x18\x02 \x01(\t\x12\x14\n\x0cproject_name\x18\x03 \x01(\t\x12\x19\n\x11\x65xport_annotation\x18\x04 \x01(\x08\x12\x35\n\x0f\x61nnotation_type\x18\x05 \x01(\x0e\x32\x1c.ymir.backend.AnnotationType\"0\n\x12TaskReqImportModel\x12\x1a\n\x12model_package_path\x18\x01 \x01(\t\"\xc1\x01\n\x10RespCMDInference\x12O\n\x11image_annotations\x18\x01 \x03(\x0b\x32\x34.ymir.backend.RespCMDInference.ImageAnnotationsEntry\x1a\\\n\x15ImageAnnotationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x32\n\x05value\x18\x02 \x01(\x0b\x32#.mir.command.SingleImageAnnotations:\x02\x38\x01\"L\n\x0fLabelCollection\x12#\n\x06labels\x18\x01 \x03(\x0b\x32\x13.ymir.backend.Label\x12\x14\n\x0cymir_version\x18\x02 \x01(\t\"\\\n\x05Label\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0f\n\x07\x61liases\x18\x03 \x03(\t\x12\x13\n\x0b\x63reate_time\x18\x04 \x01(\t\x12\x13\n\x0bupdate_time\x18\x05 \x01(\t*.\n\rMergeStrategy\x12\x08\n\x04STOP\x10\x00\x12\x08\n\x04HOST\x10\x01\x12\t\n\x05GUEST\x10\x02*A\n\x14UnknownTypesStrategy\x12\x0c\n\x08UTS_STOP\x10\x00\x12\x0e\n\nUTS_IGNORE\x10\x01\x12\x0b\n\x07UTS_ADD\x10\x02*\x9e\x04\n\x0bRequestType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x12\n\x0e\x43MD_BRANCH_DEL\x10\x01\x12\x13\n\x0f\x43MD_BRANCH_LIST\x10\x02\x12\x17\n\x13\x43MD_BRANCH_CHECKOUT\x10\x03\x12\x15\n\x11\x43MD_BRANCH_CREATE\x10\x04\x12\r\n\tCMD_CLONE\x10\x05\x12\x0e\n\nCMD_COMMIT\x10\x06\x12\x0e\n\nCMD_FILTER\x10\x07\x12\x0c\n\x08\x43MD_INIT\x10\x08\x12\x0b\n\x07\x43MD_LOG\x10\t\x12\r\n\tCMD_MERGE\x10\n\x12\x11\n\rCMD_INFERENCE\x10\x0b\x12\x11\n\rCMD_LABEL_ADD\x10\x0c\x12\x11\n\rCMD_LABEL_GET\x10\r\x12\x11\n\rCMD_TERMINATE\x10\x0e\x12\x12\n\x0e\x43MD_PULL_IMAGE\x10\x10\x12\x14\n\x10\x43MD_GPU_INFO_GET\x10\x11\x12\x10\n\x0c\x43MD_SAMPLING\x10\x12\x12\x10\n\x0c\x43MD_EVALUATE\x10\x13\x12\x12\n\x0e\x43MD_REPO_CHECK\x10\x14\x12\x12\n\x0e\x43MD_REPO_CLEAR\x10\x15\x12\x14\n\x10\x43MD_VERSIONS_GET\x10\x16\x12\r\n\tUSER_LIST\x10\x65\x12\x0f\n\x0bUSER_CREATE\x10\x66\x12\x0f\n\x0bUSER_REMOVE\x10g\x12\r\n\tREPO_LIST\x10h\x12\x0f\n\x0bREPO_CREATE\x10i\x12\x0f\n\x0bREPO_REMOVE\x10j\x12\x10\n\x0bTASK_CREATE\x10\xe9\x07\"\x04\x08\x0f\x10\x0f*/\n\x0e\x41nnotationType\x12\x0b\n\x07NOT_SET\x10\x00\x12\x06\n\x02GT\x10\x01\x12\x08\n\x04PRED\x10\x02\x32\x66\n\x16mir_controller_service\x12L\n\x13\x64\x61ta_manage_request\x12\x18.ymir.backend.GeneralReq\x1a\x19.ymir.backend.GeneralResp\"\x00\x42\tZ\x07/protosb\x06proto3' + , + dependencies=[mir__command__pb2.DESCRIPTOR,]) -TaskType = enum_type_wrapper.EnumTypeWrapper(_TASKTYPE) -_LABELFORMAT = _descriptor.EnumDescriptor( - name='LabelFormat', - full_name='ymir.backend.LabelFormat', +_MERGESTRATEGY = _descriptor.EnumDescriptor( + name='MergeStrategy', + full_name='ymir.backend.MergeStrategy', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( - name='NO_ANNOTATION', index=0, number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='PASCAL_VOC', index=1, number=1, + name='STOP', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( - name='IF_ARK', index=2, number=2, + name='HOST', index=1, 
number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( - name='LABEL_STUDIO_JSON', index=3, number=3, + name='GUEST', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, - serialized_start=4191, - serialized_end=4274, + serialized_start=3089, + serialized_end=3135, ) -_sym_db.RegisterEnumDescriptor(_LABELFORMAT) +_sym_db.RegisterEnumDescriptor(_MERGESTRATEGY) -LabelFormat = enum_type_wrapper.EnumTypeWrapper(_LABELFORMAT) -_MIRSTORAGE = _descriptor.EnumDescriptor( - name='MirStorage', - full_name='ymir.backend.MirStorage', +MergeStrategy = enum_type_wrapper.EnumTypeWrapper(_MERGESTRATEGY) +_UNKNOWNTYPESSTRATEGY = _descriptor.EnumDescriptor( + name='UnknownTypesStrategy', + full_name='ymir.backend.UnknownTypesStrategy', filename=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( - name='MIR_METADATAS', index=0, number=0, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='MIR_ANNOTATIONS', index=1, number=1, + name='UTS_STOP', index=0, number=0, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( - name='MIR_KEYWORDS', index=2, number=2, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - _descriptor.EnumValueDescriptor( - name='MIR_TASKS', index=3, number=3, - serialized_options=None, - type=None, - create_key=_descriptor._internal_create_key), - ], - containing_type=None, - serialized_options=None, - serialized_start=4276, - serialized_end=4361, -) -_sym_db.RegisterEnumDescriptor(_MIRSTORAGE) - -MirStorage = enum_type_wrapper.EnumTypeWrapper(_MIRSTORAGE) -_MERGESTRATEGY = _descriptor.EnumDescriptor( - name='MergeStrategy', - full_name='ymir.backend.MergeStrategy', - filename=None, - file=DESCRIPTOR, - create_key=_descriptor._internal_create_key, - values=[ - _descriptor.EnumValueDescriptor( - name='STOP', index=0, number=0, + name='UTS_IGNORE', index=1, number=1, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( - name='HOST', index=1, number=1, + name='UTS_ADD', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, - serialized_start=4363, - serialized_end=4398, + serialized_start=3137, + serialized_end=3202, ) -_sym_db.RegisterEnumDescriptor(_MERGESTRATEGY) +_sym_db.RegisterEnumDescriptor(_UNKNOWNTYPESSTRATEGY) -MergeStrategy = enum_type_wrapper.EnumTypeWrapper(_MERGESTRATEGY) +UnknownTypesStrategy = enum_type_wrapper.EnumTypeWrapper(_UNKNOWNTYPESSTRATEGY) _REQUESTTYPE = _descriptor.EnumDescriptor( name='RequestType', full_name='ymir.backend.RequestType', @@ -361,78 +200,91 @@ type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( - name='USER_LIST', index=21, number=101, + name='CMD_VERSIONS_GET', index=21, number=22, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( - name='USER_CREATE', index=22, number=102, + name='USER_LIST', index=22, number=101, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( - name='USER_REMOVE', index=23, number=103, + name='USER_CREATE', 
index=23, number=102, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( - name='REPO_LIST', index=24, number=104, + name='USER_REMOVE', index=24, number=103, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( - name='REPO_CREATE', index=25, number=105, + name='REPO_LIST', index=25, number=104, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( - name='REPO_REMOVE', index=26, number=106, + name='REPO_CREATE', index=26, number=105, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( - name='TASK_CREATE', index=27, number=1001, + name='REPO_REMOVE', index=27, number=106, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TASK_CREATE', index=28, number=1001, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, - serialized_start=4401, - serialized_end=4929, + serialized_start=3205, + serialized_end=3747, ) _sym_db.RegisterEnumDescriptor(_REQUESTTYPE) RequestType = enum_type_wrapper.EnumTypeWrapper(_REQUESTTYPE) -TvtTypeUnknown = 0 -TvtTypeTraining = 1 -TvtTypeValidation = 2 -TvtTypeTest = 3 -TaskTypeUnknown = 0 -TaskTypeTraining = 1 -TaskTypeMining = 2 -TaskTypeLabel = 3 -TaskTypeFilter = 4 -TaskTypeImportData = 5 -TaskTypeExportData = 6 -TaskTypeCopyData = 7 -TaskTypeMerge = 8 -TaskTypeInfer = 9 -TaskTypeSampling = 10 -TaskTypeFusion = 11 -TaskTypeImportModel = 13 -TaskTypeCopyModel = 14 -TaskTypeDatasetInfer = 15 -NO_ANNOTATION = 0 -PASCAL_VOC = 1 -IF_ARK = 2 -LABEL_STUDIO_JSON = 3 -MIR_METADATAS = 0 -MIR_ANNOTATIONS = 1 -MIR_KEYWORDS = 2 -MIR_TASKS = 3 +_ANNOTATIONTYPE = _descriptor.EnumDescriptor( + name='AnnotationType', + full_name='ymir.backend.AnnotationType', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='NOT_SET', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='GT', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='PRED', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + serialized_start=3749, + serialized_end=3796, +) +_sym_db.RegisterEnumDescriptor(_ANNOTATIONTYPE) + +AnnotationType = enum_type_wrapper.EnumTypeWrapper(_ANNOTATIONTYPE) STOP = 0 HOST = 1 +GUEST = 2 +UTS_STOP = 0 +UTS_IGNORE = 1 +UTS_ADD = 2 UNKNOWN = 0 CMD_BRANCH_DEL = 1 CMD_BRANCH_LIST = 2 @@ -454,6 +306,7 @@ CMD_EVALUATE = 19 CMD_REPO_CHECK = 20 CMD_REPO_CLEAR = 21 +CMD_VERSIONS_GET = 22 USER_LIST = 101 USER_CREATE = 102 USER_REMOVE = 103 @@ -461,6 +314,9 @@ REPO_CREATE = 105 REPO_REMOVE = 106 TASK_CREATE = 1001 +NOT_SET = 0 +GT = 1 +PRED = 2 @@ -648,7 +504,14 @@ is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='req_create_task', full_name='ymir.backend.GeneralReq.req_create_task', index=25, + name='model_stage', full_name='ymir.backend.GeneralReq.model_stage', index=25, + number=27, type=9, 
cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='req_create_task', full_name='ymir.backend.GeneralReq.req_create_task', index=26, number=1001, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, @@ -671,8 +534,8 @@ create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=32, - serialized_end=826, + serialized_start=51, + serialized_end=856, ) @@ -710,8 +573,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1166, - serialized_end=1222, + serialized_start=1294, + serialized_end=1350, ) _GENERALRESP = _descriptor.Descriptor( @@ -786,8 +649,29 @@ is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='detection', full_name='ymir.backend.GeneralResp.detection', index=9, - number=1001, type=11, cpp_type=10, label=1, + name='sandbox_versions', full_name='ymir.backend.GeneralResp.sandbox_versions', index=9, + number=11, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='detection', full_name='ymir.backend.GeneralResp.detection', index=10, + number=1000, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='enable_livecode', full_name='ymir.backend.GeneralResp.enable_livecode', index=11, + number=1001, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='evaluation', full_name='ymir.backend.GeneralResp.evaluation', index=12, + number=1002, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, @@ -804,8 +688,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=829, - serialized_end=1236, + serialized_start=859, + serialized_end=1356, ) @@ -825,135 +709,61 @@ is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='sampling_rate', full_name='ymir.backend.ReqCreateTask.sampling_rate', index=1, - number=2, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=float(0), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='no_task_monitor', full_name='ymir.backend.ReqCreateTask.no_task_monitor', index=2, - number=3, type=8, cpp_type=7, label=1, + 
name='no_task_monitor', full_name='ymir.backend.ReqCreateTask.no_task_monitor', index=1, + number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='filter', full_name='ymir.backend.ReqCreateTask.filter', index=3, + name='training', full_name='ymir.backend.ReqCreateTask.training', index=2, number=101, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=b'\030\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='training', full_name='ymir.backend.ReqCreateTask.training', index=4, + name='mining', full_name='ymir.backend.ReqCreateTask.mining', index=3, number=102, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='mining', full_name='ymir.backend.ReqCreateTask.mining', index=5, + name='import_dataset', full_name='ymir.backend.ReqCreateTask.import_dataset', index=4, number=103, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='importing', full_name='ymir.backend.ReqCreateTask.importing', index=6, + name='exporting', full_name='ymir.backend.ReqCreateTask.exporting', index=5, number=104, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='exporting', full_name='ymir.backend.ReqCreateTask.exporting', index=7, + name='copy', full_name='ymir.backend.ReqCreateTask.copy', index=6, number=105, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='inference', full_name='ymir.backend.ReqCreateTask.inference', index=8, + name='labeling', full_name='ymir.backend.ReqCreateTask.labeling', index=7, number=106, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='copy', full_name='ymir.backend.ReqCreateTask.copy', index=9, + name='import_model', full_name='ymir.backend.ReqCreateTask.import_model', index=8, number=107, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, 
is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='labeling', full_name='ymir.backend.ReqCreateTask.labeling', index=10, - number=108, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='fusion', full_name='ymir.backend.ReqCreateTask.fusion', index=11, - number=109, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='model_importing', full_name='ymir.backend.ReqCreateTask.model_importing', index=12, - number=110, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1239, - serialized_end=1842, -) - - -_TASKREQFILTER = _descriptor.Descriptor( - name='TaskReqFilter', - full_name='ymir.backend.TaskReqFilter', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='in_dataset_ids', full_name='ymir.backend.TaskReqFilter.in_dataset_ids', index=0, - number=1, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='in_class_ids', full_name='ymir.backend.TaskReqFilter.in_class_ids', index=1, - number=2, type=5, cpp_type=1, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='ex_class_ids', full_name='ymir.backend.TaskReqFilter.ex_class_ids', index=2, - number=3, type=5, cpp_type=1, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], @@ -966,8 +776,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1844, - serialized_end=1927, + serialized_start=1359, + serialized_end=1796, ) @@ -1005,8 +815,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2048, - serialized_end=2134, + serialized_start=1922, + serialized_end=2007, ) _TASKREQTRAINING = _descriptor.Descriptor( @@ -1025,9 +835,9 @@ is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='in_class_ids', 
full_name='ymir.backend.TaskReqTraining.in_class_ids', index=1, - number=2, type=5, cpp_type=1, label=3, - has_default_value=False, default_value=[], + name='preprocess_config', full_name='ymir.backend.TaskReqTraining.preprocess_config', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), @@ -1043,8 +853,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1930, - serialized_end=2140, + serialized_start=1799, + serialized_end=2007, ) @@ -1057,29 +867,15 @@ create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( - name='in_dataset_ids', full_name='ymir.backend.TaskReqMining.in_dataset_ids', index=0, - number=1, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='ex_dataset_ids', full_name='ymir.backend.TaskReqMining.ex_dataset_ids', index=1, - number=2, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='top_k', full_name='ymir.backend.TaskReqMining.top_k', index=2, - number=4, type=5, cpp_type=1, label=1, + name='top_k', full_name='ymir.backend.TaskReqMining.top_k', index=0, + number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='generate_annotations', full_name='ymir.backend.TaskReqMining.generate_annotations', index=3, - number=6, type=8, cpp_type=7, label=1, + name='generate_annotations', full_name='ymir.backend.TaskReqMining.generate_annotations', index=1, + number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, @@ -1096,36 +892,50 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2142, - serialized_end=2262, + serialized_start=2009, + serialized_end=2069, ) -_TASKREQIMPORTING = _descriptor.Descriptor( - name='TaskReqImporting', - full_name='ymir.backend.TaskReqImporting', +_TASKREQIMPORTDATASET = _descriptor.Descriptor( + name='TaskReqImportDataset', + full_name='ymir.backend.TaskReqImportDataset', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( - name='asset_dir', full_name='ymir.backend.TaskReqImporting.asset_dir', index=0, + name='asset_dir', full_name='ymir.backend.TaskReqImportDataset.asset_dir', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='annotation_dir', 
full_name='ymir.backend.TaskReqImporting.annotation_dir', index=1, + name='pred_dir', full_name='ymir.backend.TaskReqImportDataset.pred_dir', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='name_strategy_ignore', full_name='ymir.backend.TaskReqImporting.name_strategy_ignore', index=2, - number=3, type=8, cpp_type=7, label=1, + name='gt_dir', full_name='ymir.backend.TaskReqImportDataset.gt_dir', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='unknown_types_strategy', full_name='ymir.backend.TaskReqImportDataset.unknown_types_strategy', index=3, + number=4, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='clean_dirs', full_name='ymir.backend.TaskReqImportDataset.clean_dirs', index=4, + number=5, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, @@ -1142,8 +952,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2264, - serialized_end=2355, + serialized_start=2072, + serialized_end=2235, ) @@ -1177,12 +987,19 @@ is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='annotation_dir', full_name='ymir.backend.TaskReqExporting.annotation_dir', index=3, + name='pred_dir', full_name='ymir.backend.TaskReqExporting.pred_dir', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='gt_dir', full_name='ymir.backend.TaskReqExporting.gt_dir', index=4, + number=5, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], @@ -1195,33 +1012,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2357, - serialized_end=2481, -) - - -_TASKREQINFERENCE = _descriptor.Descriptor( - name='TaskReqInference', - full_name='ymir.backend.TaskReqInference', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2483, - serialized_end=2501, + serialized_start=2238, + serialized_end=2370, ) @@ 
-1248,22 +1040,15 @@ is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='src_dataset_id', full_name='ymir.backend.TaskReqCopyData.src_dataset_id', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='name_strategy_ignore', full_name='ymir.backend.TaskReqCopyData.name_strategy_ignore', index=3, - number=4, type=8, cpp_type=7, label=1, + name='name_strategy_ignore', full_name='ymir.backend.TaskReqCopyData.name_strategy_ignore', index=2, + number=3, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='drop_annotations', full_name='ymir.backend.TaskReqCopyData.drop_annotations', index=4, - number=5, type=8, cpp_type=7, label=1, + name='drop_annotations', full_name='ymir.backend.TaskReqCopyData.drop_annotations', index=3, + number=4, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, @@ -1280,8 +1065,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2504, - serialized_end=2643, + serialized_start=2372, + serialized_end=2487, ) @@ -1294,121 +1079,40 @@ create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( - name='dataset_id', full_name='ymir.backend.TaskReqLabeling.dataset_id', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='labeler_accounts', full_name='ymir.backend.TaskReqLabeling.labeler_accounts', index=1, - number=2, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='in_class_ids', full_name='ymir.backend.TaskReqLabeling.in_class_ids', index=2, - number=3, type=5, cpp_type=1, label=3, + name='labeler_accounts', full_name='ymir.backend.TaskReqLabeling.labeler_accounts', index=0, + number=1, type=9, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='expert_instruction_url', full_name='ymir.backend.TaskReqLabeling.expert_instruction_url', index=3, - number=4, type=9, cpp_type=9, label=1, + name='expert_instruction_url', full_name='ymir.backend.TaskReqLabeling.expert_instruction_url', index=1, + number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, 
containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='project_name', full_name='ymir.backend.TaskReqLabeling.project_name', index=4, - number=5, type=9, cpp_type=9, label=1, + name='project_name', full_name='ymir.backend.TaskReqLabeling.project_name', index=2, + number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='export_annotation', full_name='ymir.backend.TaskReqLabeling.export_annotation', index=5, - number=6, type=8, cpp_type=7, label=1, + name='export_annotation', full_name='ymir.backend.TaskReqLabeling.export_annotation', index=3, + number=4, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2646, - serialized_end=2812, -) - - -_TASKREQFUSION = _descriptor.Descriptor( - name='TaskReqFusion', - full_name='ymir.backend.TaskReqFusion', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='in_dataset_ids', full_name='ymir.backend.TaskReqFusion.in_dataset_ids', index=0, - number=1, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='ex_dataset_ids', full_name='ymir.backend.TaskReqFusion.ex_dataset_ids', index=1, - number=2, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='merge_strategy', full_name='ymir.backend.TaskReqFusion.merge_strategy', index=2, - number=3, type=14, cpp_type=8, label=1, + name='annotation_type', full_name='ymir.backend.TaskReqLabeling.annotation_type', index=4, + number=5, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='in_class_ids', full_name='ymir.backend.TaskReqFusion.in_class_ids', index=3, - number=4, type=5, cpp_type=1, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='ex_class_ids', full_name='ymir.backend.TaskReqFusion.ex_class_ids', index=4, - number=5, type=5, cpp_type=1, label=3, - 
has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='count', full_name='ymir.backend.TaskReqFusion.count', index=5, - number=6, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='rate', full_name='ymir.backend.TaskReqFusion.rate', index=6, - number=7, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=float(0), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], @@ -1420,27 +1124,22 @@ syntax='proto3', extension_ranges=[], oneofs=[ - _descriptor.OneofDescriptor( - name='sampling', full_name='ymir.backend.TaskReqFusion.sampling', - index=0, containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[]), ], - serialized_start=2815, - serialized_end=3020, + serialized_start=2490, + serialized_end=2669, ) -_TASKREQMODELIMPORTING = _descriptor.Descriptor( - name='TaskReqModelImporting', - full_name='ymir.backend.TaskReqModelImporting', +_TASKREQIMPORTMODEL = _descriptor.Descriptor( + name='TaskReqImportModel', + full_name='ymir.backend.TaskReqImportModel', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( - name='model_package_path', full_name='ymir.backend.TaskReqModelImporting.model_package_path', index=0, + name='model_package_path', full_name='ymir.backend.TaskReqImportModel.model_package_path', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, @@ -1458,8 +1157,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=3022, - serialized_end=3073, + serialized_start=2671, + serialized_end=2719, ) @@ -1497,8 +1196,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=3177, - serialized_end=3270, + serialized_start=2823, + serialized_end=2915, ) _RESPCMDINFERENCE = _descriptor.Descriptor( @@ -1528,153 +1227,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=3076, - serialized_end=3270, -) - - -_SINGLEIMAGEANNOTATIONS = _descriptor.Descriptor( - name='SingleImageAnnotations', - full_name='ymir.backend.SingleImageAnnotations', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='annotations', full_name='ymir.backend.SingleImageAnnotations.annotations', index=0, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=3272, - serialized_end=3343, -) - - -_ANNOTATION = _descriptor.Descriptor( - name='Annotation', - 
full_name='ymir.backend.Annotation', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='index', full_name='ymir.backend.Annotation.index', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='box', full_name='ymir.backend.Annotation.box', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='class_id', full_name='ymir.backend.Annotation.class_id', index=2, - number=3, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='score', full_name='ymir.backend.Annotation.score', index=3, - number=4, type=1, cpp_type=5, label=1, - has_default_value=False, default_value=float(0), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='class_name', full_name='ymir.backend.Annotation.class_name', index=4, - number=5, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=3345, - serialized_end=3458, -) - - -_RECT = _descriptor.Descriptor( - name='Rect', - full_name='ymir.backend.Rect', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='x', full_name='ymir.backend.Rect.x', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='y', full_name='ymir.backend.Rect.y', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='w', full_name='ymir.backend.Rect.w', index=2, - number=3, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='h', full_name='ymir.backend.Rect.h', index=3, - number=4, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=3460, - serialized_end=3510, + serialized_start=2722, + serialized_end=2915, ) @@ -1693,6 +1247,13 @@ message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='ymir_version', full_name='ymir.backend.LabelCollection.ymir_version', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], @@ -1705,8 +1266,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=3512, - serialized_end=3566, + serialized_start=2917, + serialized_end=2993, ) @@ -1765,61 +1326,15 @@ extension_ranges=[], oneofs=[ ], - serialized_start=3568, - serialized_end=3660, -) - - -_EVALUATECONFIG = _descriptor.Descriptor( - name='EvaluateConfig', - full_name='ymir.backend.EvaluateConfig', - filename=None, - file=DESCRIPTOR, - containing_type=None, - create_key=_descriptor._internal_create_key, - fields=[ - _descriptor.FieldDescriptor( - name='conf_thr', full_name='ymir.backend.EvaluateConfig.conf_thr', index=0, - number=1, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=float(0), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='iou_thrs_interval', full_name='ymir.backend.EvaluateConfig.iou_thrs_interval', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='need_pr_curve', full_name='ymir.backend.EvaluateConfig.need_pr_curve', index=2, - number=3, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - serialized_options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=3662, - serialized_end=3746, + serialized_start=2995, + serialized_end=3087, ) _GENERALREQ.fields_by_name['req_type'].enum_type = _REQUESTTYPE _GENERALREQ.fields_by_name['merge_strategy'].enum_type = _MERGESTRATEGY -_GENERALREQ.fields_by_name['terminated_task_type'].enum_type = _TASKTYPE 
+_GENERALREQ.fields_by_name['terminated_task_type'].enum_type = mir__command__pb2._TASKTYPE _GENERALREQ.fields_by_name['label_collection'].message_type = _LABELCOLLECTION -_GENERALREQ.fields_by_name['evaluate_config'].message_type = _EVALUATECONFIG +_GENERALREQ.fields_by_name['evaluate_config'].message_type = mir__command__pb2._EVALUATECONFIG _GENERALREQ.fields_by_name['req_create_task'].message_type = _REQCREATETASK _GENERALREQ.oneofs_by_name['sampling'].fields.append( _GENERALREQ.fields_by_name['sampling_count']) @@ -1831,60 +1346,42 @@ _GENERALRESP.fields_by_name['docker_image_config'].message_type = _GENERALRESP_DOCKERIMAGECONFIGENTRY _GENERALRESP.fields_by_name['label_collection'].message_type = _LABELCOLLECTION _GENERALRESP.fields_by_name['detection'].message_type = _RESPCMDINFERENCE -_REQCREATETASK.fields_by_name['task_type'].enum_type = _TASKTYPE -_REQCREATETASK.fields_by_name['filter'].message_type = _TASKREQFILTER +_GENERALRESP.fields_by_name['evaluation'].message_type = mir__command__pb2._EVALUATION +_REQCREATETASK.fields_by_name['task_type'].enum_type = mir__command__pb2._TASKTYPE _REQCREATETASK.fields_by_name['training'].message_type = _TASKREQTRAINING _REQCREATETASK.fields_by_name['mining'].message_type = _TASKREQMINING -_REQCREATETASK.fields_by_name['importing'].message_type = _TASKREQIMPORTING +_REQCREATETASK.fields_by_name['import_dataset'].message_type = _TASKREQIMPORTDATASET _REQCREATETASK.fields_by_name['exporting'].message_type = _TASKREQEXPORTING -_REQCREATETASK.fields_by_name['inference'].message_type = _TASKREQINFERENCE _REQCREATETASK.fields_by_name['copy'].message_type = _TASKREQCOPYDATA _REQCREATETASK.fields_by_name['labeling'].message_type = _TASKREQLABELING -_REQCREATETASK.fields_by_name['fusion'].message_type = _TASKREQFUSION -_REQCREATETASK.fields_by_name['model_importing'].message_type = _TASKREQMODELIMPORTING -_TASKREQTRAINING_TRAININGDATASETTYPE.fields_by_name['dataset_type'].enum_type = _TVTTYPE +_REQCREATETASK.fields_by_name['import_model'].message_type = _TASKREQIMPORTMODEL +_TASKREQTRAINING_TRAININGDATASETTYPE.fields_by_name['dataset_type'].enum_type = mir__command__pb2._TVTTYPE _TASKREQTRAINING_TRAININGDATASETTYPE.containing_type = _TASKREQTRAINING _TASKREQTRAINING.fields_by_name['in_dataset_types'].message_type = _TASKREQTRAINING_TRAININGDATASETTYPE -_TASKREQEXPORTING.fields_by_name['format'].enum_type = _LABELFORMAT -_TASKREQFUSION.fields_by_name['merge_strategy'].enum_type = _MERGESTRATEGY -_TASKREQFUSION.oneofs_by_name['sampling'].fields.append( - _TASKREQFUSION.fields_by_name['count']) -_TASKREQFUSION.fields_by_name['count'].containing_oneof = _TASKREQFUSION.oneofs_by_name['sampling'] -_TASKREQFUSION.oneofs_by_name['sampling'].fields.append( - _TASKREQFUSION.fields_by_name['rate']) -_TASKREQFUSION.fields_by_name['rate'].containing_oneof = _TASKREQFUSION.oneofs_by_name['sampling'] -_RESPCMDINFERENCE_IMAGEANNOTATIONSENTRY.fields_by_name['value'].message_type = _SINGLEIMAGEANNOTATIONS +_TASKREQIMPORTDATASET.fields_by_name['unknown_types_strategy'].enum_type = _UNKNOWNTYPESSTRATEGY +_TASKREQEXPORTING.fields_by_name['format'].enum_type = mir__command__pb2._ANNOFORMAT +_TASKREQLABELING.fields_by_name['annotation_type'].enum_type = _ANNOTATIONTYPE +_RESPCMDINFERENCE_IMAGEANNOTATIONSENTRY.fields_by_name['value'].message_type = mir__command__pb2._SINGLEIMAGEANNOTATIONS _RESPCMDINFERENCE_IMAGEANNOTATIONSENTRY.containing_type = _RESPCMDINFERENCE _RESPCMDINFERENCE.fields_by_name['image_annotations'].message_type = _RESPCMDINFERENCE_IMAGEANNOTATIONSENTRY 
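Note the pattern in the hunk above: backend_pb2 no longer carries private copies of TaskType, TvtType, EvaluateConfig or the annotation messages; they now resolve to the shared mir_command_pb2 module (imported as mir__command__pb2). A minimal caller-side sketch of the new dependency, assuming both generated modules are importable as set up by the build script; the enum value name and the conf_thr field are assumptions carried over from the removed backend-local definitions, not taken verbatim from this diff:

    from mir.protos import mir_command_pb2
    from proto import backend_pb2

    req = backend_pb2.GeneralReq()
    # TaskType values now come from the shared proto, not a backend-local enum
    req.terminated_task_type = mir_command_pb2.TaskTypeMining  # value name assumed
    # EvaluateConfig is likewise the mir_command_pb2 message now
    req.evaluate_config.conf_thr = 0.005  # field assumed unchanged in the shared message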
-_SINGLEIMAGEANNOTATIONS.fields_by_name['annotations'].message_type = _ANNOTATION -_ANNOTATION.fields_by_name['box'].message_type = _RECT _LABELCOLLECTION.fields_by_name['labels'].message_type = _LABEL DESCRIPTOR.message_types_by_name['GeneralReq'] = _GENERALREQ DESCRIPTOR.message_types_by_name['GeneralResp'] = _GENERALRESP DESCRIPTOR.message_types_by_name['ReqCreateTask'] = _REQCREATETASK -DESCRIPTOR.message_types_by_name['TaskReqFilter'] = _TASKREQFILTER DESCRIPTOR.message_types_by_name['TaskReqTraining'] = _TASKREQTRAINING DESCRIPTOR.message_types_by_name['TaskReqMining'] = _TASKREQMINING -DESCRIPTOR.message_types_by_name['TaskReqImporting'] = _TASKREQIMPORTING +DESCRIPTOR.message_types_by_name['TaskReqImportDataset'] = _TASKREQIMPORTDATASET DESCRIPTOR.message_types_by_name['TaskReqExporting'] = _TASKREQEXPORTING -DESCRIPTOR.message_types_by_name['TaskReqInference'] = _TASKREQINFERENCE DESCRIPTOR.message_types_by_name['TaskReqCopyData'] = _TASKREQCOPYDATA DESCRIPTOR.message_types_by_name['TaskReqLabeling'] = _TASKREQLABELING -DESCRIPTOR.message_types_by_name['TaskReqFusion'] = _TASKREQFUSION -DESCRIPTOR.message_types_by_name['TaskReqModelImporting'] = _TASKREQMODELIMPORTING +DESCRIPTOR.message_types_by_name['TaskReqImportModel'] = _TASKREQIMPORTMODEL DESCRIPTOR.message_types_by_name['RespCMDInference'] = _RESPCMDINFERENCE -DESCRIPTOR.message_types_by_name['SingleImageAnnotations'] = _SINGLEIMAGEANNOTATIONS -DESCRIPTOR.message_types_by_name['Annotation'] = _ANNOTATION -DESCRIPTOR.message_types_by_name['Rect'] = _RECT DESCRIPTOR.message_types_by_name['LabelCollection'] = _LABELCOLLECTION DESCRIPTOR.message_types_by_name['Label'] = _LABEL -DESCRIPTOR.message_types_by_name['EvaluateConfig'] = _EVALUATECONFIG -DESCRIPTOR.enum_types_by_name['TvtType'] = _TVTTYPE -DESCRIPTOR.enum_types_by_name['TaskType'] = _TASKTYPE -DESCRIPTOR.enum_types_by_name['LabelFormat'] = _LABELFORMAT -DESCRIPTOR.enum_types_by_name['MirStorage'] = _MIRSTORAGE DESCRIPTOR.enum_types_by_name['MergeStrategy'] = _MERGESTRATEGY +DESCRIPTOR.enum_types_by_name['UnknownTypesStrategy'] = _UNKNOWNTYPESSTRATEGY DESCRIPTOR.enum_types_by_name['RequestType'] = _REQUESTTYPE +DESCRIPTOR.enum_types_by_name['AnnotationType'] = _ANNOTATIONTYPE _sym_db.RegisterFileDescriptor(DESCRIPTOR) GeneralReq = _reflection.GeneratedProtocolMessageType('GeneralReq', (_message.Message,), { @@ -1916,13 +1413,6 @@ }) _sym_db.RegisterMessage(ReqCreateTask) -TaskReqFilter = _reflection.GeneratedProtocolMessageType('TaskReqFilter', (_message.Message,), { - 'DESCRIPTOR' : _TASKREQFILTER, - '__module__' : 'backend_pb2' - # @@protoc_insertion_point(class_scope:ymir.backend.TaskReqFilter) - }) -_sym_db.RegisterMessage(TaskReqFilter) - TaskReqTraining = _reflection.GeneratedProtocolMessageType('TaskReqTraining', (_message.Message,), { 'TrainingDatasetType' : _reflection.GeneratedProtocolMessageType('TrainingDatasetType', (_message.Message,), { @@ -1945,12 +1435,12 @@ }) _sym_db.RegisterMessage(TaskReqMining) -TaskReqImporting = _reflection.GeneratedProtocolMessageType('TaskReqImporting', (_message.Message,), { - 'DESCRIPTOR' : _TASKREQIMPORTING, +TaskReqImportDataset = _reflection.GeneratedProtocolMessageType('TaskReqImportDataset', (_message.Message,), { + 'DESCRIPTOR' : _TASKREQIMPORTDATASET, '__module__' : 'backend_pb2' - # @@protoc_insertion_point(class_scope:ymir.backend.TaskReqImporting) + # @@protoc_insertion_point(class_scope:ymir.backend.TaskReqImportDataset) }) -_sym_db.RegisterMessage(TaskReqImporting) +_sym_db.RegisterMessage(TaskReqImportDataset) 
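The renames above are mechanical but API-visible: TaskReqImporting and TaskReqModelImporting become TaskReqImportDataset and TaskReqImportModel, while TaskReqFilter, TaskReqInference and TaskReqFusion are dropped outright. A hypothetical caller-side sketch using the new names (paths and values are placeholders):

    from proto import backend_pb2

    create_task = backend_pb2.ReqCreateTask()
    # was: create_task.importing
    create_task.import_dataset.unknown_types_strategy = (
        backend_pb2.UnknownTypesStrategy.UTS_IGNORE)
    # was: create_task.model_importing
    create_task.import_model.model_package_path = "/tmp/model_package.tar.gz"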
TaskReqExporting = _reflection.GeneratedProtocolMessageType('TaskReqExporting', (_message.Message,), { 'DESCRIPTOR' : _TASKREQEXPORTING, @@ -1959,13 +1449,6 @@ }) _sym_db.RegisterMessage(TaskReqExporting) -TaskReqInference = _reflection.GeneratedProtocolMessageType('TaskReqInference', (_message.Message,), { - 'DESCRIPTOR' : _TASKREQINFERENCE, - '__module__' : 'backend_pb2' - # @@protoc_insertion_point(class_scope:ymir.backend.TaskReqInference) - }) -_sym_db.RegisterMessage(TaskReqInference) - TaskReqCopyData = _reflection.GeneratedProtocolMessageType('TaskReqCopyData', (_message.Message,), { 'DESCRIPTOR' : _TASKREQCOPYDATA, '__module__' : 'backend_pb2' @@ -1980,19 +1463,12 @@ }) _sym_db.RegisterMessage(TaskReqLabeling) -TaskReqFusion = _reflection.GeneratedProtocolMessageType('TaskReqFusion', (_message.Message,), { - 'DESCRIPTOR' : _TASKREQFUSION, +TaskReqImportModel = _reflection.GeneratedProtocolMessageType('TaskReqImportModel', (_message.Message,), { + 'DESCRIPTOR' : _TASKREQIMPORTMODEL, '__module__' : 'backend_pb2' - # @@protoc_insertion_point(class_scope:ymir.backend.TaskReqFusion) + # @@protoc_insertion_point(class_scope:ymir.backend.TaskReqImportModel) }) -_sym_db.RegisterMessage(TaskReqFusion) - -TaskReqModelImporting = _reflection.GeneratedProtocolMessageType('TaskReqModelImporting', (_message.Message,), { - 'DESCRIPTOR' : _TASKREQMODELIMPORTING, - '__module__' : 'backend_pb2' - # @@protoc_insertion_point(class_scope:ymir.backend.TaskReqModelImporting) - }) -_sym_db.RegisterMessage(TaskReqModelImporting) +_sym_db.RegisterMessage(TaskReqImportModel) RespCMDInference = _reflection.GeneratedProtocolMessageType('RespCMDInference', (_message.Message,), { @@ -2009,27 +1485,6 @@ _sym_db.RegisterMessage(RespCMDInference) _sym_db.RegisterMessage(RespCMDInference.ImageAnnotationsEntry) -SingleImageAnnotations = _reflection.GeneratedProtocolMessageType('SingleImageAnnotations', (_message.Message,), { - 'DESCRIPTOR' : _SINGLEIMAGEANNOTATIONS, - '__module__' : 'backend_pb2' - # @@protoc_insertion_point(class_scope:ymir.backend.SingleImageAnnotations) - }) -_sym_db.RegisterMessage(SingleImageAnnotations) - -Annotation = _reflection.GeneratedProtocolMessageType('Annotation', (_message.Message,), { - 'DESCRIPTOR' : _ANNOTATION, - '__module__' : 'backend_pb2' - # @@protoc_insertion_point(class_scope:ymir.backend.Annotation) - }) -_sym_db.RegisterMessage(Annotation) - -Rect = _reflection.GeneratedProtocolMessageType('Rect', (_message.Message,), { - 'DESCRIPTOR' : _RECT, - '__module__' : 'backend_pb2' - # @@protoc_insertion_point(class_scope:ymir.backend.Rect) - }) -_sym_db.RegisterMessage(Rect) - LabelCollection = _reflection.GeneratedProtocolMessageType('LabelCollection', (_message.Message,), { 'DESCRIPTOR' : _LABELCOLLECTION, '__module__' : 'backend_pb2' @@ -2044,16 +1499,9 @@ }) _sym_db.RegisterMessage(Label) -EvaluateConfig = _reflection.GeneratedProtocolMessageType('EvaluateConfig', (_message.Message,), { - 'DESCRIPTOR' : _EVALUATECONFIG, - '__module__' : 'backend_pb2' - # @@protoc_insertion_point(class_scope:ymir.backend.EvaluateConfig) - }) -_sym_db.RegisterMessage(EvaluateConfig) - +DESCRIPTOR._options = None _GENERALRESP_DOCKERIMAGECONFIGENTRY._options = None -_REQCREATETASK.fields_by_name['filter']._options = None _RESPCMDINFERENCE_IMAGEANNOTATIONSENTRY._options = None _MIR_CONTROLLER_SERVICE = _descriptor.ServiceDescriptor( @@ -2063,8 +1511,8 @@ index=0, serialized_options=None, create_key=_descriptor._internal_create_key, - serialized_start=4931, - serialized_end=5033, + 
serialized_start=3798, + serialized_end=3900, methods=[ _descriptor.MethodDescriptor( name='data_manage_request', diff --git a/ymir/backend/src/common/proto/backend_pb2_grpc.py b/ymir/backend/src/common/proto/backend_pb2_grpc.py index 7d776f457c..cac68ea509 100644 --- a/ymir/backend/src/common/proto/backend_pb2_grpc.py +++ b/ymir/backend/src/common/proto/backend_pb2_grpc.py @@ -25,268 +25,7 @@ class mir_controller_serviceServicer(object): """Missing associated documentation comment in .proto file.""" def data_manage_request(self, request, context): - """ - APIS FOR DATA MANAGEMENT - KEY CONCEPTS - sandbox: sandbox = sandbox_dir + docker_container - sandbox_dir = sandbox_root + user_name - docker_container = container of docker_image - where sandbox_root and docker_image are get from cli args - one user should have only one sandbox - but can have multiple mir repos in this sandbox - - CREATE_SANDBOX - creates a sandbox for a single user - Args: - GeneralReq.user: user name for this sandbox - Returns: - 0: success - errors when: - sandbox already exists - other system errors occurred - - REMOVE_SANDBOX - removes a sandbox for a single user - it also removes all contents in the sandbox - Args: - GeneralReq.user: user name for this sandbox - Returns: - 0: success - errors when: - sandbox not exists - other system errors occurred - - START_SANDBOX - starts a sandbox for a single user - Args: - GeneralReq.user: user name for this sandbox - Returns: - 0: success - errors when: - sandbox not exists - sandbox already started - other docker errors occurred - - STOP_SANDBOX - stops a sandbox for a single user - Args: - GeneralReq.user: user name for this sandbox - Returns: - 0: success - errors when: - sandbox not exists - sandbox already stopped - other docker errors occurred - - INIT - init a new mir repo in a running sandbox - Args: - GeneralReq.user: user name for this sandbox - GeneralReq.repo: repo name - Returns: - 0: success - errors when: - sandbox not exists - sandbox not running - already have mir repo with the same name - other docker errors occurred - other mir errors occurred - - BRANCH_LIST - list all branches in running sandbox for user - Args: - GeneralReq.user: user name for this sandbox - GeneralReq.repo: repo name - GeneralReq.ext_bool: if true, lists remote branches - if false, lists local branches - Returns: - 0: success - ext_strs: branches - errors when: - sandbox not exists - sandbox not running - repo not found - other docker errors occurred - other mir errors occurred - - BRANCH_DEL - remove one branch in running sandbox for user - Args: - GeneralReq.user: user name for this sandbox - GeneralReq.repo: repo name - GeneralReq.ext_str: branch to be deleted - GeneralReq.ext_bool: force delete even if this branch has not been merged yet - Returns: - 0: success - errors when: - sandbox not exists - sandbox not running - repo not found - branch not found - branch not merged if ext_bool is false - other docker errors occurred - other mir errors occurred - - CHECKOUT_COMMIT - checkout to another commit, or to another branch, or to another tag - Args: - GeneralReq.user: user name for this sandbox - GeneralReq.repo: repo name - GeneralReq.ext_str: branch name, tag name or commit id - Returns: - 0: success - errors when: - sandbox not exists - sandbox not running - repo not found - branch, tag or commit not found - other docker errors occurred - other mir errors occurred - - CHECKOUT_BRANCH - create a new branch in a running sandbox for user - Args: - GeneralReq.user: user name for 
this sandbox - GeneralReq.repo: repo name - GeneralReq.ext_str: new branch name - Returns: - 0: success - errors when: - sandbox not exists - sandbox not running - repo not found - new branch name already exists - other docker errors occurred - other mir errors occurred - - CLONE - clones a mir repo from server - Args: - GeneralReq.user: user name for this sandbox - GeneralReq.repo: repo url - Returns: - 0: success - errors when: - sandbox not exists - sandbox not running - repo url not available - other docker errors occurred - other mir errors occurred - - COMMIT - commit changes for mir repo - Args: - GeneralReq.user: user name for this sandbox - GeneralReq.repo: repo name - GeneralReq.ext_str: commit messages, multi lines enabled - Returns: - 0: success - errors when: - sandbox not exists - sandbox not running - repo not found - commit messages empty, or contains only spaces, tabs or line breaks - other docker errors occurred - other mir errors occurred - - FILTER - filter assets (currently by asset keywords) in mir repo - Args: - GeneralReq.user: user name for this sandbox - GeneralReq.repo: repo name - GeneralReq.ext_str: predicates, keywords separated by commas or semicolons - comma means AND - semicolon means OR - for example: `person; cat, dog` means to filter assets which - have person, or have both cat and dog in asset keywords list - attention that comma (means AND) has higher priority - Returns: - 0: success - errors when: - sandbox not exists - sandbox not running - repo not found - predicate empty - other docker errors occurred - other mir errors occurred - - LOG - get log from repo - Args: - GeneralReq.user: user name for this sandbox - GeneralReq.repo: repo name - Returns: - 0: success - GeneralResp.ext_strs: log infos - errors when: - sandbox not exists - sandbox not running - repo not found - other docker errors occurred - other mir errors occurred - - MERGE - merges current repo with another - Args: - GeneralReq.user: user name for this sandbox - GeneralReq.repo: repo name - GeneralReq.ext_int32: merge stragety, 0: MIX, 1: GUEST - GeneralReq.ext_str: guest branch name - Returns: - 0: success - errors when: - sandbox not exists - sandbox not running - repo not found - other docker errors occurred - other mir errors occurred - - PULL - pulls (updates) contents from server - Args: - GeneralReq.user: user name for this sandbox - GeneralReq.repo: repo name - Returns: - 0: success - errors when: - sandbox not exists - sandbox not running - repo not found - other docker errors occurred - other mir errors occurred - - PUSH - pushes local commits to server - Args: - GeneralReq.user: user name for this sandbox - GeneralReq.repo: repo name - GeneralReq.ext_bool: creates new branch on server - Returns: - 0: success - errors when: - sandbox not exists - sandbox not running - repo not found - other docker errors occurred - other mir errors occurred - - RESET: currently not available - - STATUS - shows status of current repo - Args: - GeneralReq.user: user name for this sandbox - GeneralReq.repo: repo name - Returns: - 0: success - GeneralResp.message: summary of current repo - errors when: - sandbox not exists - sandbox not running - repo not found - other docker errors occurred - other mir errors occurred - """ + """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') diff --git a/ymir/backend/src/common/proto/backend_pb2_utils.py 
b/ymir/backend/src/common/proto/backend_pb2_utils.py new file mode 100644 index 0000000000..12b24bfedf --- /dev/null +++ b/ymir/backend/src/common/proto/backend_pb2_utils.py @@ -0,0 +1,28 @@ +import enum +from proto import backend_pb2 + + +class UnknownTypesStrategyStr(str, enum.Enum): + STOP = 'stop' + IGNORE = 'ignore' + ADD = 'add' + + +def unknown_types_strategy_str_from_enum( + unknown_types_strategy: backend_pb2.UnknownTypesStrategy) -> UnknownTypesStrategyStr: + mapping = { + backend_pb2.UnknownTypesStrategy.UTS_STOP: UnknownTypesStrategyStr.STOP, + backend_pb2.UnknownTypesStrategy.UTS_IGNORE: UnknownTypesStrategyStr.IGNORE, + backend_pb2.UnknownTypesStrategy.UTS_ADD: UnknownTypesStrategyStr.ADD, + } + return mapping[unknown_types_strategy] + + +def unknown_types_strategy_enum_from_str( + unknown_types_strategy: UnknownTypesStrategyStr) -> backend_pb2.UnknownTypesStrategy: + mapping = { + UnknownTypesStrategyStr.STOP: backend_pb2.UnknownTypesStrategy.UTS_STOP, + UnknownTypesStrategyStr.IGNORE: backend_pb2.UnknownTypesStrategy.UTS_IGNORE, + UnknownTypesStrategyStr.ADD: backend_pb2.UnknownTypesStrategy.UTS_ADD, + } + return mapping[unknown_types_strategy] diff --git a/ymir/backend/src/common/proto/build-proto.sh b/ymir/backend/src/common/proto/build-proto.sh index abc507c4dc..f7428b9d93 100755 --- a/ymir/backend/src/common/proto/build-proto.sh +++ b/ymir/backend/src/common/proto/build-proto.sh @@ -1,13 +1,24 @@ #!/bin/bash set -e -INPUT_DIR=./ -OUTPUT_DIR=./ +# pip/conda(mac) install grpcio==1.38.0 +# pip install grpcio_tools==1.38.0 +# go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.28.1 +# go install google.golang.org/grpc/cmd/protoc-gen-go-grpc + +INPUT_DIR="./" +PY_OUTPUT_DIR="./" +GO_OUTPUT_DIR_HEL="../../ymir_hel/" python -m grpc_tools.protoc \ -I "$INPUT_DIR" \ - --grpc_python_out=$OUTPUT_DIR \ - --python_out=$OUTPUT_DIR \ - "$INPUT_DIR/backend.proto" + -I "../../../../command/proto/" \ + --grpc_python_out=${PY_OUTPUT_DIR} \ + --python_out=${PY_OUTPUT_DIR} \ + --go_out="${GO_OUTPUT_DIR_HEL}" \ + --go-grpc_out="${GO_OUTPUT_DIR_HEL}" \ + --go-grpc_opt=require_unimplemented_servers=false \ + "${INPUT_DIR}/backend.proto" -sed -i -r 's/^import (.*_pb2.*)/from proto import \1/g' $OUTPUT_DIR/*_pb2*.py +sed -i.bak -r 's/^import (.*_pb2.*)/from mir.protos import \1/g' ${PY_OUTPUT_DIR}/*_pb2.py && rm *.bak +sed -i.bak -r 's/^import (.*_pb2.*)/from proto import \1/g' ${PY_OUTPUT_DIR}/*_pb2_grpc.py && rm *.bak diff --git a/ymir/backend/src/health_check.sh b/ymir/backend/src/health_check.sh new file mode 100644 index 0000000000..2f38bad9ca --- /dev/null +++ b/ymir/backend/src/health_check.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +set -e + +# App check +curl --fail -s http://localhost:80/health +# Viewer check +curl --fail -s http://localhost:9527/health diff --git a/ymir/backend/src/ymir_app/alembic/env.py b/ymir/backend/src/ymir_app/alembic/env.py index bd0e0e4266..d938a37994 100644 --- a/ymir/backend/src/ymir_app/alembic/env.py +++ b/ymir/backend/src/ymir_app/alembic/env.py @@ -1,11 +1,20 @@ from __future__ import with_statement +from contextlib import contextmanager import os +import time +import logging from logging.config import fileConfig +import uuid +import subprocess +from typing import Dict, Optional, Generator from alembic import context from sqlalchemy import engine_from_config, pool +from app.db.base import Base # noqa + + # this is the Alembic Config object, which provides # access to the values within the .ini file in use. 
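Stepping back to the backend_pb2_utils.py helpers introduced above: the two functions are inverse lookup tables between the wire enum and the string form used in HTTP payloads, and because UnknownTypesStrategyStr subclasses str, its members also compare equal to plain strings. A quick round-trip check (hypothetical usage, assuming the proto package is importable as in the module itself):

    from proto import backend_pb2
    from proto.backend_pb2_utils import (
        UnknownTypesStrategyStr,
        unknown_types_strategy_enum_from_str,
        unknown_types_strategy_str_from_enum,
    )

    enum_value = unknown_types_strategy_enum_from_str(UnknownTypesStrategyStr.ADD)
    assert enum_value == backend_pb2.UnknownTypesStrategy.UTS_ADD
    assert unknown_types_strategy_str_from_enum(enum_value) == "add"  # str-enum equality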
config = context.config @@ -20,8 +29,6 @@ # target_metadata = mymodel.Base.metadata # target_metadata = None -from app.db.base import Base # noqa - target_metadata = Base.metadata # other values from the config, defined by the needs of env.py, @@ -34,6 +41,60 @@ def get_url() -> str: return os.getenv("DATABASE_URI", "sqlite:///app.db") +def get_mysql_credentials() -> Dict: + credentials = { + "MYSQL_USER": os.getenv("MYSQL_INITIAL_USER"), + "MYSQL_PASSWORD": os.getenv("MYSQL_INITIAL_PASSWORD"), + "MYSQL_DATABASE": os.getenv("MYSQL_DATABASE"), + "MYSQL_HOST": "db", + } + if None in credentials.values(): + raise ValueError("Invalid MySQL Environments") + return credentials + + +def create_backup(backup_filename: str) -> None: + credentials = get_mysql_credentials() + mysqldump_command = ( + "mysqldump --host {MYSQL_HOST} -u {MYSQL_USER} -p{MYSQL_PASSWORD} --databases {MYSQL_DATABASE} --no-tablespaces --ignore-table {MYSQL_DATABASE}.alembic_version --result-file %s" + ).format(**credentials) + subprocess.run(mysqldump_command % backup_filename, shell=True, check=True) + + +def recover_from_backup(backup_filename: str) -> None: + credentials = get_mysql_credentials() + recover_command = "mysql --host {MYSQL_HOST} -u {MYSQL_USER} -p{MYSQL_PASSWORD} < %s".format(**credentials) + subprocess.run(recover_command % backup_filename, shell=True, check=True) + + +@contextmanager +def backup_database() -> Generator[None, None, None]: + current_alembic_version = get_current_alembic_version() + backup_filename: Optional[str] = None + if is_alembic_migration_command() and current_alembic_version: + # Only when alembic is upgrading or downgrading + # and legacy database exists, should we backup database + backup_filename = f"backup_{current_alembic_version}_{int(time.time())}_{uuid.uuid4().hex}.sql" + create_backup(backup_filename) + logging.info("Created MySQL backup to %s" % backup_filename) + try: + yield + except Exception as e: + if backup_filename: + recover_from_backup(backup_filename) + logging.info("Failed to upgrade database (%s), rollback with backup %s" % (e, backup_filename)) + + +def is_alembic_migration_command() -> bool: + command = context.config.cmd_opts.cmd[0].__name__ + return command in ["upgrade", "downgrade"] + + +def get_current_alembic_version() -> Optional[str]: + migration_context = context.get_context() + return migration_context.get_current_revision() + + def run_migrations_offline() -> None: """Run migrations in 'offline' mode. @@ -82,8 +143,9 @@ def run_migrations_online() -> None: render_as_batch=True, ) - with context.begin_transaction(): - context.run_migrations() + with backup_database(): + with context.begin_transaction(): + context.run_migrations() if context.is_offline_mode(): diff --git a/ymir/backend/src/ymir_app/alembic/versions/0478ce5b8f3f_add_organization_and_environment_in_.py b/ymir/backend/src/ymir_app/alembic/versions/0478ce5b8f3f_add_organization_and_environment_in_.py new file mode 100644 index 0000000000..66ba650b10 --- /dev/null +++ b/ymir/backend/src/ymir_app/alembic/versions/0478ce5b8f3f_add_organization_and_environment_in_.py @@ -0,0 +1,34 @@ +"""add organization and environment in user tbl + +Revision ID: 0478ce5b8f3f +Revises: 501414124392 +Create Date: 2022-06-27 10:48:30.195372 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. 
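One behavioral detail of the backup_database() context manager above: on failure it restores the dump and logs, but does not re-raise, so the Alembic process itself still exits as if the upgrade had succeeded. If that is unintended, the fix is a single raise; a sketch of the stricter variant (hypothetical name, reusing recover_from_backup() as defined in env.py above):

    from contextlib import contextmanager
    from typing import Generator, Optional

    @contextmanager
    def backup_database_strict(backup_filename: Optional[str]) -> Generator[None, None, None]:
        try:
            yield
        except Exception:
            if backup_filename:
                recover_from_backup(backup_filename)  # helper from env.py above
            raise  # surface the failure to Alembic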
+revision = "0478ce5b8f3f" +down_revision = "501414124392" +branch_labels = None +depends_on = None + + +def upgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table("user", schema=None) as batch_op: + batch_op.add_column(sa.Column("organization", sa.String(length=100), nullable=True)) + batch_op.add_column(sa.Column("scene", sa.String(length=500), nullable=True)) + + # ### end Alembic commands ### + + +def downgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table("user", schema=None) as batch_op: + batch_op.drop_column("scene") + batch_op.drop_column("organization") + + # ### end Alembic commands ### diff --git a/ymir/backend/src/ymir_app/alembic/versions/0a65ca497350_add_model_stage.py b/ymir/backend/src/ymir_app/alembic/versions/0a65ca497350_add_model_stage.py new file mode 100644 index 0000000000..f02d6f71ad --- /dev/null +++ b/ymir/backend/src/ymir_app/alembic/versions/0a65ca497350_add_model_stage.py @@ -0,0 +1,55 @@ +"""add model stage + +Revision ID: 0a65ca497350 +Revises: 3c495c9f691e +Create Date: 2022-06-12 11:52:44.995917 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = "0a65ca497350" +down_revision = "3c495c9f691e" +branch_labels = None +depends_on = None + + +def upgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table("model", schema=None) as batch_op: + batch_op.add_column(sa.Column("recommended_stage", sa.Integer(), nullable=True)) + op.create_table( + "model_stage", + sa.Column("id", sa.Integer(), autoincrement=True, nullable=False), + sa.Column("name", sa.String(length=100), nullable=True), + sa.Column("map", sa.Float(), nullable=True), + sa.Column("timestamp", sa.Float(), nullable=True), + sa.Column("model_id", sa.Integer(), nullable=False), + sa.Column("is_deleted", sa.Boolean(), nullable=False), + sa.Column("create_datetime", sa.DateTime(), nullable=False), + sa.Column("update_datetime", sa.DateTime(), nullable=False), + sa.PrimaryKeyConstraint("id"), + ) + op.create_index(op.f("ix_model_stage_name"), "model_stage", ["name"], unique=False) + op.create_index(op.f("ix_model_stage_id"), "model_stage", ["id"], unique=False) + op.create_index( + op.f("ix_model_stage_model_id"), "model_stage", ["model_id"], unique=False + ) + with op.batch_alter_table("model_stage", schema=None) as batch_op: + batch_op.create_unique_constraint('uq_modelstage_name', ['model_id', 'name']) + # ### end Alembic commands ### + + +def downgrade() -> None: + # ### commands auto generated by Alembic - please adjust! 
### + with op.batch_alter_table("model", schema=None) as batch_op: + batch_op.drop_column("recommended_stage") + with op.batch_alter_table("model_stage", schema=None) as batch_op: + batch_op.drop_constraint("uq_modelstage_name", type_="unique") + op.drop_index(op.f("ix_model_stage_model_id"), table_name="model_stage") + op.drop_index(op.f("ix_model_stage_id"), table_name="model_stage") + op.drop_index(op.f("ix_model_stage_name"), table_name="model_stage") + op.drop_table("model_stage") + # ### end Alembic commands ### diff --git a/ymir/backend/src/ymir_app/alembic/versions/3693ba2b4220_task_add_dataset_id_and_model_id.py b/ymir/backend/src/ymir_app/alembic/versions/3693ba2b4220_task_add_dataset_id_and_model_id.py new file mode 100644 index 0000000000..b28d99c2e2 --- /dev/null +++ b/ymir/backend/src/ymir_app/alembic/versions/3693ba2b4220_task_add_dataset_id_and_model_id.py @@ -0,0 +1,44 @@ +"""[task] add dataset_id and model_id + +Revision ID: 3693ba2b4220 +Revises: 0a65ca497350 +Create Date: 2022-06-15 15:15:38.320432 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = "3693ba2b4220" +down_revision = "0a65ca497350" +branch_labels = None +depends_on = None + + +def upgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table("model_stage", schema=None) as batch_op: + batch_op.alter_column("timestamp", existing_type=sa.FLOAT(), type_=sa.Integer(), nullable=False) + + with op.batch_alter_table("task", schema=None) as batch_op: + batch_op.add_column(sa.Column("dataset_id", sa.Integer(), nullable=True)) + batch_op.add_column(sa.Column("model_stage_id", sa.Integer(), nullable=True)) + batch_op.create_index(batch_op.f("ix_task_dataset_id"), ["dataset_id"], unique=False) + batch_op.create_index(batch_op.f("ix_task_model_stage_id"), ["model_stage_id"], unique=False) + + # ### end Alembic commands ### + + +def downgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table("task", schema=None) as batch_op: + batch_op.drop_index(batch_op.f("ix_task_model_stage_id")) + batch_op.drop_index(batch_op.f("ix_task_dataset_id")) + batch_op.drop_column("model_stage_id") + batch_op.drop_column("dataset_id") + + with op.batch_alter_table("model_stage", schema=None) as batch_op: + batch_op.alter_column("timestamp", existing_type=sa.Integer(), type_=sa.FLOAT(), nullable=True) + + # ### end Alembic commands ### diff --git a/ymir/backend/src/ymir_app/alembic/versions/501414124392_add_enable_livecode_to_docker_image_tbl.py b/ymir/backend/src/ymir_app/alembic/versions/501414124392_add_enable_livecode_to_docker_image_tbl.py new file mode 100644 index 0000000000..d991e7d660 --- /dev/null +++ b/ymir/backend/src/ymir_app/alembic/versions/501414124392_add_enable_livecode_to_docker_image_tbl.py @@ -0,0 +1,40 @@ +"""add enable_livecode to docker_image tbl + +Revision ID: 501414124392 +Revises: e2fe87f35cf4 +Create Date: 2022-06-24 23:20:12.442721 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = '501414124392' +down_revision = 'e2fe87f35cf4' +branch_labels = None +depends_on = None + + +def upgrade() -> None: + # ### commands auto generated by Alembic - please adjust! 
### + with op.batch_alter_table('docker_image', schema=None) as batch_op: + batch_op.add_column(sa.Column("enable_livecode", sa.Boolean(), nullable=False, server_default="0")) + + with op.batch_alter_table('project', schema=None) as batch_op: + batch_op.drop_index('ix_project_testing_dataset_id') + batch_op.create_index(batch_op.f('ix_project_validation_dataset_id'), ['validation_dataset_id'], unique=False) + + # ### end Alembic commands ### + + +def downgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table('project', schema=None) as batch_op: + batch_op.drop_index(batch_op.f('ix_project_validation_dataset_id')) + batch_op.create_index('ix_project_testing_dataset_id', ['validation_dataset_id'], unique=False) + + with op.batch_alter_table('docker_image', schema=None) as batch_op: + batch_op.drop_column('enable_livecode') + + # ### end Alembic commands ### diff --git a/ymir/backend/src/ymir_app/alembic/versions/596c563041eb_add_model_stage_id_to_project_and_.py b/ymir/backend/src/ymir_app/alembic/versions/596c563041eb_add_model_stage_id_to_project_and_.py new file mode 100644 index 0000000000..d6bf9ccca0 --- /dev/null +++ b/ymir/backend/src/ymir_app/alembic/versions/596c563041eb_add_model_stage_id_to_project_and_.py @@ -0,0 +1,40 @@ +"""add model_stage_id to project and iteration + +Revision ID: 596c563041eb +Revises: 3693ba2b4220 +Create Date: 2022-06-17 13:53:41.751181 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = "596c563041eb" +down_revision = "3693ba2b4220" +branch_labels = None +depends_on = None + + +def upgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table("iteration", schema=None) as batch_op: + batch_op.add_column(sa.Column("training_output_model_stage_id", sa.Integer(), nullable=True)) + + with op.batch_alter_table("project", schema=None) as batch_op: + batch_op.add_column(sa.Column("initial_model_stage_id", sa.Integer(), nullable=True)) + batch_op.create_index(batch_op.f("ix_project_initial_model_stage_id"), ["initial_model_stage_id"], unique=False) + + # ### end Alembic commands ### + + +def downgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table("project", schema=None) as batch_op: + batch_op.drop_index(batch_op.f("ix_project_initial_model_stage_id")) + batch_op.drop_column("initial_model_stage_id") + + with op.batch_alter_table("iteration", schema=None) as batch_op: + batch_op.drop_column("training_output_model_stage_id") + + # ### end Alembic commands ### diff --git a/ymir/backend/src/ymir_app/alembic/versions/9bb7bb8b71c3_add_mining_dataset_id_to_iteration_tbl.py b/ymir/backend/src/ymir_app/alembic/versions/9bb7bb8b71c3_add_mining_dataset_id_to_iteration_tbl.py new file mode 100644 index 0000000000..37c8c6ce63 --- /dev/null +++ b/ymir/backend/src/ymir_app/alembic/versions/9bb7bb8b71c3_add_mining_dataset_id_to_iteration_tbl.py @@ -0,0 +1,30 @@ +"""add mining_dataset_id to iteration tbl + +Revision ID: 9bb7bb8b71c3 +Revises: a5b7b9a297b2 +Create Date: 2022-09-09 11:13:10.508069 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = "9bb7bb8b71c3" +down_revision = "a5b7b9a297b2" +branch_labels = None +depends_on = None + + +def upgrade() -> None: + # ### commands auto generated by Alembic - please adjust! 
### + with op.batch_alter_table("iteration", schema=None) as batch_op: + batch_op.add_column(sa.Column("mining_dataset_id", sa.Integer(), nullable=True)) + # ### end Alembic commands ### + + +def downgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table("iteration", schema=None) as batch_op: + batch_op.drop_column("mining_dataset_id") + # ### end Alembic commands ### diff --git a/ymir/backend/src/ymir_app/alembic/versions/a5b7b9a297b2_add_keywords_to_model_tbl.py b/ymir/backend/src/ymir_app/alembic/versions/a5b7b9a297b2_add_keywords_to_model_tbl.py new file mode 100644 index 0000000000..3c77dcfffb --- /dev/null +++ b/ymir/backend/src/ymir_app/alembic/versions/a5b7b9a297b2_add_keywords_to_model_tbl.py @@ -0,0 +1,31 @@ +"""add keywords to model tbl + +Revision ID: a5b7b9a297b2 +Revises: e2503210ac29 +Create Date: 2022-07-21 10:27:11.780720 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = "a5b7b9a297b2" +down_revision = "e2503210ac29" +branch_labels = None +depends_on = None + + +def upgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table("model", schema=None) as batch_op: + batch_op.add_column(sa.Column("keywords", sa.Text(length=20000), nullable=True)) + + # ### end Alembic commands ### + + +def downgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table("model", schema=None) as batch_op: + batch_op.drop_column("keywords") + # ### end Alembic commands ### diff --git a/ymir/backend/src/ymir_app/alembic/versions/c91513775753_add_iteration_step_table.py b/ymir/backend/src/ymir_app/alembic/versions/c91513775753_add_iteration_step_table.py new file mode 100644 index 0000000000..29d143b7fb --- /dev/null +++ b/ymir/backend/src/ymir_app/alembic/versions/c91513775753_add_iteration_step_table.py @@ -0,0 +1,116 @@ +"""add iteration_step table + +Revision ID: c91513775753 +Revises: 9bb7bb8b71c3 +Create Date: 2022-10-14 14:50:55.795359 + +""" +from typing import Any, List, Optional + +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. 
+revision = "c91513775753" +down_revision = "9bb7bb8b71c3" +branch_labels = None +depends_on = None + + +def get_record(conn: Any, table: str, id_: Optional[int]) -> Any: + if id_ is None: + return None + result = conn.execute(f"SELECT * FROM {table} WHERE id = {id_}").fetchall() + return result[0] if result else None + + +def parse_steps_info_from_iteration(conn: Any, iteration: Any) -> List: + """ + parse necessary information for each steps from iteration: + + - prepare_mining -> mining_input_dataset_id + - mining -> mining_output_dataset_id + - label -> label_output_dataset_id + - prepare_training -> training_input_dataset_id + - training -> training_output_model_id + """ + prepare_mining_result = get_record(conn, "dataset", iteration.mining_input_dataset_id) + mining_result = get_record(conn, "dataset", iteration.mining_output_dataset_id) + label_result = get_record(conn, "dataset", iteration.label_output_dataset_id) + prepare_training_result = get_record(conn, "dataset", iteration.training_input_dataset_id) + training_result = get_record(conn, "model", iteration.training_output_model_id) + + steps = [ + { + "name": "prepare_mining", + "task_type": 11, + "task_id": prepare_mining_result.task_id if prepare_mining_result else None, + }, + {"name": "mining", "task_type": 2, "task_id": mining_result.task_id if mining_result else None}, + {"name": "label", "task_type": 3, "task_id": label_result.task_id if label_result else None}, + { + "name": "prepare_training", + "task_type": 11, + "task_id": prepare_training_result.task_id if prepare_training_result else None, + }, + {"name": "training", "task_type": 1, "task_id": training_result.task_id if training_result else None}, + ] + for step in steps: + step.update( + { + "iteration_id": iteration.id, + "create_datetime": iteration.create_datetime, + "update_datetime": iteration.update_datetime, + "is_deleted": False, + "is_finished": True, + } + ) + return steps + + +def upgrade() -> None: + # ### commands auto generated by Alembic - please adjust! 
### + iteration_step_table = op.create_table( + "iteration_step", + sa.Column("id", sa.Integer(), autoincrement=True, nullable=False), + sa.Column("name", sa.String(length=100), nullable=False), + sa.Column("iteration_id", sa.Integer(), nullable=False), + sa.Column("task_type", sa.Integer(), nullable=False), + sa.Column("task_id", sa.Integer(), nullable=True), + sa.Column("serialized_presetting", sa.Text(length=20000), nullable=True), + sa.Column("is_finished", sa.Boolean(), nullable=False), + sa.Column("is_deleted", sa.Boolean(), nullable=False), + sa.Column("create_datetime", sa.DateTime(), nullable=False), + sa.Column("update_datetime", sa.DateTime(), nullable=False), + sa.PrimaryKeyConstraint("id"), + ) + with op.batch_alter_table("iteration_step", schema=None) as batch_op: + batch_op.create_index(batch_op.f("ix_iteration_step_id"), ["id"], unique=False) + batch_op.create_index(batch_op.f("ix_iteration_step_iteration_id"), ["iteration_id"], unique=False) + batch_op.create_index(batch_op.f("ix_iteration_step_name"), ["name"], unique=False) + batch_op.create_index(batch_op.f("ix_iteration_step_task_id"), ["task_id"], unique=False) + batch_op.create_index(batch_op.f("ix_iteration_step_task_type"), ["task_type"], unique=False) + + conn = op.get_bind() + try: + # copy data from iteration table to new created iteration_step table + iterations = conn.execute("SELECT * FROM iteration").fetchall() + steps_data = [step for iteration in iterations for step in parse_steps_info_from_iteration(conn, iteration)] + op.bulk_insert(iteration_step_table, steps_data) + except Exception as e: + print("Could not migrate iteration data to iteration_step, skip: %s" % e) + # ### end Alembic commands ### + + +def downgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table("iteration_step", schema=None) as batch_op: + batch_op.drop_index(batch_op.f("ix_iteration_step_task_type")) + batch_op.drop_index(batch_op.f("ix_iteration_step_task_id")) + batch_op.drop_index(batch_op.f("ix_iteration_step_name")) + batch_op.drop_index(batch_op.f("ix_iteration_step_iteration_id")) + batch_op.drop_index(batch_op.f("ix_iteration_step_id")) + + op.drop_table("iteration_step") + # ### end Alembic commands ### diff --git a/ymir/backend/src/ymir_app/alembic/versions/e2503210ac29_add_candidate_training_dataset_id_to_.py b/ymir/backend/src/ymir_app/alembic/versions/e2503210ac29_add_candidate_training_dataset_id_to_.py new file mode 100644 index 0000000000..70270627cf --- /dev/null +++ b/ymir/backend/src/ymir_app/alembic/versions/e2503210ac29_add_candidate_training_dataset_id_to_.py @@ -0,0 +1,30 @@ +"""add candidate_training_dataset_id to project tbl + +Revision ID: e2503210ac29 +Revises: badf991582d0 +Create Date: 2022-07-20 10:48:50.275863 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = "e2503210ac29" +down_revision = "0478ce5b8f3f" +branch_labels = None +depends_on = None + + +def upgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table("project", schema=None) as batch_op: + batch_op.add_column(sa.Column("candidate_training_dataset_id", sa.Integer(), nullable=True)) + # ### end Alembic commands ### + + +def downgrade() -> None: + # ### commands auto generated by Alembic - please adjust! 
### + with op.batch_alter_table("project", schema=None) as batch_op: + batch_op.drop_column("candidate_training_dataset_id") + # ### end Alembic commands ### diff --git a/ymir/backend/src/ymir_app/alembic/versions/e2fe87f35cf4_update_project.py b/ymir/backend/src/ymir_app/alembic/versions/e2fe87f35cf4_update_project.py new file mode 100644 index 0000000000..d7fae1471e --- /dev/null +++ b/ymir/backend/src/ymir_app/alembic/versions/e2fe87f35cf4_update_project.py @@ -0,0 +1,40 @@ +"""update project + +Revision ID: e2fe87f35cf4 +Revises: 596c563041eb +Create Date: 2022-06-20 15:38:05.416763 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = 'e2fe87f35cf4' +down_revision = '596c563041eb' +branch_labels = None +depends_on = None + + +def upgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + op.alter_column('iteration', 'testing_dataset_id', new_column_name='validation_dataset_id', existing_type=sa.Integer()) + + with op.batch_alter_table('project', schema=None) as batch_op: + batch_op.add_column(sa.Column('testing_dataset_ids', sa.String(length=500), nullable=True)) + batch_op.add_column(sa.Column('enable_iteration', sa.Boolean(), nullable=False, default=True, server_default='1')) + op.alter_column('project', 'testing_dataset_id', new_column_name='validation_dataset_id', existing_type=sa.Integer()) + + # ### end Alembic commands ### + + +def downgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table('project', schema=None) as batch_op: + batch_op.drop_column('enable_iteration') + batch_op.drop_column('testing_dataset_ids') + op.alter_column('project', 'validation_dataset_id', new_column_name='testing_dataset_id', existing_type=sa.Integer()) + + op.alter_column('iteration', 'validation_dataset_id', new_column_name='testing_dataset_id', existing_type=sa.Integer()) + + # ### end Alembic commands ### diff --git a/ymir/backend/src/ymir_app/app/api/api_v1/api.py b/ymir/backend/src/ymir_app/app/api/api_v1/api.py index d5787685d4..bd53b8b3b2 100644 --- a/ymir/backend/src/ymir_app/app/api/api_v1/api.py +++ b/ymir/backend/src/ymir_app/app/api/api_v1/api.py @@ -2,7 +2,6 @@ from app.api.api_v1.endpoints import ( datasets, - graphs, images, inferences, info, @@ -18,6 +17,7 @@ iterations, dataset_groups, model_groups, + model_stages, ) api_router = APIRouter() @@ -30,9 +30,9 @@ api_router.include_router(datasets.router, prefix="/datasets", tags=["datasets"]) api_router.include_router(model_groups.router, prefix="/model_groups", tags=["model_groups"]) api_router.include_router(models.router, prefix="/models", tags=["models"]) +api_router.include_router(model_stages.router, prefix="/model_stages", tags=["model_stages"]) api_router.include_router(tasks.router, prefix="/tasks", tags=["tasks"]) api_router.include_router(keywords.router, prefix="/keywords", tags=["keywords"]) -api_router.include_router(graphs.router, prefix="/graphs", tags=["graphs"]) api_router.include_router(images.router, prefix="/images", tags=["docker_images"]) api_router.include_router(inferences.router, prefix="/inferences", tags=["inference"]) api_router.include_router(roles.router, prefix="/roles", tags=["roles"]) diff --git a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/datasets.py b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/datasets.py index fc60ae027d..f21aeb77f3 100644 --- a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/datasets.py +++ 
b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/datasets.py @@ -4,7 +4,6 @@ from typing import Any, Dict, Optional, List from fastapi import APIRouter, BackgroundTasks, Depends, Path, Query -from fastapi.encoders import jsonable_encoder from fastapi.logger import logger from sqlalchemy.orm import Session @@ -12,16 +11,17 @@ from app.api import deps from app.api.errors.errors import ( AssetNotFound, + ControllerError, + DatasetGroupNotFound, DatasetNotFound, DuplicateDatasetGroupError, NoDatasetPermission, - FailedtoCreateTask, FailedToHideProtectedResources, - DatasetGroupNotFound, + FailedToParseVizResponse, ProjectNotFound, MissingOperations, RefuseToProcessMixedOperations, - DatasetsNotInSameGroup, + RequiredFieldMissing, ) from app.config import settings from app.constants.state import TaskState, TaskType, ResultState @@ -29,31 +29,48 @@ from app.utils.ymir_controller import ControllerClient, gen_task_hash from app.utils.ymir_viz import VizClient from app.schemas.dataset import MergeStrategy -from app.libs.datasets import import_dataset_in_background, evaluate_dataset +from app.libs.datasets import ( + import_dataset_in_background, + evaluate_datasets, + ensure_datasets_are_ready, + send_keywords_metrics, +) from common_utils.labels import UserLabels router = APIRouter() -@router.get( - "/batch", - response_model=schemas.DatasetsOut, -) +@router.get("/batch", response_model=schemas.DatasetsAnalysesOut) def batch_get_datasets( db: Session = Depends(deps.get_db), - dataset_ids: str = Query(None, example="1,2,3", alias="ids"), + viz_client: VizClient = Depends(deps.get_viz_client), + project_id: int = Query(...), + dataset_ids: str = Query(..., example="1,2,3", alias="ids", min_length=1), + require_ck: bool = Query(False, alias="ck"), + require_hist: bool = Query(False, alias="hist"), + current_user: models.User = Depends(deps.get_current_active_user), + user_labels: UserLabels = Depends(deps.get_user_labels), ) -> Any: ids = [int(i) for i in dataset_ids.split(",")] datasets = crud.dataset.get_multi_by_ids(db, ids=ids) - if not datasets: + if len(ids) != len(datasets): raise DatasetNotFound() - return {"result": datasets} - -@router.post( - "/batch", - response_model=schemas.DatasetsOut, -) + datasets_info = [schemas.dataset.DatasetInDB.from_orm(dataset).dict() for dataset in datasets] + if require_ck or require_hist: + viz_client.initialize(user_id=current_user.id, project_id=project_id, user_labels=user_labels) + for dataset in datasets_info: + if dataset["result_state"] != ResultState.ready: + continue + if require_ck: + dataset_extra_info = viz_client.get_dataset_info(dataset["hash"]) + elif require_hist: + dataset_extra_info = viz_client.get_dataset_analysis(dataset["hash"], require_hist=True) + dataset.update(dataset_extra_info) + return {"result": datasets_info} + + +@router.post("/batch", response_model=schemas.DatasetsOut) def batch_update_datasets( *, db: Session = Depends(deps.get_db), @@ -86,10 +103,7 @@ class SortField(enum.Enum): source = "source" -@router.get( - "/", - response_model=schemas.DatasetPaginationOut, -) +@router.get("/", response_model=schemas.DatasetPaginationOut) def list_datasets( db: Session = Depends(deps.get_db), source: TaskType = Query(None, description="type of related task"), @@ -127,10 +141,7 @@ def list_datasets( return {"result": {"total": total, "items": datasets}} -@router.get( - "/public", - response_model=schemas.DatasetPaginationOut, -) +@router.get("/public", response_model=schemas.DatasetPaginationOut) def get_public_datasets( db: 
Session = Depends(deps.get_db), current_user: models.User = Depends(deps.get_current_active_user), @@ -147,10 +158,7 @@ def get_public_datasets( return {"result": {"total": total, "items": datasets}} -@router.post( - "/importing", - response_model=schemas.DatasetOut, -) +@router.post("/importing", response_model=schemas.DatasetOut) def import_dataset( *, db: Session = Depends(deps.get_db), @@ -160,14 +168,16 @@ def import_dataset( background_tasks: BackgroundTasks, ) -> Any: """ - Create dataset. + Import dataset. Three Import Strategy: - no_annotations = 1 - ignore_unknown_annotations = 2 - stop_upon_unknown_annotations = 3 + - add unknown annotations = 4 """ # 1. check if dataset group name is available + logger.info("[import dataset] import dataset with payload: %s", dataset_import.json()) if crud.dataset_group.is_duplicated_name_in_project( db, project_id=dataset_import.project_id, name=dataset_import.group_name ): @@ -266,13 +276,17 @@ def delete_dataset( @router.get( "/{dataset_id}", - response_model=schemas.DatasetOut, + response_model=schemas.DatasetInfoOut, responses={404: {"description": "Dataset Not Found"}}, ) def get_dataset( db: Session = Depends(deps.get_db), dataset_id: int = Path(..., example="12"), + keywords_for_negative_info: str = Query(None, alias="keywords"), + verbose_info: bool = Query(False, alias="verbose"), current_user: models.User = Depends(deps.get_current_active_user), + viz_client: VizClient = Depends(deps.get_viz_client), + user_labels: UserLabels = Depends(deps.get_user_labels), ) -> Any: """ Get verbose information of specific dataset @@ -280,7 +294,38 @@ def get_dataset( dataset = crud.dataset.get_by_user_and_id(db, user_id=current_user.id, id=dataset_id) if not dataset: raise DatasetNotFound() - return {"result": dataset} + + keyword_ids: Optional[List[int]] = None + if keywords_for_negative_info: + keywords = keywords_for_negative_info.split(",") + keyword_ids = user_labels.id_for_names(names=keywords, raise_if_unknown=True)[0] + + dataset_info = schemas.dataset.DatasetInDB.from_orm(dataset).dict() + if verbose_info or keyword_ids: + viz_client.initialize( + user_id=current_user.id, + project_id=dataset.project_id, + user_labels=user_labels, + ) + try: + if verbose_info: + # get cks and tags + dataset_stats = viz_client.get_dataset_info(dataset_hash=dataset.hash) + else: + # get negative info based on given keywords + dataset_stats = viz_client.get_dataset_analysis( + dataset_hash=dataset.hash, keyword_ids=keyword_ids, require_hist=False + ) + except ValueError: + logger.exception("[dataset info] could not convert class_id to class_name, return with basic info") + pass + except FailedToParseVizResponse: + logger.exception("[dataset info] could not get dataset info from viewer, return with basic info") + pass + else: + dataset_info.update(dataset_stats) + + return {"result": dataset_info} @router.get( @@ -293,7 +338,11 @@ def get_assets_of_dataset( dataset_id: int = Path(..., example="12"), offset: int = 0, limit: int = settings.DEFAULT_LIMIT, - keyword: Optional[str] = Query(None), + keywords_str: Optional[str] = Query(None, example="person,cat", alias="keywords"), + cm_types_str: Optional[str] = Query(None, example="tp,mtp", alias="cm_types"), + cks_str: Optional[str] = Query(None, example="shenzhen,shanghai", alias="cks"), + tags_str: Optional[str] = Query(None, example="big,small", alias="tags"), + annotation_types_str: Optional[str] = Query(None, example="gt,pred", alias="annotation_types"), viz_client: VizClient = Depends(deps.get_viz_client), 
current_user: models.User = Depends(deps.get_current_active_user), user_labels: UserLabels = Depends(deps.get_user_labels), @@ -306,23 +355,25 @@ def get_assets_of_dataset( if not dataset: raise DatasetNotFound() - keyword_id = user_labels.get_class_ids(keyword)[0] if keyword else None + keywords = keywords_str.split(",") if keywords_str else None + keyword_ids = user_labels.id_for_names(names=keywords, raise_if_unknown=True)[0] if keywords else None + viz_client.initialize( user_id=current_user.id, project_id=dataset.project_id, - branch_id=dataset.hash, + user_labels=user_labels, ) assets = viz_client.get_assets( - keyword_id=keyword_id, + dataset_hash=dataset.hash, + keyword_ids=keyword_ids, + cm_types=stringtolist(cm_types_str), + cks=stringtolist(cks_str), + tags=stringtolist(tags_str), + annotation_types=stringtolist(annotation_types_str), limit=limit, offset=offset, - user_labels=user_labels, ) - result = { - "items": assets.items, - "total": assets.total, - } - return {"result": result} + return {"result": assets} @router.get( @@ -348,17 +399,17 @@ def get_random_asset_id_of_dataset( viz_client.initialize( user_id=current_user.id, project_id=dataset.project_id, - branch_id=dataset.hash, + user_labels=user_labels, ) assets = viz_client.get_assets( + dataset_hash=dataset.hash, keyword_id=None, offset=offset, limit=1, - user_labels=user_labels, ) - if len(assets.items) == 0: + if assets["total"] == 0: raise AssetNotFound() - return {"result": assets.items[0]} + return {"result": assets["items"][0]} def get_random_asset_offset(dataset: models.Dataset) -> int: @@ -391,50 +442,42 @@ def get_asset_of_dataset( viz_client.initialize( user_id=current_user.id, project_id=dataset.project_id, - branch_id=dataset.hash, - ) - asset = viz_client.get_asset( - asset_id=asset_hash, user_labels=user_labels, ) - if not asset: + assets = viz_client.get_assets(dataset_hash=dataset.hash, asset_hash=asset_hash, limit=1) + if assets["total"] == 0: raise AssetNotFound() - return {"result": asset} + return {"result": assets["items"][0]} -def fusion_normalize_parameters( +def normalize_fusion_parameter( db: Session, - task_in: schemas.DatasetsFusionParameter, + fusion_params: schemas.DatasetsFusionParameter, user_labels: UserLabels, ) -> Dict: - include_datasets_info = crud.dataset.get_multi_by_ids(db, ids=[task_in.main_dataset_id] + task_in.include_datasets) - - include_datasets_info.sort( - key=attrgetter("update_datetime"), - reverse=(task_in.include_strategy == MergeStrategy.prefer_newest), + in_datasets = crud.dataset.get_multi_by_ids( + db, ids=[fusion_params.main_dataset_id] + fusion_params.include_datasets ) - - exclude_datasets_info = crud.dataset.get_multi_by_ids(db, ids=task_in.exclude_datasets) - parameters = dict( - include_datasets=[dataset_info.hash for dataset_info in include_datasets_info], - include_strategy=task_in.include_strategy, - exclude_datasets=[dataset_info.hash for dataset_info in exclude_datasets_info], - include_class_ids=user_labels.get_class_ids(names_or_aliases=task_in.include_labels), - exclude_class_ids=user_labels.get_class_ids(names_or_aliases=task_in.exclude_labels), - sampling_count=task_in.sampling_count, + in_datasets.sort( + key=attrgetter("create_datetime"), + reverse=(fusion_params.include_strategy == MergeStrategy.prefer_newest), ) - - return parameters + ex_datasets = crud.dataset.get_multi_by_ids(db, ids=fusion_params.exclude_datasets) + return { + "include_datasets": [dataset.hash for dataset in in_datasets], + "strategy": fusion_params.include_strategy, + 
"exclude_datasets": [dataset.hash for dataset in ex_datasets], + "include_class_ids": user_labels.id_for_names(names=fusion_params.include_labels, raise_if_unknown=True)[0], + "exclude_class_ids": user_labels.id_for_names(names=fusion_params.exclude_labels, raise_if_unknown=True)[0], + "sampling_count": fusion_params.sampling_count, + } -@router.post( - "/fusion", - response_model=schemas.DatasetOut, -) +@router.post("/fusion", response_model=schemas.DatasetOut) def create_dataset_fusion( *, db: Session = Depends(deps.get_db), - task_in: schemas.DatasetsFusionParameter, + in_fusion: schemas.DatasetsFusionParameter, current_user: models.User = Depends(deps.get_current_active_user), controller_client: ControllerClient = Depends(deps.get_controller_client), user_labels: UserLabels = Depends(deps.get_user_labels), @@ -442,94 +485,241 @@ def create_dataset_fusion( """ Create data fusion """ - logger.info( - "[create task] create dataset fusion with payload: %s", - jsonable_encoder(task_in), - ) + logger.info("[fusion] create dataset fusion with payload: %s", in_fusion.json()) with get_iteration_context_converter(db, user_labels) as iteration_context_converter: - task_in_parameters = iteration_context_converter(task_in) + fusion_params = iteration_context_converter(in_fusion) - parameters = fusion_normalize_parameters(db, task_in_parameters, user_labels) - task_hash = gen_task_hash(current_user.id, task_in.project_id) + parameters = normalize_fusion_parameter(db, fusion_params, user_labels) + task_hash = gen_task_hash(current_user.id, in_fusion.project_id) try: - resp = controller_client.create_data_fusion( + controller_client.create_data_fusion( current_user.id, - task_in.project_id, + in_fusion.project_id, task_hash, parameters, ) - logger.info("[create task] controller response: %s", resp) except ValueError: - raise FailedtoCreateTask() + logger.exception("[fusion] failed to create fusion via controller") + raise ControllerError() - # 1. create task task = crud.task.create_placeholder( db, type_=TaskType.data_fusion, user_id=current_user.id, - project_id=task_in.project_id, + project_id=in_fusion.project_id, hash_=task_hash, state_=TaskState.pending, - parameters=task_in.json(), + parameters=in_fusion.json(), ) - logger.info("[create dataset] related task record created: %s", task.hash) + logger.info("[fusion] related task record created: %s", task.hash) - # 2. 
create dataset record - dataset_group = crud.dataset_group.get(db, id=task_in.dataset_group_id) + dataset_group = crud.dataset_group.get(db, id=in_fusion.dataset_group_id) if not dataset_group: raise DatasetGroupNotFound() - dataset_in = schemas.DatasetCreate( - hash=task.hash, - dataset_group_id=task_in.dataset_group_id, - project_id=task.project_id, - user_id=task.user_id, - source=task.type, - task_id=task.id, - ) - dataset = crud.dataset.create_with_version(db, obj_in=dataset_in, dest_group_name=dataset_group.name) - logger.info("[create dataset] dataset record created: %s", dataset.name) + fused_dataset = crud.dataset.create_as_task_result(db, task, dataset_group.id, description=in_fusion.description) + logger.info("[fusion] dataset record created: %s", fused_dataset.name) - return {"result": dataset} + if parameters.get("include_class_ids"): + # update keywords usage metrics when necessary + send_keywords_metrics( + current_user.id, + in_fusion.project_id, + task.hash, + parameters["include_class_ids"], + int(task.create_datetime.timestamp()), + ) + return {"result": fused_dataset} -@router.post( - "/evaluation", - response_model=schemas.dataset.DatasetEvaluationOut, -) -def evaluate_datasets( + +@router.post("/evaluation", response_model=schemas.dataset.DatasetEvaluationOut) +def batch_evaluate_datasets( *, db: Session = Depends(deps.get_db), - evaluation_in: schemas.dataset.DatasetEvaluationCreate, + in_evaluation: schemas.dataset.DatasetEvaluationCreate, current_user: models.User = Depends(deps.get_current_active_user), controller_client: ControllerClient = Depends(deps.get_controller_client), - viz_client: VizClient = Depends(deps.get_viz_client), user_labels: UserLabels = Depends(deps.get_user_labels), ) -> Any: """ - evaluate dataset against ground truth + evaluate datasets, each against its own ground truth """ - gt_dataset = crud.dataset.get(db, id=evaluation_in.gt_dataset_id) - other_datasets = crud.dataset.get_multi_by_ids(db, ids=evaluation_in.other_dataset_ids) - if not gt_dataset or len(evaluation_in.other_dataset_ids) != len(other_datasets): - raise DatasetNotFound() - if not is_same_group([gt_dataset, *other_datasets]): - # confine evaluation to the same dataset group - raise DatasetsNotInSameGroup() + logger.info("[evaluate] evaluate datasets with payload: %s", in_evaluation.json()) + datasets = ensure_datasets_are_ready(db, dataset_ids=in_evaluation.dataset_ids) + dataset_id_mapping = {dataset.hash: dataset.id for dataset in datasets} - evaluations = evaluate_dataset( + evaluations = evaluate_datasets( controller_client, - viz_client, current_user.id, - evaluation_in.project_id, + in_evaluation.project_id, user_labels, - evaluation_in.confidence_threshold, - gt_dataset, - other_datasets, + in_evaluation.confidence_threshold, + in_evaluation.iou_threshold, + in_evaluation.require_average_iou, + in_evaluation.need_pr_curve, + in_evaluation.main_ck, + dataset_id_mapping, ) return {"result": evaluations} -def is_same_group(datasets: List[models.Dataset]) -> bool: - return len({dataset.dataset_group_id for dataset in datasets}) == 1 +@router.post("/check_duplication", response_model=schemas.dataset.DatasetCheckDuplicationOut) +def check_duplication( + *, + db: Session = Depends(deps.get_db), + in_datasets: schemas.dataset.MultiDatasetsWithProjectID, + current_user: models.User = Depends(deps.get_current_active_user), + viz_client: VizClient = Depends(deps.get_viz_client), +) -> Any: + """ + check duplication among the given datasets + """ + datasets = ensure_datasets_are_ready(db,
dataset_ids=in_datasets.dataset_ids) + + viz_client.initialize(user_id=current_user.id, project_id=in_datasets.project_id) + duplicated_stats = viz_client.check_duplication([dataset.hash for dataset in datasets]) + duplicated_asset_count = duplicated_stats["duplication"] + return {"result": duplicated_asset_count} + + +@router.post("/merge", response_model=schemas.dataset.DatasetOut) +def merge_datasets( + *, + db: Session = Depends(deps.get_db), + in_merge: schemas.dataset.DatasetMergeCreate, + current_user: models.User = Depends(deps.get_current_active_user), + controller_client: ControllerClient = Depends(deps.get_controller_client), +) -> Any: + """ + Merge multiple datasets + """ + logger.info("[merge] merge dataset with payload: %s", in_merge.json()) + if in_merge.dest_group_name: + if crud.dataset_group.is_duplicated_name_in_project( + db, project_id=in_merge.project_id, name=in_merge.dest_group_name + ): + raise DuplicateDatasetGroupError() + dest_group = crud.dataset_group.create_dataset_group( + db, + name=in_merge.dest_group_name, + user_id=current_user.id, + project_id=in_merge.project_id, + ) + elif in_merge.dest_group_id: + dest_group = crud.dataset_group.get(db, id=in_merge.dest_group_id) # type: ignore + if not dest_group: + raise DatasetGroupNotFound() + else: + raise RequiredFieldMissing() + + in_datasets = ensure_datasets_are_ready(db, dataset_ids=in_merge.include_datasets) + in_datasets.sort( + key=attrgetter("create_datetime"), + reverse=(in_merge.merge_strategy == MergeStrategy.prefer_newest), + ) + ex_datasets = ( + ensure_datasets_are_ready(db, dataset_ids=in_merge.exclude_datasets) if in_merge.exclude_datasets else None + ) + + task_hash = gen_task_hash(current_user.id, in_merge.project_id) + try: + controller_client.merge_datasets( + current_user.id, + in_merge.project_id, + task_hash, + [d.hash for d in in_datasets] if in_datasets else None, + [d.hash for d in ex_datasets] if ex_datasets else None, + in_merge.merge_strategy, + ) + except ValueError: + logger.exception("[merge] failed to create merge via controller") + raise ControllerError() + + task = crud.task.create_placeholder( + db, + type_=TaskType.merge, + user_id=current_user.id, + project_id=in_merge.project_id, + hash_=task_hash, + state_=TaskState.pending, + parameters=in_merge.json(), + ) + logger.info("[merge] related task record created: %s", task.hash) + + merged_dataset = crud.dataset.create_as_task_result(db, task, dest_group.id, description=in_merge.description) + return {"result": merged_dataset} + + +@router.post("/filter", response_model=schemas.dataset.DatasetOut) +def filter_dataset( + *, + db: Session = Depends(deps.get_db), + in_filter: schemas.dataset.DatasetFilterCreate, + current_user: models.User = Depends(deps.get_current_active_user), + controller_client: ControllerClient = Depends(deps.get_controller_client), + user_labels: UserLabels = Depends(deps.get_user_labels), +) -> Any: + """ + Filter dataset + """ + logger.info("[filter] filter dataset with payload: %s", in_filter.json()) + datasets = ensure_datasets_are_ready(db, dataset_ids=[in_filter.dataset_id]) + main_dataset = datasets[0] + + class_ids = ( + user_labels.id_for_names(names=in_filter.include_keywords, raise_if_unknown=True)[0] + if in_filter.include_keywords + else None + ) + ex_class_ids = ( + user_labels.id_for_names(names=in_filter.exclude_keywords, raise_if_unknown=True)[0] + if in_filter.exclude_keywords + else None + ) + + task_hash = gen_task_hash(current_user.id, in_filter.project_id) + try: + 
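A sketch of a call to the merge route above. One of dest_group_name / dest_group_id must be supplied, otherwise RequiredFieldMissing is raised; the host, mount path and ids below are illustrative:

```python
import requests

payload = {
    "project_id": 1,
    "dest_group_name": "merged_group",  # or dest_group_id for an existing group
    "include_datasets": [5, 8],
    "exclude_datasets": [9],
    "merge_strategy": 1,                # a MergeStrategy value
}
resp = requests.post(
    "http://localhost:8888/api/v1/datasets/merge",  # host and prefix assumed
    json=payload,
    headers={"Authorization": "Bearer <access-token>"},
)
print(resp.json()["result"]["name"])  # placeholder dataset created for the merge task
```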
controller_client.filter_dataset( + current_user.id, + in_filter.project_id, + task_hash, + main_dataset.hash, + class_ids, + ex_class_ids, + in_filter.sampling_count, + ) + except ValueError: + logger.exception("[filter] failed to create filter via controller") + raise ControllerError() + + task = crud.task.create_placeholder( + db, + type_=TaskType.filter, + user_id=current_user.id, + project_id=in_filter.project_id, + hash_=task_hash, + state_=TaskState.pending, + parameters=in_filter.json(), + ) + logger.info("[filter] related task record created: %s", task.hash) + filtered_dataset = crud.dataset.create_as_task_result( + db, task, main_dataset.dataset_group_id, description=in_filter.description + ) + if class_ids: + # update keywords usage metrics when necessary + send_keywords_metrics( + current_user.id, + in_filter.project_id, + task.hash, + class_ids, + int(task.create_datetime.timestamp()), + ) + return {"result": filtered_dataset} + + +def stringtolist(s: Optional[str]) -> Optional[List]: + if s is None: + return s + return s.split(",") diff --git a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/graphs.py b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/graphs.py deleted file mode 100644 index f3cfdfb4a4..0000000000 --- a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/graphs.py +++ /dev/null @@ -1,58 +0,0 @@ -from enum import Enum -from typing import Any - -from fastapi import APIRouter, Depends, Query -from sqlalchemy.orm import Session - -from app import crud, schemas -from app.api import deps -from app.api.errors.errors import DatasetNotFound, GraphNotFound, ModelNotFound -from app.config import settings -from app.utils.graph import GraphClient - -router = APIRouter() - - -class NodeType(str, Enum): - dataset = "dataset" - model = "model" - - -@router.get( - "/", - response_model=schemas.GraphOut, - responses={404: {"description": "Node Not Found"}}, -) -def get_graph( - db: Session = Depends(deps.get_db), - graph_db: GraphClient = Depends(deps.get_graph_client_of_user), - type_: NodeType = Query(..., alias="type", description="type of Node, including model and dataset"), - id_: int = Query(..., alias="id", description="model_id or dataset_id"), - max_hops: int = Query(settings.MAX_HOPS, description="max distance from given node to target nodes"), -) -> Any: - """ - Get history of dataset or model in Graph - - type: "model" or "dataset" - id: model_id or dataset_id - max_hops: max distence - """ - node_obj = getattr(crud, type_).get(db, id=id_) - if not node_obj: - if type_ == "dataset": - raise DatasetNotFound() - else: - raise ModelNotFound() - - source = { - "label": type_.capitalize(), - "hash": node_obj.hash, - "id": id_, - } - # find all the nodes within max_hops pointing to the source - # and all the nodes the source node pointing to (1 hop) - res = graph_db.query_history(source, max_hops) - if not res: - raise GraphNotFound() - - return {"result": res} diff --git a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/images.py b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/images.py index 2343933558..b91301ef2e 100644 --- a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/images.py +++ b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/images.py @@ -71,7 +71,7 @@ def create_docker_image( This endpint will create an image record immediately, but the pulling process will run in background """ - if crud.docker_image.docker_name_exists(db, url=docker_image_in.url): + if crud.docker_image.get_by_url(db, docker_image_in.url) or 
crud.docker_image.get_by_name(db, docker_image_in.name): raise DuplicateDockerImageError() docker_image = crud.docker_image.create(db, obj_in=docker_image_in) logger.info("[create image] docker image record created: %s", docker_image) @@ -107,10 +107,11 @@ def import_docker_image( ) crud.image_config.create(db, obj_in=image_config_in) + enable_livecode = bool(resp.get("enable_livecode", False)) crud.docker_image.update_from_dict( db, docker_image_id=docker_image.id, - updates={"hash": hash_, "state": int(DockerImageState.done)}, + updates={"hash": hash_, "state": int(DockerImageState.done), "enable_livecode": enable_livecode}, ) logger.info( "[create image] docker image imported via controller: %s, added %d configs", @@ -231,6 +232,9 @@ def update_docker_image( if not docker_image: raise DockerImageNotFound() + if docker_image_update.name and crud.docker_image.get_by_name(db, name=docker_image_update.name): + raise DuplicateDockerImageError() + docker_image = crud.docker_image.update(db, db_obj=docker_image, obj_in=docker_image_update) return {"result": docker_image} diff --git a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/inferences.py b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/inferences.py index a0c4611815..02c688f65b 100644 --- a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/inferences.py +++ b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/inferences.py @@ -11,7 +11,7 @@ FailedToCallInference, FailedtoDownloadError, InvalidInferenceConfig, - ModelNotFound, + ModelStageNotFound, ) from app.config import settings from app.utils.files import FailedToDownload, save_files @@ -34,10 +34,10 @@ def call_inference( """ Call Inference """ - model = crud.model.get(db, id=inference_in.model_id) - if not model: - logger.error("Failed to find model id: %s", inference_in.model_id) - raise ModelNotFound() + model_stage = crud.model_stage.get(db, id=inference_in.model_stage_id) + if not model_stage: + logger.error("Failed to find model stage id: %s", inference_in.model_stage_id) + raise ModelStageNotFound() docker_image = crud.docker_image.get_inference_docker_image(db, url=inference_in.docker_image) if not docker_image: @@ -53,8 +53,9 @@ def call_inference( try: resp = controller_client.call_inference( current_user.id, - model.project_id, - model.hash, + inference_in.project_id, + model_stage.model.hash, # type: ignore + model_stage.name, asset_dir, docker_image.url, json.dumps(inference_in.docker_image_config), @@ -64,7 +65,7 @@ def call_inference( raise FailedToCallInference() result = { - "model_id": inference_in.model_id, + "model_stage_id": inference_in.model_stage_id, "annotations": extract_inference_annotations(resp, filename_mapping=filename_mapping), } return {"result": result} @@ -76,5 +77,5 @@ def extract_inference_annotations( for filename, annotations in resp[inference_type]["image_annotations"].items(): yield { "image_url": filename_mapping[filename], - "detection": annotations["annotations"], + "detection": annotations["boxes"], } diff --git a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/info.py b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/info.py index 62e5fbc266..dceae06cb8 100644 --- a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/info.py +++ b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/info.py @@ -6,6 +6,7 @@ from app import models, schemas from app.api import deps from app.api.errors.errors import FailedtoGetSysInfo +from app.config import settings from app.utils.ymir_controller import ControllerClient router = APIRouter() @@ 
-24,7 +25,8 @@ def get_sys_info( Get current system information, available GPUs for example """ try: - gpu_info = controller_client.get_gpu_info(current_user.id) + sys_info = controller_client.get_gpu_info(current_user.id) except grpc.RpcError: raise FailedtoGetSysInfo() - return {"result": gpu_info} + sys_info["openpai_enabled"] = settings.OPENPAI_ENABLED + return {"result": sys_info} diff --git a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/iterations.py b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/iterations.py index 0d4b532234..3236de3219 100644 --- a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/iterations.py +++ b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/iterations.py @@ -6,7 +6,12 @@ from app import crud, models, schemas from app.api import deps -from app.api.errors.errors import IterationNotFound +from app.api.errors.errors import IterationNotFound, TaskNotFound, IterationStepNotFound, IterationStepHasFinished +from app.crud.crud_iteration_step import StepNotFound +from app.libs.iterations import calculate_mining_progress +from app.libs.iteration_steps import initialize_steps + +from common_utils.labels import UserLabels router = APIRouter() @@ -24,6 +29,7 @@ def create_iteration( iteration = crud.iteration.create_with_user_id(db, user_id=current_user.id, obj_in=obj_in) logger.info("[create iteration] iteration record created: %s", iteration) crud.project.update_current_iteration(db, project_id=obj_in.project_id, iteration_id=iteration.id) + initialize_steps(db, iteration.id) return {"result": iteration} @@ -82,3 +88,96 @@ def update_iteration( raise IterationNotFound() crud.iteration.update_iteration(db, iteration_id=iteration_id, iteration_update=iteration_updates) return {"result": iteration} + + +@router.get("/{iteration_id}/mining_progress", response_model=schemas.iteration.IterationMiningProgressOut) +def get_mining_progress_of_iteration( + db: Session = Depends(deps.get_db), + current_user: models.User = Depends(deps.get_current_active_user), + project_id: int = Query(...), + iteration_id: int = Path(...), + user_labels: UserLabels = Depends(deps.get_user_labels), +) -> Any: + """ + Get mining progress of specific iteration + """ + stats = calculate_mining_progress(db, user_labels, current_user.id, project_id, iteration_id) + return {"result": stats} + + +@router.get( + "/{iteration_id}/steps", + response_model=schemas.IterationStepsOut, +) +def list_iteration_steps( + *, + db: Session = Depends(deps.get_db), + iteration_id: int = Path(...), + current_user: models.User = Depends(deps.get_current_active_user), +) -> Any: + steps = crud.iteration_step.get_multi_by_iteration(db, iteration_id=iteration_id) + return {"result": steps} + + +@router.get( + "/{iteration_id}/steps/{step_id}", + response_model=schemas.IterationStepOut, +) +def get_iteration_step( + *, + db: Session = Depends(deps.get_db), + iteration_id: int = Path(...), + step_id: int = Path(...), + current_user: models.User = Depends(deps.get_current_active_user), +) -> Any: + step = crud.iteration_step.get(db, step_id) + return {"result": step} + + +@router.post( + "/{iteration_id}/steps/{step_id}/start", + response_model=schemas.IterationStepOut, +) +def start_iteration_step( + *, + db: Session = Depends(deps.get_db), + iteration_id: int = Path(...), + step_id: int = Path(...), + task_id: int = Query(...), + user_labels: UserLabels = Depends(deps.get_user_labels), + current_user: models.User = Depends(deps.get_current_active_user), +) -> Any: + """ + start given step: + 1. 
bind existing task to given step + 2. record task result and record dataset_id or model_id in step record + """ + step = crud.iteration_step.get(db, step_id) + if not step: + raise IterationStepNotFound() + if step.is_finished: + raise IterationStepHasFinished() + + task_in_db = crud.task.get(db, task_id) + if not task_in_db: + raise TaskNotFound() + step = crud.iteration_step.start(db, id=step_id, task_id=task_in_db.id) + return {"result": step} + + +@router.post( + "/{iteration_id}/steps/{step_id}/finish", + response_model=schemas.IterationStepOut, +) +def finish_iteration_step( + *, + db: Session = Depends(deps.get_db), + iteration_id: int = Path(...), + step_id: int = Path(...), + current_user: models.User = Depends(deps.get_current_active_user), +) -> Any: + try: + step = crud.iteration_step.finish(db, id=step_id) + except StepNotFound: + raise IterationStepNotFound() + return {"result": step} diff --git a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/keywords.py b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/keywords.py index 16c77f9291..c1f8edc9d7 100644 --- a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/keywords.py +++ b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/keywords.py @@ -1,4 +1,4 @@ -from typing import Any, Dict, List, Optional +from typing import Any, Dict, Optional from fastapi import APIRouter, Depends, Query from fastapi.logger import logger @@ -6,14 +6,17 @@ from app import models from app.api import deps from app.config import settings -from app.schemas import ( - KeywordsCreate, +from app.schemas.keyword import ( KeywordsCreateOut, KeywordsPaginationOut, KeywordUpdate, + KeywordsInput, + KeywordsCheckDupOut, ) from app.utils.cache import CacheClient from app.utils.ymir_controller import ControllerClient +from app.libs.common import pagination +from app.libs.labels import upsert_labels from common_utils.labels import SingleLabel, UserLabels router = APIRouter() @@ -42,36 +45,40 @@ def get_keywords( if settings.REVERSE_KEYWORDS_OUTPUT: items.reverse() - res = {"total": len(items), "items": paginate(items, offset, limit)} + res = {"total": len(items), "items": pagination(items, offset, limit)} return {"result": res} @router.post("/", response_model=KeywordsCreateOut) def create_keywords( *, - keywords_input: KeywordsCreate, + keywords_input: KeywordsInput, current_user: models.User = Depends(deps.get_current_active_user), controller_client: ControllerClient = Depends(deps.get_controller_client), cache: CacheClient = Depends(deps.get_cache), - user_labels: UserLabels = Depends(deps.get_user_labels), ) -> Any: """ Batch create given keywords and aliases to keywords list """ - new_user_labels = UserLabels(labels=keywords_input.keywords) - dups = user_labels.find_dups(new_user_labels) - if dups: - logger.info(f"find dups in new_user_labels {new_user_labels}") - return {"result": {"failed": dups}} - - return process_update_labels( + new_labels = UserLabels(labels=keywords_input.keywords) + result = upsert_labels( user_id=current_user.id, - user_labels=user_labels, - new_user_labels=new_user_labels, - dry_run=keywords_input.dry_run, + new_labels=new_labels, controller_client=controller_client, - cache=cache, ) + cache.delete_personal_keywords_cache() + return {"result": result} + + +@router.post("/check_duplication", response_model=KeywordsCheckDupOut) +def check_keywords_duplication( + *, + keywords_input: KeywordsInput, + user_labels: UserLabels = Depends(deps.get_user_labels), +) -> Any: + new_user_labels = 
UserLabels(labels=keywords_input.keywords) + dups = user_labels.find_dups(new_user_labels) + return {"result": dups} @router.patch( @@ -85,46 +92,13 @@ def update_keyword_aliases( current_user: models.User = Depends(deps.get_current_active_user), controller_client: ControllerClient = Depends(deps.get_controller_client), cache: CacheClient = Depends(deps.get_cache), - user_labels: UserLabels = Depends(deps.get_user_labels), ) -> Any: updated_label = SingleLabel(name=keyword, aliases=aliases_in.aliases) - new_user_labels = UserLabels(labels=[updated_label]) - return process_update_labels( + new_labels = UserLabels(labels=[updated_label]) + result = upsert_labels( user_id=current_user.id, - user_labels=user_labels, - new_user_labels=new_user_labels, - dry_run=False, + new_labels=new_labels, controller_client=controller_client, - cache=cache, ) - - -def paginate(items: List[Any], offset: int = 0, limit: Optional[int] = None) -> List[Any]: - """ - Mimic the behavior of database query's offset-limit pagination - """ - end = limit + offset if limit is not None else None - return items[offset:end] - - -def process_update_labels( - user_id: int, - user_labels: UserLabels, - new_user_labels: UserLabels, - dry_run: bool, - controller_client: ControllerClient, - cache: CacheClient, -) -> Dict: - logger.info(f"old labels: {user_labels.json()}\nnew labels: {new_user_labels.json()}") - resp = controller_client.add_labels(user_id, new_user_labels, dry_run) - logger.info(f"[controller] response for update label: {resp}") - - conflict_labels = [] - if resp.get("label_collection"): - for conflict_label in resp["label_collection"]["labels"]: - conflict_labels += [conflict_label["name"]] + conflict_label["aliases"] - - if not conflict_labels: - # clean cached key when changes happen - cache.delete_personal_keywords_cache() - return {"result": {"failed": conflict_labels}} + cache.delete_personal_keywords_cache() + return {"result": result} diff --git a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/login.py b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/login.py index 203cde5107..38f483d992 100644 --- a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/login.py +++ b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/login.py @@ -17,6 +17,7 @@ from app.config import settings from app.utils import security from app.utils.email import send_reset_password_email +from common_utils.version import YMIR_VERSION router = APIRouter() @@ -62,6 +63,7 @@ def login_access_token( "id": user.id, "role": role.name, } + token_payload["version"] = YMIR_VERSION payload = { "access_token": security.create_access_token(token_payload, expires_delta=access_token_expires), "token_type": "bearer", diff --git a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/model_groups.py b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/model_groups.py index bf25f07ae5..1a371100be 100644 --- a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/model_groups.py +++ b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/model_groups.py @@ -104,6 +104,9 @@ def update_model_group( if not model_group: raise ModelGroupNotFound() + if crud.model_group.is_duplicated_name_in_project(db, project_id=model_group.project_id, name=obj_update.name): + raise DuplicateModelGroupError() + model_group = crud.model_group.update(db, db_obj=model_group, obj_in=obj_update) return {"result": model_group} diff --git a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/model_stages.py b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/model_stages.py new file 
mode 100644 index 0000000000..fbc4067880 --- /dev/null +++ b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/model_stages.py @@ -0,0 +1,42 @@ +from typing import Any + +from fastapi import APIRouter, Depends, Path, Query +from sqlalchemy.orm import Session + +from app import crud, models, schemas +from app.api import deps +from app.api.errors.errors import ModelStageNotFound + +router = APIRouter() + + +@router.get( + "/batch", + response_model=schemas.ModelStagesOut, +) +def batch_get_models( + db: Session = Depends(deps.get_db), + model_stage_ids: str = Query(None, alias="ids"), +) -> Any: + ids = [int(i) for i in model_stage_ids.split(",")] + stages = crud.model_stage.get_multi_by_ids(db, ids=ids) + return {"result": stages} + + +@router.get( + "/{model_stage_id}", + response_model=schemas.ModelStageOut, +) +def get_model_stage( + *, + db: Session = Depends(deps.get_db), + model_stage_id: int = Path(...), + current_user: models.User = Depends(deps.get_current_active_user), +) -> Any: + """ + Get a model stage detail + """ + model_stage = crud.model_stage.get(db, id=model_stage_id) + if not model_stage: + raise ModelStageNotFound() + return {"result": model_stage} diff --git a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/models.py b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/models.py index 7a809afa79..8e15551b67 100644 --- a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/models.py +++ b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/models.py @@ -149,7 +149,6 @@ def import_model( model_group_in = schemas.ModelGroupCreate( name=model_import.group_name, project_id=model_import.project_id, - description=model_import.description, ) model_group = crud.model_group.create_with_user_id(db=db, user_id=current_user.id, obj_in=model_group_in) @@ -220,6 +219,27 @@ def delete_model( return {"result": model} +@router.patch( + "/{model_id}", + response_model=schemas.ModelOut, + responses={ + 400: {"description": "No permission"}, + 404: {"description": "Model Not Found"}, + }, +) +def update_model( + *, + db: Session = Depends(deps.get_db), + model_id: int = Path(..., example="12"), + stage: schemas.StageChange, + current_user: models.User = Depends(deps.get_current_active_user), +) -> Any: + + model = crud.model.update_recommonded_stage(db, model_id=model_id, stage_id=stage.stage_id) + + return {"result": model} + + @router.get( "/{model_id}", response_model=schemas.ModelOut, diff --git a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/projects.py b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/projects.py index 4eebc0ef5a..d49871acfe 100644 --- a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/projects.py +++ b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/projects.py @@ -1,7 +1,6 @@ import enum -import json -import time from typing import Any +import uuid from fastapi import APIRouter, Depends, Path, Query, BackgroundTasks from fastapi.logger import logger @@ -13,17 +12,15 @@ ProjectNotFound, DuplicateProjectError, FailedToCreateProject, - FailedToConnectClickHouse, NoDatasetPermission, DatasetNotFound, ) from app.config import settings from app.constants.state import ResultState, RunningStates, TaskType, TrainingType from app.utils.cache import CacheClient -from app.utils.clickhouse import YmirClickHouse from app.utils.ymir_controller import ControllerClient, gen_task_hash -from app.libs.projects import setup_sample_project_in_background -from app.libs.keywords import add_keywords +from app.libs.projects import setup_sample_project_in_background, 
send_project_metrics +from app.libs.labels import ensure_labels_exist from common_utils.labels import UserLabels router = APIRouter() @@ -67,6 +64,7 @@ def list_projects( start_time=start_time, end_time=end_time, ) + return {"result": {"total": total, "items": projects}} @@ -83,33 +81,42 @@ def create_sample_project( """ Create sample project """ - project_name = f"sample_project_{current_user.username}_{time.time()}" + project_name = f"sample_project_{uuid.uuid4().hex[:8]}" project_in = schemas.ProjectCreate( name=project_name, training_keywords=settings.SAMPLE_PROJECT_KEYWORDS, - chunk_size=1, + chunk_size=2, is_example=True, ) project = crud.project.create_project(db, user_id=current_user.id, obj_in=project_in) project_task_hash = gen_task_hash(current_user.id, project.id) + training_class_ids = ensure_labels_exist( + user_id=current_user.id, + user_labels=user_labels, + controller_client=controller_client, + keywords=settings.SAMPLE_PROJECT_KEYWORDS, + cache=cache, + ) try: - user_labels.get_class_ids(names_or_aliases=settings.SAMPLE_PROJECT_KEYWORDS) - except KeyError: - # todo refactor keywords dependencies to handle ensure given keywords exist - add_keywords(controller_client, cache, current_user.id, settings.SAMPLE_PROJECT_KEYWORDS) - - try: - resp = controller_client.create_project( + controller_client.create_project( user_id=current_user.id, project_id=project.id, task_id=project_task_hash, ) - logger.info("[create task] controller response: %s", resp) except ValueError: crud.project.soft_remove(db, id=project.id) raise FailedToCreateProject() + send_project_metrics( + current_user.id, + project.id, + project.name, + training_class_ids, + TrainingType(project.training_type).name, + int(project.create_datetime.timestamp()), + ) + background_tasks.add_task( setup_sample_project_in_background, db, @@ -129,7 +136,7 @@ def create_project( current_user: models.User = Depends(deps.get_current_active_user), project_in: schemas.ProjectCreate, controller_client: ControllerClient = Depends(deps.get_controller_client), - clickhouse: YmirClickHouse = Depends(deps.get_clickhouse_client), + user_labels: UserLabels = Depends(deps.get_user_labels), ) -> Any: """ Create project @@ -154,54 +161,48 @@ def create_project( crud.project.soft_remove(db, id=project.id) raise FailedToCreateProject() - # 3.create task info - task = crud.task.create_placeholder( - db, type_=TaskType.create_project, user_id=current_user.id, project_id=project.id - ) - - # 3.create dataset group to build dataset info - dataset_name = f"{project_in.name}_training_dataset" - dataset_paras = schemas.DatasetGroupCreate(name=dataset_name, project_id=project.id, user_id=current_user.id) - dataset_group = crud.dataset_group.create_with_user_id(db, user_id=current_user.id, obj_in=dataset_paras) - - # 4.create init dataset - dataset_in = schemas.DatasetCreate( - name=dataset_name, - hash=task_id, - dataset_group_id=dataset_group.id, - project_id=project.id, - user_id=current_user.id, - source=task.type, - result_state=ResultState.ready, - task_id=task.id, - ) - initial_dataset = crud.dataset.create_with_version(db, obj_in=dataset_in) + if project_in.enable_iteration: + # 3.create task info + task = crud.task.create_placeholder( + db, type_=TaskType.create_project, user_id=current_user.id, project_id=project.id + ) - # 5.update project info - project = crud.project.update_resources( - db, - project_id=project.id, - project_update=schemas.ProjectUpdate( - training_dataset_group_id=dataset_group.id, 
initial_training_dataset_id=initial_dataset.id - ), - ) + # 3.create dataset group to build dataset info + dataset_name = f"{project_in.name}_training_dataset" + dataset_paras = schemas.DatasetGroupCreate(name=dataset_name, project_id=project.id, user_id=current_user.id) + dataset_group = crud.dataset_group.create_with_user_id(db, user_id=current_user.id, obj_in=dataset_paras) - try: - clickhouse.save_project_parameter( - dt=project.create_datetime, - user_id=project.user_id, - id_=project.id, - name=project.name, - training_type=TrainingType(project.training_type).name, - training_keywords=json.loads(project.training_keywords), + # 4.create init dataset + dataset_in = schemas.DatasetCreate( + name=dataset_name, + hash=task_id, + dataset_group_id=dataset_group.id, + project_id=project.id, + user_id=current_user.id, + source=task.type, + result_state=ResultState.ready, + task_id=task.id, ) - except FailedToConnectClickHouse: - # clickhouse metric shouldn't block create task process - logger.exception( - "[create project metrics] failed to write project(%s) stats to clickhouse, continue anyway", - project.name, + initial_dataset = crud.dataset.create_with_version(db, obj_in=dataset_in) + + # 5.update project info + project = crud.project.update_resources( + db, + project_id=project.id, + project_update=schemas.ProjectUpdate( + training_dataset_group_id=dataset_group.id, initial_training_dataset_id=initial_dataset.id + ), ) + send_project_metrics( + current_user.id, + project.id, + project.name, + user_labels.id_for_names(names=project_in.training_keywords, raise_if_unknown=True)[0], + TrainingType(project.training_type).name, + int(project.create_datetime.timestamp()), + ) + logger.info("[create project] project record created: %s", project) return {"result": project} @@ -248,8 +249,10 @@ def update_project( raise DatasetNotFound() if project.training_dataset_group_id != dataset.dataset_group_id: raise NoDatasetPermission() - + if project_update.name and crud.project.is_duplicated_name(db, user_id=current_user.id, name=project_update.name): + raise DuplicateProjectError() project = crud.project.update_resources(db, project_id=project.id, project_update=project_update) + return {"result": project} diff --git a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/stats.py b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/stats.py index 3b74b108d1..c7c5c1e0af 100644 --- a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/stats.py +++ b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/stats.py @@ -1,12 +1,15 @@ -from datetime import datetime from enum import Enum -from typing import Any +import logging +import json +from typing import Any, Optional, List from fastapi import APIRouter, Depends, Query +from sqlalchemy.orm import Session -from app import models, schemas +from app import crud, models, schemas from app.api import deps -from app.utils.clickhouse import YmirClickHouse +from app.utils.ymir_viz import VizClient +from common_utils.labels import UserLabels router = APIRouter() @@ -17,90 +20,75 @@ class StatsPrecision(str, Enum): month = "month" -@router.get("/datasets/hot", response_model=schemas.StatsPopularDatasetsOut) -def get_most_popular_datasets( - limit: int = Query(10, description="limit the data point size"), - current_user: models.User = Depends(deps.get_current_active_user), - clickhouse: YmirClickHouse = Depends(deps.get_clickhouse_client), -) -> Any: - """ - Get top datasets ordered by ref_count - """ - stats = clickhouse.get_popular_items(current_user.id, 
column="dataset_ids", limit=limit) - return {"result": stats} - - -@router.get("/models/hot", response_model=schemas.StatsPopularModelsOut) -def get_most_popular_models( - limit: int = Query(10, description="limit the data point size"), - current_user: models.User = Depends(deps.get_current_active_user), - clickhouse: YmirClickHouse = Depends(deps.get_clickhouse_client), -) -> Any: - """ - Get top models ordered by ref_count - """ - stats = clickhouse.get_popular_items(current_user.id, column="model_ids", limit=limit) - return {"result": stats} - - -@router.get("/models/map", response_model=schemas.StatsModelmAPsOut) -def get_best_models( - limit: int = Query(10, description="limit the data point size"), - current_user: models.User = Depends(deps.get_current_active_user), - clickhouse: YmirClickHouse = Depends(deps.get_clickhouse_client), -) -> Any: - """ - Get models of highest mAP score, grouped by keyword - """ - stats = clickhouse.get_models_order_by_map(current_user.id, limit=limit) - return {"result": stats} - - -@router.get("/keywords/hot", response_model=schemas.StatsPopularKeywordsOut) -def get_most_popular_keywords( - limit: int = Query(10, description="limit the data point size"), - current_user: models.User = Depends(deps.get_current_active_user), - clickhouse: YmirClickHouse = Depends(deps.get_clickhouse_client), -) -> Any: - """ - Get top keywords ordered by ref_count - """ - stats = clickhouse.get_popular_items(current_user.id, column="keyword_ids", limit=limit) - return {"result": stats} - - -@router.get("/keywords/recommend", response_model=schemas.StatsKeywordsRecommendOut) +@router.get("/keywords/recommend", response_model=schemas.StatsMetricsQueryOut) def recommend_keywords( - dataset_ids: str = Query(..., description="recommend keywords based on given datasets", example="1,2,3"), + dataset_ids: str = Query(None, description="recommend keywords based on given datasets", example="1,2,3"), limit: int = Query(10, description="limit the data point size"), + db: Session = Depends(deps.get_db), current_user: models.User = Depends(deps.get_current_active_user), - clickhouse: YmirClickHouse = Depends(deps.get_clickhouse_client), + viz_client: VizClient = Depends(deps.get_viz_client), + user_labels: UserLabels = Depends(deps.get_user_labels), ) -> Any: """ - Recommend top keywords based on datasets + Recommend top keywords based on task history.
""" - dataset_ids_ = [int(id_) for id_ in dataset_ids.split(",")] - stats = clickhouse.get_recommend_keywords(current_user.id, dataset_ids=dataset_ids_, limit=limit) + keyword_ids: Optional[List[int]] = None + if dataset_ids: + datasets = crud.dataset.get_multi_by_ids(db, ids=[int(i) for i in dataset_ids.split(",")]) + keywords = extract_keywords(datasets) + keyword_ids = user_labels.id_for_names(names=keywords, raise_if_unknown=True)[0] + + stats = viz_client.query_metrics( + metrics_group="task", + user_id=current_user.id, + query_field="class_ids", + bucket="count", + limit=limit, + keyword_ids=keyword_ids, + ) + for element in stats: + element["legend"] = user_labels.main_name_for_id(int(element["legend"])) + logging.info(f"viz stats: {stats}") return {"result": stats} -@router.get("/projects/count", response_model=schemas.StatsProjectsCountOut) +@router.get("/projects/count", response_model=schemas.StatsMetricsQueryOut) def get_projects_count( precision: StatsPrecision = Query(..., description="day, week or month"), limit: int = Query(10, description="limit the data point size"), current_user: models.User = Depends(deps.get_current_active_user), - clickhouse: YmirClickHouse = Depends(deps.get_clickhouse_client), + viz_client: VizClient = Depends(deps.get_viz_client), ) -> Any: """ Get projects count divided by time ranges """ - end_at = datetime.now() - start_at = end_at.replace(end_at.year - 1) - stats = clickhouse.get_project_count( - current_user.id, - precision=precision.value, - start_at=start_at, - end_at=end_at, + stats = viz_client.query_metrics( + metrics_group="project", + user_id=current_user.id, + query_field="create_time", + bucket="time", + unit=precision.value, limit=limit, ) + + logging.info(f"viz stats: {stats}") return {"result": stats} + + +def extract_keywords(datasets: List[models.Dataset]) -> List[str]: + """ + dataset got keywords column which contains: + { + "gt": {"keyword": count}, + "pred": {"keyword": count}, + } + + extract all the keywords in gt and pred + """ + datasets_keywords = [json.loads(dataset.keywords) for dataset in datasets if dataset.keywords] + keywords = { + k + for dataset_keywords in datasets_keywords + for k in (list(dataset_keywords["gt"]) + list(dataset_keywords["pred"])) + } + return list(keywords) diff --git a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/tasks.py b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/tasks.py index 9a9b527a6c..321305ccb4 100644 --- a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/tasks.py +++ b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/tasks.py @@ -14,7 +14,7 @@ from app.api import deps from app.api.errors.errors import ( DuplicateTaskError, - FailedToUpdateTaskStatus, + FailedToUpdateTaskStatusTemporally, ModelNotReady, NoTaskPermission, ObsoleteTaskStatus, @@ -26,11 +26,8 @@ TaskType, ) from app.config import settings -from app.utils.clickhouse import YmirClickHouse -from app.utils.graph import GraphClient from app.utils.timeutil import convert_datetime_to_timestamp from app.utils.ymir_controller import ControllerClient, gen_user_hash -from app.utils.ymir_viz import VizClient from app.libs.redis_stream import RedisStream from app.libs.tasks import TaskResult, create_single_task from common_utils.labels import UserLabels @@ -78,6 +75,8 @@ def list_tasks( name: str = Query(None, description="search by task name"), type_: TaskType = Query(None, alias="type"), state: TaskState = Query(None), + dataset_ids: str = Query(None, example="1,2,3"), + model_stage_ids: str = Query(None, 
example="4,5,6"), offset: int = Query(None), limit: int = Query(None), order_by: SortField = Query(SortField.id), @@ -96,6 +95,8 @@ def list_tasks( name=name, type_=type_, state=state, + dataset_ids=[int(i) for i in dataset_ids.split(",")] if dataset_ids else [], + model_stage_ids=[int(i) for i in model_stage_ids.split(",")] if model_stage_ids else [], offset=offset, limit=limit, order_by=order_by.name, @@ -248,10 +249,7 @@ def update_task_status( db: Session = Depends(deps.get_db), request: Request, task_update: schemas.TaskUpdateStatus, - graph_db: GraphClient = Depends(deps.get_graph_client), controller_client: ControllerClient = Depends(deps.get_controller_client), - viz_client: VizClient = Depends(deps.get_viz_client), - clickhouse: YmirClickHouse = Depends(deps.get_clickhouse_client), ) -> Any: """ Update status of a task @@ -284,8 +282,8 @@ def update_task_status( try: updated_task = task_result.update(task_result=task_update) except (ConnectionError, HTTPError, Timeout): - logger.error("Failed to update update task status") - raise FailedToUpdateTaskStatus() + logger.exception("Failed to update task status. Try again later") + raise FailedToUpdateTaskStatusTemporally() except ModelNotReady: logger.warning("Model Not Ready") else: @@ -302,6 +300,7 @@ def update_task_status( # reformatting is needed payload = {updated_task.hash: task_update_msg.dict()} asyncio.run(request.app.sio.emit(event="update_taskstate", data=payload, namespace=namespace)) + logger.info("notify task update (%s) to frontend (%s)", payload, namespace) return {"result": task_in_db} @@ -324,3 +323,25 @@ async def save_task_update_to_redis_stream(*, task_events: schemas.TaskMonitorEvents) -> Response: await redis_stream.publish(event.json()) logger.info("save task update to redis stream: %s", event.json()) return Response(status_code=204) + + +@router.get( + "/pai/{task_id}", + response_model=schemas.task.PaiTaskOut, + response_model_exclude_none=True, + responses={404: {"description": "Task Not Found"}}, +) +def get_openpai_task( + db: Session = Depends(deps.get_db), + task_id: int = Path(..., example=12), + current_user: models.User = Depends(deps.get_current_active_user), + controller_client: ControllerClient = Depends(deps.get_controller_client), +) -> Any: + """ + Get verbose information of OpenPAI task + """ + task = crud.task.get_by_user_and_id(db, user_id=current_user.id, id=task_id) + if not task: + raise TaskNotFound() + # mixin openpai status + return {"result": task} diff --git a/ymir/backend/src/ymir_app/app/api/deps.py b/ymir/backend/src/ymir_app/app/api/deps.py index 4d8efcb121..d37ebb9c0f 100644 --- a/ymir/backend/src/ymir_app/app/api/deps.py +++ b/ymir/backend/src/ymir_app/app/api/deps.py @@ -13,16 +13,17 @@ InvalidScope, InvalidToken, UserNotFound, + SystemVersionConflict, ) from app.config import settings from app.constants.role import Roles from app.db.session import SessionLocal from app.utils import cache as ymir_cache -from app.utils import graph, security, ymir_controller, ymir_viz -from app.utils.clickhouse import YmirClickHouse +from app.utils import security, ymir_controller, ymir_viz from app.utils.security import verify_api_key from app.utils.ymir_controller import ControllerClient from common_utils.labels import UserLabels +from common_utils.version import YMIR_VERSION reusable_oauth2 = OAuth2PasswordBearer( tokenUrl=f"{settings.API_V1_STR}/auth/token", ) @@ -63,6 +64,10 @@ def get_current_user( except (jwt.JWTError, ValidationError): logger.exception("Invalid JWT token") raise InvalidToken() + + if
token_data.version != YMIR_VERSION: + raise SystemVersionConflict() + user = crud.user.get(db, id=token_data.id) if not user: raise UserNotFound() @@ -112,26 +117,7 @@ def get_controller_client() -> Generator: def get_viz_client() -> Generator: try: - client = ymir_viz.VizClient(host=settings.VIZ_HOST) - yield client - finally: - client.close() - - -def get_graph_client() -> Generator: - try: - client = graph.GraphClient(redis_uri=settings.BACKEND_REDIS_URL) - yield client - finally: - client.close() - - -def get_graph_client_of_user( - current_user: models.User = Depends(get_current_active_user), -) -> Generator: - try: - client = graph.GraphClient(redis_uri=settings.BACKEND_REDIS_URL) - client.user_id = current_user.id + client = ymir_viz.VizClient() yield client finally: client.close() @@ -141,16 +127,16 @@ def get_cache( current_user: models.User = Depends(get_current_active_user), ) -> Generator: try: - cache_client = ymir_cache.CacheClient(settings.BACKEND_REDIS_URL, current_user.id) + cache_client = ymir_cache.CacheClient(redis_uri=settings.BACKEND_REDIS_URL, user_id=current_user.id) yield cache_client finally: cache_client.close() def get_user_labels( - current_user: models.User = Depends(get_current_active_user), - cache: ymir_cache.CacheClient = Depends(get_cache), - controller_client: ControllerClient = Depends(get_controller_client), + current_user: models.User = Depends(get_current_active_user), + cache: ymir_cache.CacheClient = Depends(get_cache), + controller_client: ControllerClient = Depends(get_controller_client), ) -> UserLabels: # todo: make a cache wrapper cached = cache.get(ymir_cache.KEYWORDS_CACHE_KEY) @@ -163,11 +149,3 @@ def get_user_labels( cache.set(ymir_cache.KEYWORDS_CACHE_KEY, user_labels.json()) return user_labels - - -def get_clickhouse_client() -> Generator: - try: - clickhouse_client = YmirClickHouse(settings.CLICKHOUSE_URI) - yield clickhouse_client - finally: - clickhouse_client.close() diff --git a/ymir/backend/src/ymir_app/app/api/errors/errors.py b/ymir/backend/src/ymir_app/app/api/errors/errors.py index 2d6ee946d6..e05c24748c 100644 --- a/ymir/backend/src/ymir_app/app/api/errors/errors.py +++ b/ymir/backend/src/ymir_app/app/api/errors/errors.py @@ -103,6 +103,11 @@ class FailedToEvaluate(ControllerError): message = "Failed to RUN EVALUATE CMD via Controller" +class PrematureDatasets(APIError): + code = error_codes.PREMATURE_DATASETS + message = "Not All The Datasets Are Ready" + + class RequiredFieldMissing(APIError): code = error_codes.REQUIRED_FIELD_MISSING message = "Required Field Missing" @@ -143,11 +148,26 @@ class ModelNotFound(NotFound): message = "Model Not Found" +class ModelStageNotFound(NotFound): + code = error_codes.MODEL_STAGE_NOT_FOUND + message = "Model Stage Not Found" + + class DatasetEvaluationNotFound(NotFound): code = error_codes.DATASET_EVALUATION_NOT_FOUND message = "Dataset Evaluation Not Found" +class DatasetEvaluationMissingAnnotation(NotFound): + code = error_codes.DATASET_EVALUATION_NO_ANNOTATIONS + message = "Could Not Evaluate Dataset Without Annotations" + + +class DatasetIndexNotReady(APIError): + code = error_codes.DATASET_INDEX_NOT_READY + message = "Dataset Index In MongoDB Not Ready" + + class ModelNotReady(APIError): code = error_codes.MODEL_NOT_READY message = "Model Not Ready" @@ -199,6 +219,12 @@ class InvalidToken(APIError): message = "Invalid Token" +class SystemVersionConflict(APIError): + status_code = 401 + code = error_codes.SYSTEM_VERSION_CONFLICT + message = "System Version Conflict" + + class 
InvalidScope(APIError): status_code = 401 code = error_codes.INVALID_SCOPE @@ -275,16 +301,11 @@ class ObsoleteTaskStatus(APIError): message = "Obsolete Task Status" -class FailedToUpdateTaskStatus(APIError): +class FailedToUpdateTaskStatusTemporally(APIError): code = error_codes.FAILED_TO_UPDATE_TASK_STATUS message = "Failed to Update Task Status" -class FailedToConnectClickHouse(APIError): - code = error_codes.FAILED_TO_CONNECT_CLICKHOUSE - message = "Failed to Connect ClickHouse" - - class FailedToCreateProject(APIError): code = error_codes.PROJECT_FAILED_TO_CREATE message = "Failed to Create Project" @@ -295,6 +316,11 @@ class ProjectNotFound(NotFound): message = "Project Not Found" +class InvalidProject(APIError): + code = error_codes.INVALID_PROJECT + message = "Invalid Project" + + class DuplicateProjectError(DuplicateError): code = error_codes.PROJECT_DUPLICATED_NAME message = "Duplicated Project Name" @@ -350,6 +376,16 @@ class FailedToUpdateIterationStage(APIError): message = "Failed to Update Iteration Stage" +class IterationStepNotFound(NotFound): + code = error_codes.ITERATION_STEP_NOT_FOUND + message = "IterationStep Not Found" + + +class IterationStepHasFinished(APIError): + code = error_codes.ITERATION_STEP_ALREADY_FINISHED + message = "IterationStep Has Finished" + + class RefuseToProcessMixedOperations(APIError): code = error_codes.REFUSE_TO_PROCESS_MIXED_OPERATIONS message = "Refuse To Process Mixed Operations" @@ -363,3 +399,23 @@ class MissingOperations(APIError): class DatasetsNotInSameGroup(APIError): code = error_codes.DATASETS_NOT_IN_SAME_GROUP message = "Datasets Not in the Same Group" + + +class InvalidModelStageName(APIError): + code = error_codes.INVALID_MODEL_STAGE_NAME + message = "Invalid Model Stage Name" + + +class VizError(APIError): + code = error_codes.VIZ_ERROR + message = "General Viz Error" + + +class FailedToParseVizResponse(VizError): + code = error_codes.FAILED_TO_PARSE_VIZ_RESP + message = "Failed to Parse Viz Response" + + +class VizTimeOut(VizError): + code = error_codes.VIZ_TIMEOUT + message = "Internal Viz Service Timeout" diff --git a/ymir/backend/src/ymir_app/app/check_mir_repo_version.py b/ymir/backend/src/ymir_app/app/check_mir_repo_version.py new file mode 100644 index 0000000000..ed74312905 --- /dev/null +++ b/ymir/backend/src/ymir_app/app/check_mir_repo_version.py @@ -0,0 +1,61 @@ +import logging +import errno +from typing import Optional + +from app.config import settings +from app.utils.ymir_controller import ControllerClient +from common_utils.version import YMIR_VERSION, ymir_salient_version + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + + +def generate_msg_box(msg: str, indent: int = 1, width: Optional[int] = None, title: Optional[str] = None) -> str: + """https://stackoverflow.com/a/58780542/2888638""" + lines = msg.split("\n") + space = " " * indent + if not width: + width = max(map(len, lines)) + box = f'╔{"═" * (width + indent * 2)}╗\n' + if title: + box += f"║{space}{title:<{width}}{space}║\n" + box += f'║{space}{"-" * len(title):<{width}}{space}║\n' + box += "".join([f"║{space}{line:<{width}}{space}║\n" for line in lines]) + box += f'╚{"═" * (width + indent * 2)}╝' + return box + + +def check_mir_repo_version() -> None: + controller = ControllerClient(settings.GRPC_CHANNEL) + try: + sandbox_versions = controller.get_cmd_version() + except ValueError: + logger.exception("[start up] Failed to get mir repo version") + raise + if not sandbox_versions: + # no versions found, fresh workplace. 
+ return + if len(sandbox_versions) > 1: + raise ValueError(f"multiple versions detected: {sandbox_versions}") + if ymir_salient_version(sandbox_versions[0]) != ymir_salient_version(YMIR_VERSION): + raise ValueError(f"mismatched salient version: {sandbox_versions[0]} vs {YMIR_VERSION}.") + + +def main() -> None: + logger.info("[start up] Check existing mir repo version") + try: + check_mir_repo_version() + except ValueError: + logger.error( + generate_msg_box( + "Please upgrade existing mir repo with:\nbash ymir.sh upgrade", + title="ERROR: Incompatible Mir Version", + ) + ) + exit(errno.EPERM) + else: + logger.info("[start up] mir version check passed.") + + +if __name__ == "__main__": + main() diff --git a/ymir/backend/src/ymir_app/app/config.py b/ymir/backend/src/ymir_app/app/config.py index 054a50fec9..d2a0c7afac 100644 --- a/ymir/backend/src/ymir_app/app/config.py +++ b/ymir/backend/src/ymir_app/app/config.py @@ -1,7 +1,7 @@ import secrets -from typing import List, Optional +from typing import Any, Dict, List, Optional -from pydantic import AnyHttpUrl, BaseSettings, EmailStr +from pydantic import AnyHttpUrl, BaseSettings, EmailStr, root_validator class Settings(BaseSettings): @@ -10,7 +10,6 @@ class Settings(BaseSettings): NGINX_PREFIX: str = "" API_V1_STR: str = "/api/v1" DATABASE_URI: str = "sqlite:///app.db" - CLICKHOUSE_URI: str = "clickhouse" TOKEN_URL: str = "/auth/token" GRPC_CHANNEL: str = "controller:50066" ACCESS_TOKEN_EXPIRE_MINUTES: int = 60 * 40 # 40 hours @@ -23,7 +22,6 @@ class Settings(BaseSettings): REGISTRATION_NEEDS_APPROVAL: bool = False # assets viz - VIZ_HOST: str = "viz:9099" VIZ_TIMEOUT: int = 30 FIRST_ADMIN: EmailStr = "admin@example.com" # type: ignore @@ -43,9 +41,6 @@ class Settings(BaseSettings): # redis BACKEND_REDIS_URL: str = "redis://redis:6379/0" - # graph - MAX_HOPS: int = 5 - # all the emails things EMAILS_ENABLED: bool = False SMTP_TLS: bool = True @@ -87,9 +82,38 @@ class Settings(BaseSettings): # Sample Project configs SAMPLE_PROJECT_KEYWORDS: List[str] = ["person", "cat"] - SAMPLE_PROJECT_TESTING_DATASET_URL: str = "http://web/val.zip" + SAMPLE_PROJECT_VALIDATION_DATASET_URL: str = "http://web/val.zip" SAMPLE_PROJECT_MINING_DATASET_URL: str = "http://web/mining.zip" SAMPLE_PROJECT_MODEL_URL: str = "http://web/683f4fa14d1baa733a87d9644bb0457cbed5aba8" + # OpenPAI + OPENPAI_ENABLED: bool = False + OPENPAI_HOST: Optional[str] = None + OPENPAI_TOKEN: Optional[str] = None + OPENPAI_STORAGE: Optional[str] = None + OPENPAI_USER: Optional[str] = None + OPENPAI_CLUSTER: Optional[str] = None + OPENPAI_GPUTYPE: Optional[str] = None + + @root_validator(pre=True) + def get_openpai_enabled(cls, values: Dict[str, Any]) -> Dict: + values["OPENPAI_ENABLED"] = bool( + values.get("OPENPAI_HOST") + and values.get("OPENPAI_TOKEN") + and values.get("OPENPAI_STORAGE") + and values.get("OPENPAI_USER") + ) + return values + + # ymir_viewer + VIEWER_HOST_PORT: Optional[int] = None + + # migration + MIGRATION_CHECKPOINT: str = "9bb7bb8b71c3" + + # cron job + CRON_MIN_IDLE_TIME: int = 2 * 60 * 1000 # 2 minutes + CRON_CHECK_INTERVAL: int = 10000 # 10 seconds + settings = Settings(_env_file=".env") # type: ignore diff --git a/ymir/backend/src/ymir_app/app/constants/state.py b/ymir/backend/src/ymir_app/app/constants/state.py index 49fddd12c7..4db2434a35 100644 --- a/ymir/backend/src/ymir_app/app/constants/state.py +++ b/ymir/backend/src/ymir_app/app/constants/state.py @@ -1,14 +1,16 @@ +from dataclasses import dataclass from enum import IntEnum from 
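The root_validator above reads: OpenPAI dispatch is enabled only when host, token, storage and user are all non-empty (cluster and GPU type stay optional). A minimal sketch of the same check against raw environment variables:

```python
import os

required = ("OPENPAI_HOST", "OPENPAI_TOKEN", "OPENPAI_STORAGE", "OPENPAI_USER")
openpai_enabled = all(os.environ.get(k) for k in required)
print(openpai_enabled)  # False unless every required variable is set and non-empty
```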
diff --git a/ymir/backend/src/ymir_app/app/constants/state.py b/ymir/backend/src/ymir_app/app/constants/state.py
index 49fddd12c7..4db2434a35 100644
--- a/ymir/backend/src/ymir_app/app/constants/state.py
+++ b/ymir/backend/src/ymir_app/app/constants/state.py
@@ -1,14 +1,16 @@
+from dataclasses import dataclass
 from enum import IntEnum
 
 from common_utils.percent_log_util import LogState
+from mir.protos import mir_command_pb2 as mir_cmd_pb
 from proto import backend_pb2 as mirsvrpb
 
 
 class DockerImageType(IntEnum):
-    unknown = mirsvrpb.TaskType.TaskTypeUnknown
-    training = mirsvrpb.TaskType.TaskTypeTraining
-    mining = mirsvrpb.TaskType.TaskTypeMining
-    infer = mirsvrpb.TaskType.TaskTypeInfer
+    unknown = mir_cmd_pb.TaskType.TaskTypeUnknown
+    training = mir_cmd_pb.TaskType.TaskTypeTraining
+    mining = mir_cmd_pb.TaskType.TaskTypeMining
+    infer = mir_cmd_pb.TaskType.TaskTypeInfer
 
 
 class DockerImageState(IntEnum):
@@ -18,20 +20,20 @@ class DockerImageState(IntEnum):
 
 
 class TaskType(IntEnum):
-    unknown = mirsvrpb.TaskType.TaskTypeUnknown
-    training = mirsvrpb.TaskType.TaskTypeTraining
-    mining = mirsvrpb.TaskType.TaskTypeMining
-    label = mirsvrpb.TaskType.TaskTypeLabel
-    filter = mirsvrpb.TaskType.TaskTypeFilter
-    import_data = mirsvrpb.TaskType.TaskTypeImportData
-    export_data = mirsvrpb.TaskType.TaskTypeExportData
-    copy_data = mirsvrpb.TaskType.TaskTypeCopyData
-    merge = mirsvrpb.TaskType.TaskTypeMerge
-    infer = mirsvrpb.TaskType.TaskTypeInfer
-    data_fusion = mirsvrpb.TaskType.TaskTypeFusion
-    copy_model = mirsvrpb.TaskType.TaskTypeCopyModel
-    import_model = mirsvrpb.TaskType.TaskTypeImportModel
-    dataset_infer = mirsvrpb.TaskType.TaskTypeDatasetInfer
+    unknown = mir_cmd_pb.TaskType.TaskTypeUnknown
+    training = mir_cmd_pb.TaskType.TaskTypeTraining
+    mining = mir_cmd_pb.TaskType.TaskTypeMining
+    label = mir_cmd_pb.TaskType.TaskTypeLabel
+    filter = mir_cmd_pb.TaskType.TaskTypeFilter
+    import_data = mir_cmd_pb.TaskType.TaskTypeImportData
+    export_data = mir_cmd_pb.TaskType.TaskTypeExportData
+    copy_data = mir_cmd_pb.TaskType.TaskTypeCopyData
+    merge = mir_cmd_pb.TaskType.TaskTypeMerge
+    infer = mir_cmd_pb.TaskType.TaskTypeInfer
+    data_fusion = mir_cmd_pb.TaskType.TaskTypeFusion
+    copy_model = mir_cmd_pb.TaskType.TaskTypeCopyModel
+    import_model = mir_cmd_pb.TaskType.TaskTypeImportModel
+    dataset_infer = mir_cmd_pb.TaskType.TaskTypeDatasetInfer
 
     # fixme
     # create_project is not the type of TASK_CREATE, but empty dataset need a task
@@ -78,5 +80,23 @@ class TrainingType(IntEnum):
     object_detect = 1
 
 
+class AnnotationType(IntEnum):
+    gt = 1
+    pred = 2
+
+
+@dataclass(frozen=True)
+class IterationStepTemplate:
+    name: str
+    task_type: TaskType
+
+
 RunningStates = [TaskState.pending, TaskState.running]
 FinalStates = [TaskState.done, TaskState.error, TaskState.terminate]
+IterationStepTemplates = [
+    IterationStepTemplate("prepare_mining", TaskType.data_fusion),
+    IterationStepTemplate("mining", TaskType.mining),
+    IterationStepTemplate("label", TaskType.label),
+    IterationStepTemplate("prepare_training", TaskType.data_fusion),
+    IterationStepTemplate("training", TaskType.training),
+]
diff --git a/ymir/backend/src/ymir_app/app/crud/__init__.py b/ymir/backend/src/ymir_app/app/crud/__init__.py
index 936e970c15..f5314b832a 100644
--- a/ymir/backend/src/ymir_app/app/crud/__init__.py
+++ b/ymir/backend/src/ymir_app/app/crud/__init__.py
@@ -4,7 +4,9 @@
 from .crud_image_config import image_config
 from .crud_image_relationship import image_relationship
 from .crud_iteration import iteration
+from .crud_iteration_step import iteration_step
 from .crud_model import model
+from .crud_model_stage import model_stage
 from .crud_model_group import model_group
 from .crud_project import project
 from .crud_role import role
diff --git a/ymir/backend/src/ymir_app/app/crud/base.py b/ymir/backend/src/ymir_app/app/crud/base.py
index dcaae6ae50..13af5cd6a0 100644
--- a/ymir/backend/src/ymir_app/app/crud/base.py
+++ b/ymir/backend/src/ymir_app/app/crud/base.py
@@ -87,6 +87,15 @@ def get_multi_by_project(self, db: Session, *, project_id: int) -> List[ModelTyp
             .all()
         )
 
+    def get_multi_by_iteration(self, db: Session, *, iteration_id: int) -> List[ModelType]:
+        return (
+            db.query(self.model)
+            .filter(
+                self.model.iteration_id == iteration_id,  # type: ignore
+            )
+            .all()
+        )
+
     def create(self, db: Session, *, obj_in: CreateSchemaType) -> ModelType:
         obj_in_data = jsonable_encoder(obj_in)
         db_obj = self.model(**obj_in_data)  # type: ignore
@@ -95,6 +104,12 @@ def create(self, db: Session, *, obj_in: CreateSchemaType) -> ModelType:
         db.refresh(db_obj)
         return db_obj
 
+    def batch_create(self, db: Session, *, objs_in: List[CreateSchemaType]) -> List[ModelType]:
+        db_objs = [self.model(**jsonable_encoder(obj_in)) for obj_in in objs_in]  # type: ignore
+        db.bulk_save_objects(db_objs)
+        db.commit()
+        return db_objs
+
     def create_with_user_id(self, db: Session, *, user_id: int, obj_in: CreateSchemaType) -> ModelType:
         obj_in_data = jsonable_encoder(obj_in)
         obj_in_data["user_id"] = user_id
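Note on batch_create above: Session.bulk_save_objects issues a single executemany, but it does not refresh the Python objects, so database-generated primary keys stay unset on the returned instances. A runnable illustration of that trade-off under assumed conditions (in-memory SQLite, throwaway Item model; not YMIR code):

    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.orm import Session, declarative_base

    Base = declarative_base()

    class Item(Base):
        __tablename__ = "item"
        id = Column(Integer, primary_key=True, autoincrement=True)
        name = Column(String(32))

    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)

    with Session(engine) as session:
        items = [Item(name=f"step-{i}") for i in range(3)]
        session.bulk_save_objects(items)  # fast path: skips identity-map bookkeeping
        session.commit()
        assert items[0].id is None                  # objects are NOT refreshed
        assert session.query(Item).count() == 3     # but the rows are in the table

Callers that need the generated ids afterwards (as the iteration-step code does) have to re-query, which is why the steps are fetched again via get_multi_by_iteration rather than reusing batch_create's return value.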
"pred": {}}) + db.add(dataset) + db.commit() + db.refresh(dataset) + return dataset + dataset = CRUDDataset(Dataset) diff --git a/ymir/backend/src/ymir_app/app/crud/crud_dataset_group.py b/ymir/backend/src/ymir_app/app/crud/crud_dataset_group.py index 26098e31d7..c4dbf9dba5 100644 --- a/ymir/backend/src/ymir_app/app/crud/crud_dataset_group.py +++ b/ymir/backend/src/ymir_app/app/crud/crud_dataset_group.py @@ -5,6 +5,7 @@ from sqlalchemy.orm import Session from app.crud.base import CRUDBase +from app.libs.common import pagination from app.models import DatasetGroup from app.schemas.dataset_group import DatasetGroupCreate, DatasetGroupUpdate @@ -67,7 +68,10 @@ def get_multi_dataset_groups( order_by_column = desc(order_by_column) query = query.order_by(order_by_column) - return query.offset(offset).limit(limit).all(), query.count() + # fixme + # SQLAlchemy do not guarantee precise count + items = query.all() + return pagination(items, offset, limit), len(items) dataset_group = CRUDDatasetGroup(DatasetGroup) diff --git a/ymir/backend/src/ymir_app/app/crud/crud_image.py b/ymir/backend/src/ymir_app/app/crud/crud_image.py index f57457387a..9e4c0ba858 100644 --- a/ymir/backend/src/ymir_app/app/crud/crud_image.py +++ b/ymir/backend/src/ymir_app/app/crud/crud_image.py @@ -43,9 +43,9 @@ def get_by_url(self, db: Session, url: str) -> Optional[DockerImage]: query = db.query(self.model).filter(not_(self.model.is_deleted)) return query.filter(self.model.url == url).first() # type: ignore - def docker_name_exists(self, db: Session, url: str) -> bool: + def get_by_name(self, db: Session, name: str) -> Optional[DockerImage]: query = db.query(self.model).filter(not_(self.model.is_deleted)) - return query.filter(self.model.url == url).first() is not None + return query.filter(self.model.name == name).first() # type: ignore def update( self, diff --git a/ymir/backend/src/ymir_app/app/crud/crud_iteration_step.py b/ymir/backend/src/ymir_app/app/crud/crud_iteration_step.py new file mode 100644 index 0000000000..d26cb3dac8 --- /dev/null +++ b/ymir/backend/src/ymir_app/app/crud/crud_iteration_step.py @@ -0,0 +1,56 @@ +import json +from typing import Optional +from sqlalchemy.orm import Session + +from app.crud.base import CRUDBase +from app.constants.state import ResultState +from app.models.iteration_step import IterationStep +from app.schemas.iteration_step import IterationStepCreate, IterationStepUpdate + + +class StepNotFound(Exception): + pass + + +class CRUDIterationStep(CRUDBase[IterationStep, IterationStepCreate, IterationStepUpdate]): + def get_next_step(self, db: Session, id: int) -> Optional[IterationStep]: + step = self.get(db, id) + if not step: + raise StepNotFound() + steps_in_same_iteration = self.get_multi_by_iteration(db, iteration_id=step.iteration_id) + current_idx = [i.id for i in steps_in_same_iteration].index(step.id) + return steps_in_same_iteration[current_idx - 1] + + def start(self, db: Session, id: int, task_id: int) -> IterationStep: + """ + start given iteration_step: + 1. create task + 2. 
diff --git a/ymir/backend/src/ymir_app/app/crud/crud_dataset_group.py b/ymir/backend/src/ymir_app/app/crud/crud_dataset_group.py
index 26098e31d7..c4dbf9dba5 100644
--- a/ymir/backend/src/ymir_app/app/crud/crud_dataset_group.py
+++ b/ymir/backend/src/ymir_app/app/crud/crud_dataset_group.py
@@ -5,6 +5,7 @@
 from sqlalchemy.orm import Session
 
 from app.crud.base import CRUDBase
+from app.libs.common import pagination
 from app.models import DatasetGroup
 from app.schemas.dataset_group import DatasetGroupCreate, DatasetGroupUpdate
 
@@ -67,7 +68,10 @@ def get_multi_dataset_groups(
             order_by_column = desc(order_by_column)
         query = query.order_by(order_by_column)
 
-        return query.offset(offset).limit(limit).all(), query.count()
+        # fixme
+        # SQLAlchemy does not guarantee a precise count
+        items = query.all()
+        return pagination(items, offset, limit), len(items)
 
 
 dataset_group = CRUDDatasetGroup(DatasetGroup)
diff --git a/ymir/backend/src/ymir_app/app/crud/crud_image.py b/ymir/backend/src/ymir_app/app/crud/crud_image.py
index f57457387a..9e4c0ba858 100644
--- a/ymir/backend/src/ymir_app/app/crud/crud_image.py
+++ b/ymir/backend/src/ymir_app/app/crud/crud_image.py
@@ -43,9 +43,9 @@ def get_by_url(self, db: Session, url: str) -> Optional[DockerImage]:
         query = db.query(self.model).filter(not_(self.model.is_deleted))
         return query.filter(self.model.url == url).first()  # type: ignore
 
-    def docker_name_exists(self, db: Session, url: str) -> bool:
+    def get_by_name(self, db: Session, name: str) -> Optional[DockerImage]:
         query = db.query(self.model).filter(not_(self.model.is_deleted))
-        return query.filter(self.model.url == url).first() is not None
+        return query.filter(self.model.name == name).first()  # type: ignore
 
     def update(
         self,
diff --git a/ymir/backend/src/ymir_app/app/crud/crud_iteration_step.py b/ymir/backend/src/ymir_app/app/crud/crud_iteration_step.py
new file mode 100644
index 0000000000..d26cb3dac8
--- /dev/null
+++ b/ymir/backend/src/ymir_app/app/crud/crud_iteration_step.py
@@ -0,0 +1,56 @@
+import json
+from typing import Optional
+from sqlalchemy.orm import Session
+
+from app.crud.base import CRUDBase
+from app.constants.state import ResultState
+from app.models.iteration_step import IterationStep
+from app.schemas.iteration_step import IterationStepCreate, IterationStepUpdate
+
+
+class StepNotFound(Exception):
+    pass
+
+
+class CRUDIterationStep(CRUDBase[IterationStep, IterationStepCreate, IterationStepUpdate]):
+    def get_next_step(self, db: Session, id: int) -> Optional[IterationStep]:
+        step = self.get(db, id)
+        if not step:
+            raise StepNotFound()
+        steps_in_same_iteration = self.get_multi_by_iteration(db, iteration_id=step.iteration_id)
+        next_idx = [i.id for i in steps_in_same_iteration].index(step.id) + 1
+        return steps_in_same_iteration[next_idx] if next_idx < len(steps_in_same_iteration) else None
+
+    def start(self, db: Session, id: int, task_id: int) -> IterationStep:
+        """
+        start given iteration_step:
+        1. create task
+        2. save task result in related step
+        """
+        step = self.get(db, id)
+        if not step:
+            raise StepNotFound()
+        updates = {"task_id": task_id}
+        return self.update(db, db_obj=step, obj_in=updates)
+
+    def finish(self, db: Session, id: int) -> IterationStep:
+        step = self.get(db, id)
+        if not step:
+            raise StepNotFound()
+
+        if step.state == ResultState.ready:
+            # save result as task presetting for next_step
+            next_step = self.get_next_step(db, step.id)
+            if next_step and step.result:
+                task_presetting = dict(next_step.presetting)
+                if step.result_dataset:
+                    task_presetting["dataset_id"] = step.result_dataset.id
+                if step.result_model:
+                    task_presetting["model_id"] = step.result_model.id
+                self.update(db, db_obj=next_step, obj_in={"serialized_presetting": json.dumps(task_presetting)})
+
+        # set current step as finished no matter what
+        return self.update(db, db_obj=step, obj_in={"is_finished": True})
+
+
+iteration_step = CRUDIterationStep(IterationStep)
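The finish method above is the hand-off between iteration steps: when a step's result is ready, its output dataset or model id is written into the next step's presetting, so the next task starts pre-wired to the previous output. A short walk-through with hypothetical ids (not actual YMIR data):

    import json

    # after the "mining" step produces dataset 42, the "label" step's
    # presetting is amended before mining is marked finished
    presetting_before = {}            # label step, untouched so far
    result_dataset_id = 42            # hypothetical output of the mining step

    task_presetting = dict(presetting_before)
    task_presetting["dataset_id"] = result_dataset_id
    serialized = json.dumps(task_presetting)

    assert serialized == '{"dataset_id": 42}'  # stored in serialized_presetting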
diff --git a/ymir/backend/src/ymir_app/app/crud/crud_model.py b/ymir/backend/src/ymir_app/app/crud/crud_model.py
index be4c38a68c..a138dd9489 100644
--- a/ymir/backend/src/ymir_app/app/crud/crud_model.py
+++ b/ymir/backend/src/ymir_app/app/crud/crud_model.py
@@ -87,6 +87,7 @@ def create_with_version(self, db: Session, obj_in: ModelCreate, dest_group_name:
         db_obj = Model(
             version_num=version_num,
             hash=obj_in.hash,
+            description=obj_in.description,
             source=int(obj_in.source),
             result_state=int(obj_in.result_state),
             model_group_id=obj_in.model_group_id,
@@ -100,10 +101,16 @@ def create_with_version(self, db: Session, obj_in: ModelCreate, dest_group_name:
         return db_obj
 
     def create_as_task_result(
-        self, db: Session, task: schemas.TaskInternal, dest_group_id: int, dest_group_name: str
+        self,
+        db: Session,
+        task: schemas.TaskInternal,
+        dest_group_id: int,
+        dest_group_name: str,
+        description: Optional[str] = None,
     ) -> Model:
         model_in = ModelCreate(
             hash=task.hash,
+            description=description,
             source=task.type,
             result_state=ResultState.processing,
             model_group_id=dest_group_id,
@@ -129,6 +136,40 @@ def update_state(
         db.refresh(model)
         return model
 
+    def update_recommonded_stage(
+        self,
+        db: Session,
+        *,
+        model_id: int,
+        stage_id: int,
+    ) -> Optional[Model]:
+        model = self.get(db, id=model_id)
+        if not model:
+            return model
+        model.recommended_stage = stage_id
+        db.add(model)
+        db.commit()
+        db.refresh(model)
+        return model
+
+    def update_recommonded_stage_by_name(
+        self,
+        db: Session,
+        *,
+        model_id: int,
+        stage_name: str,
+    ) -> Optional[Model]:
+        model = self.get(db, id=model_id)
+        if not model:
+            return model
+        for stage in model.related_stages:
+            if stage.name == stage_name:
+                model.recommended_stage = stage.id
+                db.add(model)
+                db.commit()
+                db.refresh(model)
+        return model
+
     def finish(
         self,
         db: Session,
@@ -143,6 +184,7 @@ def finish(
         if result:
             model.map = result["map"]
             model.hash = result["hash"]
+            model.keywords = result["keywords"]
 
         model.result_state = int(result_state)
diff --git a/ymir/backend/src/ymir_app/app/crud/crud_model_group.py b/ymir/backend/src/ymir_app/app/crud/crud_model_group.py
index ef12d5ac41..5b2cf8c24a 100644
--- a/ymir/backend/src/ymir_app/app/crud/crud_model_group.py
+++ b/ymir/backend/src/ymir_app/app/crud/crud_model_group.py
@@ -5,6 +5,7 @@
 from sqlalchemy.orm import Session
 
 from app.crud.base import CRUDBase
+from app.libs.common import pagination
 from app.models import ModelGroup
 from app.schemas.model_group import ModelGroupCreate, ModelGroupUpdate
 
@@ -70,7 +71,10 @@ def get_multi_model_groups(
             order_by_column = desc(order_by_column)
         query = query.order_by(order_by_column)
 
-        return query.offset(offset).limit(limit).all(), query.count()
+        # fixme
+        # SQLAlchemy does not guarantee a precise count
+        items = query.all()
+        return pagination(items, offset, limit), len(items)
 
     def get_from_training_dataset(self, db: Session, training_dataset_id: int) -> Optional[ModelGroup]:
         return (
diff --git a/ymir/backend/src/ymir_app/app/crud/crud_model_stage.py b/ymir/backend/src/ymir_app/app/crud/crud_model_stage.py
new file mode 100644
index 0000000000..abafd00375
--- /dev/null
+++ b/ymir/backend/src/ymir_app/app/crud/crud_model_stage.py
@@ -0,0 +1,25 @@
+from typing import Any, Optional
+from sqlalchemy.orm import Session
+
+from app.api.errors.errors import InvalidModelStageName
+from app.crud.base import CRUDBase
+from app.models import ModelStage
+from app.schemas.model_stage import ModelStageCreate, ModelStageUpdate
+
+
+class CRUDModelStage(CRUDBase[ModelStage, ModelStageCreate, ModelStageUpdate]):
+
+    def get(self, db: Session, id: Any) -> Optional[ModelStage]:
+        stage = db.query(self.model).filter(self.model.id == id).first()
+        if stage and not is_valid_model_stage_name(stage.name):
+            raise InvalidModelStageName()
+        return stage
+
+
+def is_valid_model_stage_name(name: Optional[str]) -> bool:
+    if not name:
+        return False
+    return name.isidentifier()
+
+
+model_stage = CRUDModelStage(ModelStage)
diff --git a/ymir/backend/src/ymir_app/app/crud/crud_project.py b/ymir/backend/src/ymir_app/app/crud/crud_project.py
index 1cc9e8edc1..9f255ae581 100644
--- a/ymir/backend/src/ymir_app/app/crud/crud_project.py
+++ b/ymir/backend/src/ymir_app/app/crud/crud_project.py
@@ -7,7 +7,7 @@
 from app.config import settings
 from app.crud.base import CRUDBase
-from app.models import Project
+from app.models import Project, Iteration
 from app.schemas.project import ProjectCreate, ProjectUpdate
 from app.api.errors.errors import ProjectNotFound
 
@@ -34,6 +34,8 @@ def create_project(
             training_type=obj_in.training_type,
             training_keywords=training_keywords,
             description=obj_in.description,
+            enable_iteration=obj_in.enable_iteration,
+            is_example=obj_in.is_example,
         )
         db.add(db_obj)
         db.commit()
@@ -107,5 +109,11 @@ def update_resources(self, db: Session, *, project_id: int, project_update: Proj
             raise ProjectNotFound()
         return self.update(db, db_obj=project, obj_in=project_update)
 
+    def get_previous_iterations(self, db: Session, *, project_id: int, iteration_id: int) -> List[Iteration]:
+        project = self.get(db, id=project_id)
+        if not project:
+            raise ProjectNotFound()
+        return [iteration for iteration in project.iterations if iteration.id < iteration_id]
+
 
 project = CRUDProject(Project)
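A quick look at the stage-name rule in crud_model_stage.py above: str.isidentifier() accepts anything that could be a Python identifier, which conveniently rejects empty, path-like, or whitespace-bearing names before they reach the training executor:

    assert "best_stage_0".isidentifier() is True
    assert "stage-1".isidentifier() is False   # hyphen rejected
    assert "0stage".isidentifier() is False    # leading digit rejected
    assert "".isidentifier() is False          # empty rejected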
diff --git a/ymir/backend/src/ymir_app/app/crud/crud_task.py b/ymir/backend/src/ymir_app/app/crud/crud_task.py
index 117c2e0038..3bb7b0c06c 100644
--- a/ymir/backend/src/ymir_app/app/crud/crud_task.py
+++ b/ymir/backend/src/ymir_app/app/crud/crud_task.py
@@ -24,11 +24,19 @@ def create_task(
         state: int = int(TaskState.pending),
         percent: float = 0,
     ) -> Task:
+        if obj_in.parameters:
+            parameters = obj_in.parameters.json()
+            dataset_id = obj_in.parameters.dataset_id
+            model_stage_id = obj_in.parameters.model_stage_id
+        else:
+            parameters, dataset_id, model_stage_id = None, None, None  # type: ignore
         db_obj = Task(
             name=obj_in.name,
             type=obj_in.type,
             config=obj_in.docker_image_config if obj_in.docker_image_config else None,
-            parameters=obj_in.parameters.json() if obj_in.parameters else None,
+            parameters=parameters,
+            dataset_id=dataset_id,
+            model_stage_id=model_stage_id,
             project_id=obj_in.project_id,
             hash=task_hash,
             user_id=user_id,
@@ -166,6 +174,8 @@ def get_multi_tasks(
         name: Optional[str] = None,
         type_: Optional[TaskType] = None,
         state: Optional[TaskState] = None,
+        dataset_ids: List[int],
+        model_stage_ids: List[int],
         start_time: Optional[int] = None,
         end_time: Optional[int] = None,
         offset: int = 0,
@@ -182,6 +192,12 @@ def get_multi_tasks(
             query = query.filter(self.model.type == int(type_))
         if state:
             query = query.filter(self.model.state == int(state))
+
+        if dataset_ids:
+            query = query.filter(self.model.dataset_id.in_(dataset_ids))
+        if model_stage_ids:
+            query = query.filter(self.model.model_stage_id.in_(model_stage_ids))
+
         if start_time and end_time:
             _start_time = datetime.utcfromtimestamp(start_time)
             _end_time = datetime.utcfromtimestamp(end_time)
diff --git a/ymir/backend/src/ymir_app/app/db/init_db.py b/ymir/backend/src/ymir_app/app/db/init_db.py
index 5841582a3d..d9bf363c79 100644
--- a/ymir/backend/src/ymir_app/app/db/init_db.py
+++ b/ymir/backend/src/ymir_app/app/db/init_db.py
@@ -1,3 +1,4 @@
+import time
 import json
 
 from sqlalchemy.orm import Session
@@ -55,3 +56,35 @@ def init_db(db: Session) -> None:
                 type=int(config["type"]),
             )
             crud.image_config.create(db, obj_in=image_config_in)
+
+
+def migrate_data(db: Session) -> None:
+    """
+    migrate data from pre-1.3.0 version:
+    1. create default model stage
+    2. update dataset keywords structure (in {"gt": , "pred": } format)
+    """
+    total_models = crud.model.total(db)
+    models = crud.model.get_multi(db, limit=total_models)
+    for model in models:
+        if model.recommended_stage:
+            # no need to migrate
+            continue
+        if not model.map:
+            # skip model without map
+            continue
+        if model.default_stage:
+            stage = model.default_stage
+        else:
+            stage = crud.model_stage.create(
+                db,
+                obj_in=schemas.ModelStageCreate(
+                    name="default_best_stage", map=model.map, timestamp=int(time.time()), model_id=model.id
+                ),
+            )
+        crud.model.update_recommonded_stage(db, model_id=model.id, stage_id=stage.id)
+
+    total_datasets = crud.dataset.total(db)
+    datasets = crud.dataset.get_multi(db, limit=total_datasets)
+    for dataset in datasets:
+        crud.dataset.migrate_keywords(db, id=dataset.id)
diff --git a/ymir/backend/src/ymir_app/app/init_clickhouse.py b/ymir/backend/src/ymir_app/app/init_clickhouse.py
deleted file mode 100644
index 4db1a69191..0000000000
--- a/ymir/backend/src/ymir_app/app/init_clickhouse.py
+++ /dev/null
@@ -1,97 +0,0 @@
-import logging
-import os
-
-from clickhouse_driver import Client, errors
-
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
-
-
-CLICKHOUSE_URI = os.environ.get("CLICKHOUSE_URI", "clickhouse")
-
-project_table = """\
-CREATE TABLE project
-(
-    created_time DateTime,
-    user_id Integer,
-    id Integer,
-    name String,
-    training_type LowCardinality(String),
-    training_keywords Array(LowCardinality(String))
-)
-ENGINE = MergeTree()
-ORDER BY created_time;"""
-
-
-task_table = """\
-CREATE TABLE task_create
-(
-    created_time DateTime,
-    user_id Integer,
-    project_id Integer,
-    name String,
-    hash String,
-    type LowCardinality(String),
-    dataset_ids Array(Integer),
-    model_ids Array(Integer),
-    keyword_ids Array(LowCardinality(String))
-)
-ENGINE = MergeTree()
-ORDER BY created_time;"""
-
-
-model_table = """\
-CREATE TABLE model
-(
-    created_time DateTime,
-    user_id Integer,
-    project_id Integer,
-    group_id Integer,
-    id Integer,
-    name String,
-    hash String,
-    map Float,
-    keyword_ids Array(LowCardinality(String))
-)
-ENGINE = MergeTree()
-ORDER BY created_time;"""
-
-
-keyword_table = """\
-CREATE TABLE dataset_keywords
-(
-    created_time DateTime,
-    user_id Integer,
-    project_id Integer,
-    group_id Integer,
-    dataset_id Integer,
-    keyword_ids Array(LowCardinality(String))
-)
-ENGINE = MergeTree()
-ORDER BY created_time;"""
-
-
-clickhouse_tables = [project_table, task_table, model_table, keyword_table]
-
-
-def init() -> None:
-    client = Client(host=CLICKHOUSE_URI)
-    existing_tables = client.execute("show tables")
-
-    if not existing_tables:
-        for create_sql in clickhouse_tables:
-            client.execute(create_sql)
-
-
-def main() -> None:
-    logger.info("Creating ClickHouse tables")
-    try:
-        init()
-    except (ConnectionRefusedError, errors.NetworkError) as e:
-        logger.info(f"Fail to init clickhouse client, error: {e}.")
-        return
-    logger.info("ClickHouse tables created")
-
-
-if __name__ == "__main__":
-    main()
diff --git a/ymir/backend/src/ymir_app/app/initial_data.py b/ymir/backend/src/ymir_app/app/initial_data.py
index c50646d2df..f150e92f98 100644
--- a/ymir/backend/src/ymir_app/app/initial_data.py
+++ b/ymir/backend/src/ymir_app/app/initial_data.py
@@ -1,7 +1,14 @@
+import sys
 import logging
+from typing import Optional
 
-from app.db.init_db import init_db
+from alembic.config import Config
+from alembic.script import ScriptDirectory
+from alembic.util import CommandError
+
+from app.db.init_db import init_db, migrate_data
 from app.db.session import SessionLocal
+from app.config import settings
 
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
@@ -12,11 +19,42 @@ def init() -> None:
     init_db(db)
 
 
-def main() -> None:
+def should_migrate_data(base_alembic_revision: Optional[str]) -> bool:
+    if not base_alembic_revision:
+        return False
+    base_alembic_revision = base_alembic_revision.split()[0]
+    script_dir = ScriptDirectory.from_config(Config("alembic.ini"))
+    try:
+        revisions = list(script_dir.walk_revisions(base_alembic_revision, settings.MIGRATION_CHECKPOINT))
+    except CommandError:
+        # base alembic revision already newer than MIGRATION_CHECKPOINT
+        return False
+    # at least two migration revisions indicate that
+    # we do migrate from previous version to MIGRATION_CHECKPOINT
+    return len(revisions) > 1
+
+
+def migrate() -> None:
+    db = SessionLocal()
+    migrate_data(db)
+
+
+def main(base_alembic_revision: Optional[str]) -> None:
     logger.info("Creating initial data")
     init()
     logger.info("Initial data created")
 
+    if should_migrate_data(base_alembic_revision):
+        logger.info("Data Migration started")
+        migrate()
+        logger.info("Data migration finished")
+    else:
+        logger.info("Data migration skipped")
+
 
 if __name__ == "__main__":
-    main()
+    try:
+        base_alembic_revision: Optional[str] = sys.argv[1]
+    except IndexError:
+        base_alembic_revision = None
+    main(base_alembic_revision)
diff --git a/ymir/backend/src/ymir_app/app/libs/common.py b/ymir/backend/src/ymir_app/app/libs/common.py
new file mode 100644
index 0000000000..4015706ed6
--- /dev/null
+++ b/ymir/backend/src/ymir_app/app/libs/common.py
@@ -0,0 +1,9 @@
+from typing import List, Optional
+
+
+def pagination(items: List, offset: int = 0, limit: Optional[int] = None) -> List:
+    """
+    Mimic the behavior of database query's offset-limit pagination
+    """
+    end = limit + offset if limit is not None else None
+    return items[offset:end]
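The pagination helper above mirrors SQL's OFFSET/LIMIT on a plain Python list, which is what the dataset-group and model-group CRUDs now use in place of query.offset().limit(). A few boundary cases worth pinning down (plain usage of the function defined above):

    assert pagination([1, 2, 3, 4], offset=1, limit=2) == [2, 3]
    assert pagination([1, 2, 3, 4], offset=3, limit=5) == [4]   # overshoot is safe, slicing clamps
    assert pagination([1, 2, 3, 4], offset=9) == []             # past the end: empty page
    assert pagination([1, 2, 3, 4]) == [1, 2, 3, 4]             # no limit: all rows

Because slicing never raises on out-of-range indices, callers can pass user-supplied offsets without extra validation.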
diff --git a/ymir/backend/src/ymir_app/app/libs/datasets.py b/ymir/backend/src/ymir_app/app/libs/datasets.py
index 22ac250e85..162b4dd140 100644
--- a/ymir/backend/src/ymir_app/app/libs/datasets.py
+++ b/ymir/backend/src/ymir_app/app/libs/datasets.py
@@ -1,3 +1,6 @@
+from collections import ChainMap
+from concurrent.futures import ThreadPoolExecutor
+from functools import partial
 from typing import Any, Dict, Optional, List
 import tempfile
 import pathlib
@@ -10,19 +13,19 @@
 from app.api.errors.errors import (
     DatasetNotFound,
     FailedtoCreateDataset,
-    FailedToEvaluate,
+    PrematureDatasets,
 )
 from app.config import settings
-from app.constants.state import ResultState
-from app.utils.files import FailedToDownload, verify_import_path, prepare_imported_dataset_dir, InvalidFileStructure
+from app.constants.state import ResultState, TaskState
+from app.utils.files import FailedToDownload, locate_import_paths, prepare_downloaded_paths, InvalidFileStructure
 from app.utils.ymir_viz import VizClient
 from app.utils.ymir_controller import (
     ControllerClient,
     gen_user_hash,
     gen_repo_hash,
-    gen_task_hash,
 )
 from common_utils.labels import UserLabels
+from id_definition.error_codes import APIErrorCode as error_codes
 
 
 def import_dataset_in_background(
@@ -34,10 +37,30 @@ def import_dataset_in_background(
     dataset_id: int,
 ) -> None:
     try:
-        _import_dataset(db, controller_client, dataset_import, user_id, task_hash)
-    except (OSError, BadZipFile, FailedToDownload, FailedtoCreateDataset, DatasetNotFound, InvalidFileStructure):
+        return _import_dataset(db, controller_client, dataset_import, user_id, task_hash)
+    except FailedToDownload:
+        logger.exception("[import dataset] failed to download dataset file")
+        state_code = error_codes.FAILED_TO_DOWNLOAD
+    except (InvalidFileStructure, FileNotFoundError):
+        logger.exception("[import dataset] invalid dataset file structure")
+        state_code = error_codes.INVALID_DATASET_STRUCTURE
+    except DatasetNotFound:
+        logger.exception("[import dataset] source dataset not found, could not copy")
+        state_code = error_codes.DATASET_NOT_FOUND
+    except FailedtoCreateDataset:
+        logger.exception("[import dataset] controller error")
+        state_code = error_codes.CONTROLLER_ERROR
+    except BadZipFile:
+        logger.exception("[import dataset] invalid zip file")
+        state_code = error_codes.INVALID_DATASET_ZIP_FILE
+    except Exception:
         logger.exception("[import dataset] failed to import dataset")
-        crud.dataset.update_state(db, dataset_id=dataset_id, new_state=ResultState.error)
+        state_code = error_codes.DATASET_FAILED_TO_IMPORT
+
+    task = crud.task.get_by_hash(db, task_hash)
+    if task:
+        crud.task.update_state(db, task=task, new_state=TaskState.error, state_code=str(state_code.value))
+    crud.dataset.update_state(db, dataset_id=dataset_id, new_state=ResultState.error)
 
 
 def _import_dataset(
@@ -57,15 +80,18 @@ def _import_dataset(
             "src_repo_id": gen_repo_hash(dataset.project_id),
             "src_resource_id": dataset.hash,
             "strategy": dataset_import.strategy,
+            "clean_dirs": True,
         }
     else:
         paths = ImportDatasetPaths(
             cache_dir=settings.SHARED_DATA_DIR, input_path=dataset_import.input_path, input_url=dataset_import.input_url
         )
         parameters = {
-            "annotation_dir": paths.annotation_dir,
             "asset_dir": paths.asset_dir,
+            "gt_dir": paths.gt_dir,
+            "pred_dir": paths.pred_dir,
             "strategy": dataset_import.strategy,
+            "clean_dirs": dataset_import.input_path is None,  # for path importing, DO NOT clean_dirs
         }
 
     try:
@@ -85,60 +111,95 @@ class ImportDatasetPaths:
     def __init__(
         self, input_path: Optional[str], input_url: Optional[str], cache_dir: str = settings.SHARED_DATA_DIR
     ) -> None:
-        self.cache_dir = cache_dir
-        self.input_url = input_url
-        self.input_path = input_path
-        self._data_dir: Optional[str] = None
+        self._asset_path: Optional[pathlib.Path] = None
+        self._gt_path: Optional[pathlib.Path] = None
+        self._pred_path: Optional[pathlib.Path] = None
 
-    @property
-    def annotation_dir(self) -> str:
-        return str(self.data_dir / "annotations")
+        if input_path:
+            self._asset_path, self._gt_path, self._pred_path = locate_import_paths(input_path)
+        elif input_url:
+            temp_dir = tempfile.mkdtemp(prefix="import_dataset_", dir=cache_dir)
+            self._asset_path, self._gt_path, self._pred_path = prepare_downloaded_paths(input_url, temp_dir)
+        else:
+            raise ValueError("input_path or input_url is required")
 
     @property
     def asset_dir(self) -> str:
-        return str(self.data_dir / "images")
+        return str(self._asset_path)
 
     @property
-    def data_dir(self) -> pathlib.Path:
-        if not self._data_dir:
-            if self.input_path:
-                verify_import_path(self.input_path)
-                self._data_dir = self.input_path
-            elif self.input_url:
-                temp_dir = tempfile.mkdtemp(prefix="import_dataset_", dir=self.cache_dir)
-                self._data_dir = prepare_imported_dataset_dir(self.input_url, temp_dir)
-            else:
-                raise ValueError("input_path or input_url is required")
-        return pathlib.Path(self._data_dir)
-
-
-def evaluate_dataset(
-    controller: ControllerClient,
-    viz: VizClient,
+    def gt_dir(self) -> Optional[str]:
+        return str(self._gt_path) if self._gt_path else None
+
+    @property
+    def pred_dir(self) -> Optional[str]:
+        return str(self._pred_path) if self._pred_path else None
+
+
+def evaluate_datasets(
+    controller_client: ControllerClient,
     user_id: int,
     project_id: int,
     user_labels: UserLabels,
     confidence_threshold: float,
-    gt_dataset: models.Dataset,
-    other_datasets: List[models.Dataset],
+    iou_threshold: float,
+    require_average_iou: bool,
+    need_pr_curve: bool,
+    main_ck: Optional[str],
+    dataset_id_mapping: Dict[str, int],
 ) -> Dict:
-    # temporary task hash used to fetch evaluation result later
-    task_hash = gen_task_hash(user_id, project_id)
-    try:
-        controller.evaluate_dataset(
-            user_id,
-            project_id,
-            task_hash,
-            confidence_threshold,
-            gt_dataset.hash,
-            [dataset.hash for dataset in other_datasets],
-        )
-    except ValueError:
-        logger.exception("Failed to evaluate via controller")
-        raise FailedToEvaluate()
-    # todo refactor
-    viz.initialize(user_id=user_id, project_id=project_id, branch_id=task_hash)
-    evaluations = viz.get_evaluations(user_labels)
+    if require_average_iou:
+        iou_thrs_interval = f"{iou_threshold}:0.95:0.05"
+        logger.info("set iou_thrs_interval to %s because of require_average_iou", iou_thrs_interval)
+    else:
+        iou_thrs_interval = str(iou_threshold)
+
+    f_evaluate = partial(
+        controller_client.evaluate_dataset,
+        user_id,
+        project_id,
+        user_labels,
+        confidence_threshold,
+        iou_thrs_interval,
+        need_pr_curve,
+        main_ck,
+    )
+    with ThreadPoolExecutor() as executor:
+        res = executor.map(f_evaluate, dataset_id_mapping.keys())
+
+    evaluations = ChainMap(*res)
 
-    dataset_id_mapping = {dataset.hash: dataset.id for dataset in other_datasets}
     return {dataset_id_mapping[hash_]: evaluation for hash_, evaluation in evaluations.items()}
+
+
+def ensure_datasets_are_ready(db: Session, dataset_ids: List[int]) -> List[models.Dataset]:
+    datasets = crud.dataset.get_multi_by_ids(db, ids=dataset_ids)
+    if len(dataset_ids) != len(datasets):
+        raise DatasetNotFound()
+
+    if not all(dataset.result_state == ResultState.ready for dataset in datasets):
+        raise PrematureDatasets()
+    return datasets
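The fan-out in evaluate_datasets above is worth spelling out: the shared arguments are bound once with functools.partial, each dataset hash is evaluated on a worker thread, and the per-dataset result dicts are merged with ChainMap. A standalone sketch of the same pattern, with evaluate_one standing in for the controller call (names and payloads are illustrative):

    from collections import ChainMap
    from concurrent.futures import ThreadPoolExecutor
    from functools import partial
    from typing import Dict

    def evaluate_one(confidence_threshold: float, dataset_hash: str) -> Dict[str, Dict]:
        # pretend evaluation: one {hash: result} dict per dataset
        return {dataset_hash: {"conf": confidence_threshold}}

    f_evaluate = partial(evaluate_one, 0.5)          # bind the shared argument
    with ThreadPoolExecutor() as executor:
        res = executor.map(f_evaluate, ["hash_a", "hash_b"])

    merged = dict(ChainMap(*res))                    # merge per-dataset dicts
    assert merged == {"hash_a": {"conf": 0.5}, "hash_b": {"conf": 0.5}}

ChainMap works here because each worker returns a dict keyed by a distinct dataset hash, so there are no collisions to resolve.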
+
+
+def send_keywords_metrics(
+    user_id: int,
+    project_id: int,
+    task_hash: str,
+    keyword_ids: List[int],
+    create_time: int,
+) -> None:
+    try:
+        viz_client = VizClient()
+        viz_client.initialize(user_id=user_id, project_id=project_id)
+        viz_client.send_metrics(
+            metrics_group="task",
+            id=task_hash,
+            create_time=create_time,
+            keyword_ids=keyword_ids,
+        )
+    except Exception:
+        logger.exception(
+            "[metrics] failed to send keywords(%s) stats to viewer, continue anyway",
+            keyword_ids,
+        )
diff --git a/ymir/backend/src/ymir_app/app/libs/iteration_steps.py b/ymir/backend/src/ymir_app/app/libs/iteration_steps.py
new file mode 100644
index 0000000000..9bda14ecbe
--- /dev/null
+++ b/ymir/backend/src/ymir_app/app/libs/iteration_steps.py
@@ -0,0 +1,19 @@
+from typing import List
+
+from sqlalchemy.orm import Session
+
+from app import crud, schemas, models
+from app.constants.state import IterationStepTemplates
+
+
+def initialize_steps(db: Session, iteration_id: int) -> List[models.IterationStep]:
+    """
+    initialize all the necessary steps upon new iteration
+    """
+    steps = [
+        schemas.iteration_step.IterationStepCreate(
+            iteration_id=iteration_id, name=step_template.name, task_type=step_template.task_type
+        )
+        for step_template in IterationStepTemplates
+    ]
+    return crud.iteration_step.batch_create(db, objs_in=steps)
+ "negative_ratio": {"processed_assets_count": 0, "total_assets_count": 0}, + } + + +def get_training_classes(db: Session, project_id: int, user_labels: UserLabels) -> Dict[str, int]: + project = crud.project.get(db, id=project_id) + if not project or not project.training_targets: + raise InvalidProject() + class_ids = user_labels.id_for_names(names=project.training_targets, raise_if_unknown=True)[0] + return dict(zip(project.training_targets, class_ids)) + + +def get_processed_assets_ratio( + viz: VizClient, + mining_dataset: models.Dataset, + previous_labelled_datasets: List[models.Dataset], +) -> Dict: + count_stats = viz.check_duplication([dataset.hash for dataset in previous_labelled_datasets], mining_dataset.hash) + return { + "processed_assets_count": mining_dataset.asset_count - count_stats["residual_count"][mining_dataset.hash], + "total_assets_count": mining_dataset.asset_count, + } + + +def get_negative_ratio( + viz: VizClient, + previous_labelled_datasets: List[models.Dataset], + training_class_ids: List[int], +) -> Dict[str, int]: + """ + calculate negative assets count against total assets count + """ + f_get_negative_count = partial(viz.get_negative_count, keyword_ids=training_class_ids) + with ThreadPoolExecutor() as executor: + negative_counts = executor.map(f_get_negative_count, [dataset.hash for dataset in previous_labelled_datasets]) + + total_assets_count = sum(dataset.asset_count for dataset in previous_labelled_datasets if dataset.asset_count) + return { + "processed_assets_count": sum(negative_counts), + "total_assets_count": total_assets_count, + } + + +def get_class_wise_mining_ratio( + viz: VizClient, + previous_labelled_datasets: List[models.Dataset], + training_classes: Dict[str, int], +) -> List[Dict]: + """ + calculate assets count for each training_classes, against total assets count + """ + processed_assets_counter: Counter = Counter() + with ThreadPoolExecutor() as executor: + class_wise_counters = executor.map( + viz.get_class_wise_count, [dataset.hash for dataset in previous_labelled_datasets] + ) + for class_wise_counter in class_wise_counters: + processed_assets_counter += Counter(class_wise_counter) + + total_assets_count = sum(dataset.asset_count for dataset in previous_labelled_datasets if dataset.asset_count) + return [ + { + "class_name": class_name, + "processed_assets_count": processed_assets_counter[class_name], + "total_assets_count": total_assets_count, + } + for class_name in training_classes + ] diff --git a/ymir/backend/src/ymir_app/app/libs/keywords.py b/ymir/backend/src/ymir_app/app/libs/keywords.py deleted file mode 100644 index fb953452e6..0000000000 --- a/ymir/backend/src/ymir_app/app/libs/keywords.py +++ /dev/null @@ -1,14 +0,0 @@ -from typing import List - -from app.utils.cache import CacheClient -from app.utils.ymir_controller import ControllerClient -from common_utils.labels import SingleLabel, UserLabels - - -def add_keywords(controller: ControllerClient, cache: CacheClient, user_id: int, keywords: List[str]) -> None: - controller.add_labels( - user_id, - UserLabels(labels=[SingleLabel(name=k) for k in keywords]), - dry_run=False, - ) - cache.delete_personal_keywords_cache() diff --git a/ymir/backend/src/ymir_app/app/libs/labels.py b/ymir/backend/src/ymir_app/app/libs/labels.py new file mode 100644 index 0000000000..cd9ce4d2ee --- /dev/null +++ b/ymir/backend/src/ymir_app/app/libs/labels.py @@ -0,0 +1,51 @@ +from typing import Dict, List + +from app.utils.cache import CacheClient +from app.utils.ymir_controller import 
diff --git a/ymir/backend/src/ymir_app/app/libs/labels.py b/ymir/backend/src/ymir_app/app/libs/labels.py
new file mode 100644
index 0000000000..cd9ce4d2ee
--- /dev/null
+++ b/ymir/backend/src/ymir_app/app/libs/labels.py
@@ -0,0 +1,51 @@
+from typing import Dict, List
+
+from app.utils.cache import CacheClient
+from app.utils.ymir_controller import ControllerClient
+from common_utils.labels import SingleLabel, UserLabels
+
+
+def upsert_labels(
+    user_id: int,
+    new_labels: UserLabels,
+    controller_client: ControllerClient,
+    dry_run: bool = False,
+) -> Dict:
+    """
+    update or insert labels
+    """
+    resp = controller_client.add_labels(user_id, new_labels, dry_run)
+
+    conflict_labels = []
+    if resp.get("label_collection"):
+        for conflict_label in resp["label_collection"]["labels"]:
+            conflict_labels += [conflict_label["name"]] + conflict_label["aliases"]
+
+    return {"failed": conflict_labels}
+
+
+def ensure_labels_exist(
+    user_id: int,
+    user_labels: UserLabels,
+    controller_client: ControllerClient,
+    keywords: List[str],
+    cache: CacheClient,
+) -> List[int]:
+    try:
+        return keywords_to_class_ids(user_labels, keywords)
+    except ValueError:
+        new_labels = UserLabels(labels=[SingleLabel(name=k) for k in keywords])
+        upsert_labels(user_id=user_id, new_labels=new_labels, controller_client=controller_client)
+        user_labels = controller_client.get_labels_of_user(user_id)
+        cache.delete_personal_keywords_cache()
+        return keywords_to_class_ids(user_labels, keywords)
+
+
+def keywords_to_class_ids(user_labels: UserLabels, keywords: List[str]) -> List[int]:
+    class_ids, _ = user_labels.id_for_names(names=keywords, raise_if_unknown=True)
+    return class_ids
+
+
+def class_ids_to_keywords(user_labels: UserLabels, class_ids: List) -> List[str]:
+    keywords = user_labels.main_name_for_ids(class_ids=[int(i) for i in class_ids])
+    return keywords
diff --git a/ymir/backend/src/ymir_app/app/libs/models.py b/ymir/backend/src/ymir_app/app/libs/models.py
index 5abe011f5a..704f85e6cc 100644
--- a/ymir/backend/src/ymir_app/app/libs/models.py
+++ b/ymir/backend/src/ymir_app/app/libs/models.py
@@ -85,3 +85,14 @@ def _import_model(
     except ValueError as e:
         logger.exception("[import model] controller error: %s", e)
         raise FailedtoImportModel()
+
+
+def create_model_stages(db: Session, model_id: int, model_info: Dict) -> None:
+    stages_in = [
+        schemas.ModelStageCreate(
+            name=stage_name, map=stage_info["mAP"], timestamp=stage_info["timestamp"], model_id=model_id
+        )
+        for stage_name, stage_info in model_info["model_stages"].items()
+    ]
+    crud.model_stage.batch_create(db, objs_in=stages_in)
+    crud.model.update_recommonded_stage_by_name(db, model_id=model_id, stage_name=model_info["best_stage_name"])
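ensure_labels_exist above follows a try/upsert/retry ("ask forgiveness") flow: attempt the keyword-to-id mapping, and only on failure register the unknown labels and map again. A minimal stand-in for that control flow, with a plain dict faking the label store (the real code goes through the controller and cache):

    from typing import Dict, List

    labels: Dict[str, int] = {"person": 0}   # pretend label store

    def to_class_ids(keywords: List[str]) -> List[int]:
        try:
            return [labels[k] for k in keywords]
        except KeyError:
            # first lookup failed: register the unknown labels, then retry once
            for k in keywords:
                labels.setdefault(k, len(labels))
            return [labels[k] for k in keywords]

    assert to_class_ids(["person", "cat"]) == [0, 1]

The single retry is deliberate: if the mapping still fails after the upsert, something is genuinely wrong and the exception should propagate.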
diff --git a/ymir/backend/src/ymir_app/app/libs/projects.py b/ymir/backend/src/ymir_app/app/libs/projects.py
index d66a0499fc..faefe983d4 100644
--- a/ymir/backend/src/ymir_app/app/libs/projects.py
+++ b/ymir/backend/src/ymir_app/app/libs/projects.py
@@ -1,4 +1,4 @@
-from typing import Optional
+from typing import Optional, List
 
 from fastapi.logger import logger
 from sqlalchemy.orm import Session
@@ -6,6 +6,7 @@
 from app.config import settings
 from app.constants.state import ResultState, TaskType, TaskState
 from app.utils.ymir_controller import ControllerClient
+from app.utils.ymir_viz import VizClient
 from app.libs.datasets import import_dataset_in_background
 from app.libs.models import import_model_in_background
 
@@ -105,14 +106,14 @@ def setup_sample_project_in_background(
     )
 
     # import testing dataset
-    testing_dataset = setup_dataset_and_group(
+    validation_dataset = setup_dataset_and_group(
         db=db,
         controller_client=controller_client,
-        group_name=f"{project_name}_testing_dataset",
+        group_name=f"{project_name}_validation_dataset",
         project_id=project_id,
         user_id=user_id,
         task_type=TaskType.import_data,
-        input_url=settings.SAMPLE_PROJECT_TESTING_DATASET_URL,
+        input_url=settings.SAMPLE_PROJECT_VALIDATION_DATASET_URL,
     )
 
     # import mining dataset
@@ -142,8 +143,33 @@ def setup_sample_project_in_background(
         project_update=schemas.ProjectUpdate(
             training_dataset_group_id=training_dataset.dataset_group_id,
             initial_training_dataset_id=training_dataset.id,
-            testing_dataset_id=testing_dataset.id,
+            validation_dataset_id=validation_dataset.id,
             mining_dataset_id=mining_dataset.id,
             initial_model_id=model.id,
         ),
     )
+
+
+def send_project_metrics(
+    user_id: int,
+    project_id: int,
+    project_name: str,
+    keyword_ids: List[int],
+    project_type: str,
+    create_time: int,
+) -> None:
+    try:
+        viz_client = VizClient()
+        viz_client.initialize(user_id=user_id, project_id=project_id)
+        viz_client.send_metrics(
+            metrics_group="project",
+            id=f"{project_id:0>6}",
+            create_time=create_time,
+            keyword_ids=keyword_ids,
+            extra_data={"project_type": project_type},
+        )
+    except Exception:
+        logger.exception(
+            "[metrics] failed to send project(%s) stats to viewer, continue anyway",
+            project_name,
+        )
- logger.info("handled all the legacy msgs") + logger.info("[cron] handled all the legacy monitor msgs") check_backlog = False continue - logger.info("handling payloads %s", payloads) - successful_ids = await f_processor(payloads) - if successful_ids: - await self._conn.xack(self.stream_name, self.group_name, *successful_ids) - last_id = payloads[-1][0] + last_id = messages[-1][0] + payloads += messages + if payloads: + logger.info("[cron] handling monitor payloads %s", payloads) + successful_ids = await f_processor(payloads) + if successful_ids: + await self._conn.xack(self.stream_name, self.group_name, *successful_ids) diff --git a/ymir/backend/src/ymir_app/app/libs/tasks.py b/ymir/backend/src/ymir_app/app/libs/tasks.py index 86801a06fc..a7e06bfbb5 100644 --- a/ymir/backend/src/ymir_app/app/libs/tasks.py +++ b/ymir/backend/src/ymir_app/app/libs/tasks.py @@ -1,36 +1,38 @@ +from functools import cached_property import json import itertools import asyncio -from typing import Any, Dict, List, Tuple, Optional, Union +from typing import Any, Dict, List, Tuple, Optional import aiohttp -from dataclasses import asdict from fastapi.logger import logger from fastapi.encoders import jsonable_encoder from sqlalchemy.orm import Session from app.api.errors.errors import ( - FailedToUpdateTaskStatus, + DatasetIndexNotReady, + FailedToUpdateTaskStatusTemporally, FailedtoCreateTask, - FailedToConnectClickHouse, ModelNotReady, ModelNotFound, + ModelStageNotFound, TaskNotFound, DatasetNotFound, DatasetGroupNotFound, + RequiredFieldMissing, ) from app.constants.state import ( FinalStates, TaskState, - TaskType, ResultType, ResultState, ) from app.config import settings from app import schemas, crud, models +from app.libs.models import create_model_stages +from app.utils.cache import CacheClient from app.utils.ymir_controller import ControllerClient, gen_task_hash -from app.utils.clickhouse import YmirClickHouse -from app.utils.ymir_viz import VizClient, ModelMetaData, DatasetMetaData +from app.utils.ymir_viz import VizClient from common_utils.labels import UserLabels @@ -43,7 +45,7 @@ async def should_retry(resp: aiohttp.ClientResponse) -> bool: # server returned 500, for example return True response = await resp.json() - if int(response["code"]) == FailedToUpdateTaskStatus.code: + if int(response["code"]) == FailedToUpdateTaskStatusTemporally.code: # server explicitly asked for retry return True return False @@ -97,45 +99,19 @@ def normalize_parameters( raise DatasetNotFound() normalized["validation_dataset_hash"] = validation_dataset.hash - if parameters.model_id: - model = crud.model.get(db, id=parameters.model_id) - if model: - normalized["model_hash"] = model.hash + if parameters.model_stage_id: + model_stage = crud.model_stage.get(db, id=parameters.model_stage_id) + if not model_stage: + raise ModelStageNotFound() + normalized["model_hash"] = model_stage.model.hash # type: ignore + normalized["model_stage_name"] = model_stage.name if parameters.keywords: - normalized["class_ids"] = user_labels.get_class_ids(names_or_aliases=parameters.keywords) - return normalized - + normalized["class_ids"] = user_labels.id_for_names(names=parameters.keywords, raise_if_unknown=True)[0] -def write_clickhouse_metrics( - task_info: schemas.TaskInternal, - dataset_group_id: int, - dataset_id: int, - model_id: Optional[int], - keywords: List[str], -) -> None: - # for task stats - clickhouse = YmirClickHouse() - clickhouse.save_task_parameter( - dt=task_info.create_datetime, - user_id=task_info.user_id, - 
diff --git a/ymir/backend/src/ymir_app/app/libs/tasks.py b/ymir/backend/src/ymir_app/app/libs/tasks.py
index 86801a06fc..a7e06bfbb5 100644
--- a/ymir/backend/src/ymir_app/app/libs/tasks.py
+++ b/ymir/backend/src/ymir_app/app/libs/tasks.py
@@ -1,36 +1,38 @@
+from functools import cached_property
 import json
 import itertools
 import asyncio
-from typing import Any, Dict, List, Tuple, Optional, Union
+from typing import Any, Dict, List, Tuple, Optional
 
 import aiohttp
-from dataclasses import asdict
 from fastapi.logger import logger
 from fastapi.encoders import jsonable_encoder
 from sqlalchemy.orm import Session
 
 from app.api.errors.errors import (
-    FailedToUpdateTaskStatus,
+    DatasetIndexNotReady,
+    FailedToUpdateTaskStatusTemporally,
     FailedtoCreateTask,
-    FailedToConnectClickHouse,
     ModelNotReady,
     ModelNotFound,
+    ModelStageNotFound,
     TaskNotFound,
     DatasetNotFound,
     DatasetGroupNotFound,
+    RequiredFieldMissing,
 )
 from app.constants.state import (
     FinalStates,
     TaskState,
-    TaskType,
     ResultType,
     ResultState,
 )
 from app.config import settings
 from app import schemas, crud, models
+from app.libs.models import create_model_stages
+from app.utils.cache import CacheClient
 from app.utils.ymir_controller import ControllerClient, gen_task_hash
-from app.utils.clickhouse import YmirClickHouse
-from app.utils.ymir_viz import VizClient, ModelMetaData, DatasetMetaData
+from app.utils.ymir_viz import VizClient
 from common_utils.labels import UserLabels
 
@@ -43,7 +45,7 @@ async def should_retry(resp: aiohttp.ClientResponse) -> bool:
         # server returned 500, for example
         return True
     response = await resp.json()
-    if int(response["code"]) == FailedToUpdateTaskStatus.code:
+    if int(response["code"]) == FailedToUpdateTaskStatusTemporally.code:
         # server explicitly asked for retry
         return True
     return False
@@ -97,45 +99,19 @@ def normalize_parameters(
             raise DatasetNotFound()
         normalized["validation_dataset_hash"] = validation_dataset.hash
 
-    if parameters.model_id:
-        model = crud.model.get(db, id=parameters.model_id)
-        if model:
-            normalized["model_hash"] = model.hash
+    if parameters.model_stage_id:
+        model_stage = crud.model_stage.get(db, id=parameters.model_stage_id)
+        if not model_stage:
+            raise ModelStageNotFound()
+        normalized["model_hash"] = model_stage.model.hash  # type: ignore
+        normalized["model_stage_name"] = model_stage.name
 
     if parameters.keywords:
-        normalized["class_ids"] = user_labels.get_class_ids(names_or_aliases=parameters.keywords)
-    return normalized
-
+        normalized["class_ids"] = user_labels.id_for_names(names=parameters.keywords, raise_if_unknown=True)[0]
 
-def write_clickhouse_metrics(
-    task_info: schemas.TaskInternal,
-    dataset_group_id: int,
-    dataset_id: int,
-    model_id: Optional[int],
-    keywords: List[str],
-) -> None:
-    # for task stats
-    clickhouse = YmirClickHouse()
-    clickhouse.save_task_parameter(
-        dt=task_info.create_datetime,
-        user_id=task_info.user_id,
-        project_id=task_info.project_id,
-        name=task_info.name,
-        hash_=task_info.hash,
-        type_=TaskType(task_info.type).name,
-        dataset_ids=[dataset_id],
-        model_ids=[model_id] if model_id else [],
-        keywords=keywords,
-    )
-    # for keywords recommendation
-    clickhouse.save_dataset_keyword(
-        dt=task_info.create_datetime,
-        user_id=task_info.user_id,
-        project_id=task_info.project_id,
-        group_id=dataset_group_id,
-        dataset_id=dataset_id,
-        keywords=keywords,
-    )
+    if parameters.preprocess:
+        normalized["preprocess"] = parameters.preprocess.json()
+    return normalized
 
 
 def create_single_task(db: Session, user_id: int, user_labels: UserLabels, task_in: schemas.TaskCreate) -> models.Task:
@@ -149,33 +125,36 @@ def create_single_task(db: Session, user_id: int, user_labels: UserLabels, task_
             task_id=task_hash,
             task_type=task_in.type,
             args=args,
-            task_parameters=task_in.parameters.json() if task_in.parameters else None,
+            archived_task_parameters=task_in.parameters.json() if task_in.parameters else None,
         )
         logger.info("[create task] controller response: %s", resp)
     except ValueError:
         raise FailedtoCreateTask()
+    except KeyError:
+        raise RequiredFieldMissing()
     task = crud.task.create_task(db, obj_in=task_in, task_hash=task_hash, user_id=user_id)
     task_info = schemas.TaskInternal.from_orm(task)
 
     task_result = TaskResult(db=db, task_in_db=task)
-    task_result.create(task_in.parameters.dataset_id)
+    task_result.create(task_in.parameters.dataset_id, task_in.result_description)
 
-    try:
-        write_clickhouse_metrics(
-            task_info,
-            args["dataset_group_id"],
-            args["dataset_id"],
-            task_in.parameters.model_id,
-            task_in.parameters.keywords or [],
-        )
-    except FailedToConnectClickHouse:
-        # clickhouse metric shouldn't block create task process
-        logger.exception(
-            "[create task] failed to write task(%s) stats to clickhouse, continue anyway",
-            task.hash,
-        )
     logger.info("[create task] created task name: %s", task_info.name)
+    if args.get("class_ids"):
+        try:
+            viz_client = VizClient()
+            viz_client.initialize(user_id=user_id, project_id=task_in.project_id)
+            viz_client.send_metrics(
+                metrics_group="task",
+                id=task_info.hash,
+                create_time=int(task_info.create_datetime.timestamp()),
+                keyword_ids=args["class_ids"],
+            )
+        except Exception:
+            logger.exception(
+                "[create task] failed to write task(%s) stats to viewer, continue anyway",
+                task.hash,
+            )
     return task
logger.info(f"[viewer_model] model_info: {result}") return result - @property - def dataset_info(self) -> DatasetMetaData: - return self.viz.get_dataset(user_labels=self.user_labels) - - @property - def result_info(self) -> Union[DatasetMetaData, ModelMetaData, None]: - if self._result is None: - self._result = self.model_info if self.result_type is ResultType.model else self.dataset_info - return self._result - - def save_model_stats(self, result: ModelMetaData) -> None: - model_in_db = crud.model.get_by_task_id(self.db, task_id=self.task.id) - if not model_in_db: - logger.warning("[update task] found no model to save model stats(%s)", result) - return - project_in_db = crud.project.get(self.db, id=self.project_id) - keywords = schemas.Project.from_orm(project_in_db).training_keywords - clickhouse = YmirClickHouse() - clickhouse.save_model_result( - model_in_db.create_datetime, - self.user_id, - model_in_db.project_id, - model_in_db.model_group_id, - model_in_db.id, - model_in_db.name, - result.hash, - result.map, - keywords, - ) + @cached_property + def dataset_info(self) -> Optional[Dict]: + try: + dataset_info = self.viz.get_dataset_info( + self.task_hash, user_labels=self.user_labels, check_index_status=True + ) + except DatasetIndexNotReady: + raise FailedToUpdateTaskStatusTemporally() + except Exception: + logger.exception("[update task] failed to get dataset_info, check viz log") + return None + if dataset_info["new_types_added"]: + logger.info("[update task] delete user keywords cache for new keywords from dataset") + self.cache.delete_personal_keywords_cache() + return dataset_info def get_dest_group_info(self, dataset_id: int) -> Tuple[int, str]: if self.result_type is ResultType.dataset: @@ -282,14 +245,16 @@ def get_dest_group_info(self, dataset_id: int) -> Tuple[int, str]: ) return model_group.id, model_group.name - def create(self, dataset_id: int) -> Dict[str, Dict]: + def create(self, dataset_id: int, description: Optional[str] = None) -> Dict[str, Dict]: dest_group_id, dest_group_name = self.get_dest_group_info(dataset_id) if self.result_type is ResultType.dataset: - dataset = crud.dataset.create_as_task_result(self.db, self.task, dest_group_id, dest_group_name) + dataset = crud.dataset.create_as_task_result( + self.db, self.task, dest_group_id, dest_group_name, description + ) logger.info("[create task] created new dataset(%s) as task result", dataset.name) return {"dataset": jsonable_encoder(dataset)} elif self.result_type is ResultType.model: - model = crud.model.create_as_task_result(self.db, self.task, dest_group_id, dest_group_name) + model = crud.model.create_as_task_result(self.db, self.task, dest_group_id, dest_group_name, description) logger.info("[create task] created new model(%s) as task result", model.name) return {"model": jsonable_encoder(model)} else: @@ -330,46 +295,55 @@ def update( ) def update_task_result(self, task_result: schemas.TaskUpdateStatus, task_in_db: models.Task) -> None: + """ + task_result: task update from monitor + task_in_db: is required to add back model config to task + """ if self.result_type is ResultType.dataset: - crud_func = crud.dataset + self.update_dataset_result(task_result) elif self.result_type is ResultType.model: - crud_func = crud.model # type: ignore - else: - logger.info("[update task] no task result to update") - return + self.update_model_result(task_result, task_in_db) - result_record = crud_func.get_by_task_id(self.db, task_id=self.task.id) - if not result_record: - logger.error("[update task] task result record 
not found, skip") + def update_model_result(self, task_result: schemas.TaskUpdateStatus, task_in_db: models.Task) -> None: + """ + Criterion for ready model: viewer returns valid model_info + """ + model_record = crud.model.get_by_task_id(self.db, task_id=self.task.id) + if not model_record: + logger.error("[update task] task result (model) not found, skip") return - - if self.result_type is ResultType.model and self.model_info: - # special path for model - # as long as we can get model_info, set model as ready and - # save related task parameters and config accordingly + model_info = self.model_info + if model_info: + # as long as model info is ready, regardless of task status, just set model as ready crud.task.update_parameters_and_config( self.db, task=task_in_db, - parameters=self.model_info.task_parameters, - config=json.dumps(self.model_info.executor_config), + parameters=model_info["task_parameters"], + config=json.dumps(model_info["executor_config"]), ) - crud.model.finish(self.db, result_record.id, result_state=ResultState.ready, result=asdict(self.model_info)) - try: - self.save_model_stats(self.model_info) - except FailedToConnectClickHouse: - logger.exception("Failed to write model stats to clickhouse, continue anyway") - return + crud.model.finish(self.db, model_record.id, result_state=ResultState.ready, result=model_info) + create_model_stages(self.db, model_record.id, model_info) + else: + crud.model.finish(self.db, model_record.id, result_state=ResultState.error) - if task_result.state is TaskState.done: - crud_func.finish( + def update_dataset_result(self, task_result: schemas.TaskUpdateStatus) -> None: + """ + Criterion for ready dataset: task state is DONE and viewer returns valid dataset_info + """ + dataset_record = crud.dataset.get_by_task_id(self.db, task_id=self.task.id) + if not dataset_record: + logger.error("[update task] task result (dataset) not found, skip") + return + if task_result.state is TaskState.done and self.dataset_info: + crud.dataset.finish( self.db, - result_record.id, + dataset_record.id, result_state=ResultState.ready, - result=asdict(self.result_info), + result=self.dataset_info, ) else: - crud_func.finish( + crud.dataset.finish( self.db, - result_record.id, + dataset_record.id, result_state=ResultState.error, ) diff --git a/ymir/backend/src/ymir_app/app/main.py b/ymir/backend/src/ymir_app/app/main.py index 349ae33ca3..c45ae64235 100644 --- a/ymir/backend/src/ymir_app/app/main.py +++ b/ymir/backend/src/ymir_app/app/main.py @@ -13,10 +13,12 @@ from fastapi_cache import FastAPICache from fastapi_cache.backends.redis import RedisBackend from fastapi_socketio import SocketManager +from fastapi_health import health from sentry_sdk.integrations.asgi import SentryAsgiMiddleware from starlette.exceptions import HTTPException from starlette.middleware.cors import CORSMiddleware from starlette.responses import HTMLResponse +from starlette_exporter import PrometheusMiddleware, handle_metrics from app.api.api_v1.api import api_router from app.api.errors import errors @@ -31,8 +33,12 @@ ) app.mount("/static", StaticFiles(directory="static"), name="static") +app.add_middleware(PrometheusMiddleware) +app.add_route("/metrics", handle_metrics) +app.add_api_route("/health", health([])) + if settings.SENTRY_DSN: - sentry_sdk.init(dsn=settings.SENTRY_DSN) + sentry_sdk.init(dsn=settings.SENTRY_DSN) # type: ignore app.add_middleware(SentryAsgiMiddleware) if settings.BACKEND_CORS_ORIGINS: @@ -96,6 +102,18 @@ async def shutdown() -> None: uvicorn_access_logger.handlers 
= gunicorn_error_logger.handlers logging.getLogger("multipart").setLevel(logging.WARNING) + +class EndpointFilter(logging.Filter): + def filter(self, record: logging.LogRecord) -> bool: + for filter_key in ["/health", "/metrics"]: + if record.getMessage().find(filter_key) != -1: + return False + return True + + +uvicorn_access_logger.addFilter(EndpointFilter()) + + if __name__ == "__main__": import uvicorn diff --git a/ymir/backend/src/ymir_app/app/models/__init__.py b/ymir/backend/src/ymir_app/app/models/__init__.py index 9c6372d0c6..cf3318685c 100644 --- a/ymir/backend/src/ymir_app/app/models/__init__.py +++ b/ymir/backend/src/ymir_app/app/models/__init__.py @@ -4,8 +4,10 @@ from .image_config import DockerImageConfig from .image_relationship import DockerImageRelationship from .iteration import Iteration +from .iteration_step import IterationStep from .model import Model from .model_group import ModelGroup +from .model_stage import ModelStage from .project import Project from .role import Role from .task import Task diff --git a/ymir/backend/src/ymir_app/app/models/image.py b/ymir/backend/src/ymir_app/app/models/image.py index 4145821e28..7aa2703373 100644 --- a/ymir/backend/src/ymir_app/app/models/image.py +++ b/ymir/backend/src/ymir_app/app/models/image.py @@ -29,6 +29,7 @@ class DockerImage(Base): primaryjoin="foreign(DockerImageConfig.image_id)==DockerImage.id", uselist=True, ) + enable_livecode = Column(Boolean, default=False, nullable=False) is_shared = Column(Boolean, default=False, nullable=False) is_deleted = Column(Boolean, default=False, nullable=False) create_datetime = Column(DateTime, default=datetime.utcnow, nullable=False) diff --git a/ymir/backend/src/ymir_app/app/models/iteration.py b/ymir/backend/src/ymir_app/app/models/iteration.py index 43555608fd..f79f55fff3 100644 --- a/ymir/backend/src/ymir_app/app/models/iteration.py +++ b/ymir/backend/src/ymir_app/app/models/iteration.py @@ -1,11 +1,14 @@ from datetime import datetime -from typing import List +from typing import List, Optional from sqlalchemy import Boolean, Column, DateTime, Integer, SmallInteger, String +from sqlalchemy.orm import relationship from app.config import settings from app.db.base_class import Base from app.models.task import Task # noqa +from app.models.dataset import Dataset # noqa +from app.models.iteration_step import IterationStep class Iteration(Base): @@ -16,12 +19,14 @@ class Iteration(Base): current_stage = Column(SmallInteger, index=True, default=0, nullable=False) previous_iteration = Column(Integer, index=True, default=0, nullable=False) + mining_dataset_id = Column(Integer) mining_input_dataset_id = Column(Integer) mining_output_dataset_id = Column(Integer) label_output_dataset_id = Column(Integer) training_input_dataset_id = Column(Integer) training_output_model_id = Column(Integer) - testing_dataset_id = Column(Integer) + training_output_model_stage_id = Column(Integer) + validation_dataset_id = Column(Integer) user_id = Column(Integer, index=True, nullable=False) project_id = Column(Integer, index=True, nullable=False) @@ -35,6 +40,20 @@ class Iteration(Base): nullable=False, ) + mining_dataset = relationship( + "Dataset", + primaryjoin="foreign(Dataset.id)==Iteration.mining_dataset_id", + uselist=False, + viewonly=True, + ) + + iteration_steps = relationship( + "IterationStep", + primaryjoin="foreign(IterationStep.iteration_id)==Iteration.id", + uselist=True, + viewonly=True, + ) + @property def referenced_dataset_ids(self) -> List[int]: datasets = [ @@ -42,10 +61,19 @@ def 
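
The EndpointFilter added to main.py above keeps the new /health and /metrics probes from flooding the access log. A minimal standalone sketch of the same pattern (the "uvicorn.access" logger name and the sample messages are assumptions for illustration):

import logging

class ProbeEndpointFilter(logging.Filter):
    def filter(self, record: logging.LogRecord) -> bool:
        # Drop access-log records that mention the probe endpoints.
        return all(key not in record.getMessage() for key in ("/health", "/metrics"))

access_logger = logging.getLogger("uvicorn.access")
access_logger.setLevel(logging.INFO)
access_logger.addHandler(logging.StreamHandler())
access_logger.addFilter(ProbeEndpointFilter())

access_logger.info('"GET /health HTTP/1.1" 200')        # suppressed
access_logger.info('"GET /api/v1/tasks HTTP/1.1" 200')  # printed
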
referenced_dataset_ids(self) -> List[int]: self.mining_output_dataset_id, self.label_output_dataset_id, self.training_input_dataset_id, - self.testing_dataset_id, + self.validation_dataset_id, ] return [dataset for dataset in datasets if dataset is not None] @property def referenced_model_ids(self) -> List[int]: return [self.training_output_model_id] if self.training_output_model_id else [] + + @property + def current_step(self) -> Optional[IterationStep]: + """ + list all the remaining steps in the current iteration and return the first one, if any + if no remaining steps exist, the current iteration is considered finished + """ + remaining_steps = sorted(filter(lambda i: not i.is_finished, self.iteration_steps), key=lambda i: i.id) + return remaining_steps[0] if remaining_steps else None diff --git a/ymir/backend/src/ymir_app/app/models/iteration_step.py b/ymir/backend/src/ymir_app/app/models/iteration_step.py new file mode 100644 index 0000000000..9557904da7 --- /dev/null +++ b/ymir/backend/src/ymir_app/app/models/iteration_step.py @@ -0,0 +1,73 @@ +from datetime import datetime +import json +from typing import Dict, Optional, Union + +from sqlalchemy import Boolean, Column, DateTime, Integer, String, Text +from sqlalchemy.orm import relationship + +from app.db.base_class import Base +from app.models.task import Task # noqa +from app.models.dataset import Dataset # noqa +from app.models.model import Model # noqa +from app.config import settings + + +class IterationStep(Base): + __tablename__ = "iteration_step" + id = Column(Integer, primary_key=True, index=True, autoincrement=True) + name = Column(String(settings.STRING_LEN_LIMIT), index=True, nullable=False) + iteration_id = Column(Integer, index=True, nullable=False) + + task_type = Column(Integer, index=True, nullable=False) + task_id = Column(Integer, index=True) + serialized_presetting = Column(Text(settings.TEXT_LEN_LIMIT)) + + is_finished = Column(Boolean, default=False, nullable=False) + is_deleted = Column(Boolean, default=False, nullable=False) + create_datetime = Column(DateTime, default=datetime.utcnow, nullable=False) + update_datetime = Column( + DateTime, + default=datetime.utcnow, + onupdate=datetime.utcnow, + nullable=False, + ) + + task = relationship( + "Task", + primaryjoin="foreign(Task.id)==IterationStep.task_id", + uselist=False, + viewonly=True, + ) + + @property + def percent(self) -> Optional[float]: + return self.task.percent if self.task else None # type: ignore + + @property + def result_dataset(self) -> Optional[Dataset]: + if not self.task: + return None + return self.task.result_dataset # type: ignore + + @property + def result_model(self) -> Optional[Model]: + if not self.task: + return None + return self.task.result_model # type: ignore + + @property + def result(self) -> Optional[Union[Dataset, Model]]: + return self.result_dataset or self.result_model + + @property + def state(self) -> Optional[int]: + """ + for each step in an iteration, + we only care about the related dataset's or model's state; + the task's state is considered internal + """ + return self.result.result_state if self.result else None + + @property + def presetting(self) -> Dict: + return json.loads(self.serialized_presetting) if self.serialized_presetting else {} diff --git a/ymir/backend/src/ymir_app/app/models/model.py b/ymir/backend/src/ymir_app/app/models/model.py index 5e13c4ecaf..cdfa9ef463 100644 --- a/ymir/backend/src/ymir_app/app/models/model.py +++ b/ymir/backend/src/ymir_app/app/models/model.py @@ -1,19 +1,13 @@ from datetime import
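
The current_step property above reduces to "first unfinished step, ordered by id". A self-contained sketch of that selection rule, with a dataclass standing in for the ORM rows (StepStub is hypothetical):

from dataclasses import dataclass
from typing import List, Optional

@dataclass
class StepStub:
    id: int
    is_finished: bool

def current_step(steps: List[StepStub]) -> Optional[StepStub]:
    # First unfinished step by id; None means the iteration is finished.
    remaining = sorted((s for s in steps if not s.is_finished), key=lambda s: s.id)
    return remaining[0] if remaining else None

steps = [StepStub(1, True), StepStub(3, False), StepStub(2, False)]
assert current_step(steps).id == 2
assert current_step([StepStub(1, True)]) is None
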
datetime +from typing import Optional -from sqlalchemy import ( - Boolean, - Column, - DateTime, - Float, - Integer, - String, - SmallInteger, -) +from sqlalchemy import Boolean, Column, DateTime, Float, Integer, String, SmallInteger, Text from sqlalchemy.orm import relationship from app.config import settings from app.db.base_class import Base from app.models.task import Task # noqa +from app.models.model_stage import ModelStage # noqa class Model(Base): @@ -30,6 +24,8 @@ class Model(Base): project_id = Column(Integer, index=True, nullable=False) task_id = Column(Integer, index=True, nullable=False) + keywords = Column(Text(settings.TEXT_LEN_LIMIT)) + # imported/copied model has no mAP map = Column(Float, nullable=True) @@ -40,6 +36,21 @@ class Model(Base): uselist=False, viewonly=True, ) + related_stages = relationship( + "ModelStage", + primaryjoin="foreign(ModelStage.model_id)==Model.id", + backref="model", + uselist=True, + viewonly=True, + ) + recommended_stage = Column(Integer, nullable=True) + + default_stage = relationship( + "ModelStage", + primaryjoin="foreign(ModelStage.model_id)==Model.id", + uselist=False, + viewonly=True, + ) is_visible = Column(Boolean, default=True, nullable=False) is_deleted = Column(Boolean, default=False, nullable=False) @@ -58,3 +69,7 @@ def group_name(self) -> str: @property def name(self) -> str: return "_".join([self.group_name, str(self.version_num)]) + + @property + def default_stage_name(self) -> Optional[str]: + return self.default_stage.name if self.default_stage else None diff --git a/ymir/backend/src/ymir_app/app/models/model_stage.py b/ymir/backend/src/ymir_app/app/models/model_stage.py new file mode 100644 index 0000000000..e49df9a8e6 --- /dev/null +++ b/ymir/backend/src/ymir_app/app/models/model_stage.py @@ -0,0 +1,30 @@ +from datetime import datetime + +from sqlalchemy import ( + Boolean, + Column, + DateTime, + Float, + Integer, + String, + UniqueConstraint +) + +from app.config import settings +from app.db.base_class import Base + + +class ModelStage(Base): + __tablename__ = "model_stage" + __table_args__ = (UniqueConstraint('model_id', 'name'),) + id = Column(Integer, primary_key=True, index=True, autoincrement=True) + name = Column(String(settings.STRING_LEN_LIMIT), index=True) + timestamp = Column(Integer, nullable=False) + map = Column(Float, nullable=True) + model_id = Column(Integer, index=True, nullable=False) + + is_deleted = Column(Boolean, default=False, nullable=False) + create_datetime = Column(DateTime, default=datetime.utcnow, nullable=False) + update_datetime = Column( + DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False, + ) diff --git a/ymir/backend/src/ymir_app/app/models/project.py b/ymir/backend/src/ymir_app/app/models/project.py index 6090d2080b..24fd78fdf7 100644 --- a/ymir/backend/src/ymir_app/app/models/project.py +++ b/ymir/backend/src/ymir_app/app/models/project.py @@ -1,4 +1,5 @@ from datetime import datetime +import json from typing import List from sqlalchemy import ( @@ -14,12 +15,14 @@ from sqlalchemy.orm import relationship from app.config import settings +from app.constants.state import ResultState, TaskState, TaskType from app.db.base_class import Base from app.models.dataset import Dataset # noqa from app.models.dataset_group import DatasetGroup # noqa from app.models.iteration import Iteration # noqa from app.models.model import Model # noqa from app.models.model_group import ModelGroup # noqa +from app.models.task import Task # noqa class Project(Base): @@ -39,10 +42,14 @@ 
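
Note that default_stage above joins on model_id alone with uselist=False, so a model with several stages gets an arbitrary matching row; the explicit pointer lives in the recommended_stage column. A hedged sketch of resolving the effective stage with stub types (not the ORM models):

from dataclasses import dataclass, field
from typing import List, Optional

@dataclass
class StageStub:
    id: int
    name: str

@dataclass
class ModelStub:
    related_stages: List[StageStub] = field(default_factory=list)
    recommended_stage: Optional[int] = None

def resolve_stage(model: ModelStub) -> Optional[StageStub]:
    # Prefer the stage pointed to by recommended_stage, then fall back to
    # the first related stage (roughly what default_stage would yield).
    for stage in model.related_stages:
        if stage.id == model.recommended_stage:
            return stage
    return model.related_stages[0] if model.related_stages else None

m = ModelStub([StageStub(10, "epoch-10"), StageStub(20, "best")], recommended_stage=20)
assert resolve_stage(m).name == "best"
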
class Project(Base): training_keywords = Column(Text(settings.TEXT_LEN_LIMIT), nullable=False) training_dataset_group_id = Column(Integer, index=True) mining_dataset_id = Column(Integer, index=True) - testing_dataset_id = Column(Integer, index=True) + validation_dataset_id = Column(Integer, index=True) + testing_dataset_ids = Column(String(settings.LONG_STRING_LEN_LIMIT)) initial_model_id = Column(Integer, index=True) + initial_model_stage_id = Column(Integer, index=True) initial_training_dataset_id = Column(Integer, index=True) + candidate_training_dataset_id = Column(Integer) + enable_iteration = Column(Boolean, default=True, nullable=False) # for project haven't finish initialization, current_iteration_id is None current_iteration_id = Column(Integer) user_id = Column(Integer, index=True, nullable=False) @@ -59,9 +66,9 @@ class Project(Base): uselist=True, viewonly=True, ) - testing_dataset = relationship( + validation_dataset = relationship( "Dataset", - primaryjoin="foreign(Dataset.id)==Project.testing_dataset_id", + primaryjoin="foreign(Dataset.id)==Project.validation_dataset_id", uselist=False, viewonly=True, ) @@ -83,6 +90,12 @@ class Project(Base): uselist=True, viewonly=True, ) + tasks = relationship( + "Task", + primaryjoin="foreign(Task.project_id)==Project.id", + uselist=True, + viewonly=True, + ) current_iteration = relationship( "Iteration", primaryjoin="foreign(Iteration.id)==Project.current_iteration_id", @@ -108,11 +121,33 @@ class Project(Base): @property def dataset_count(self) -> int: - return len(self.datasets) + # Only ready and visible datasets count. + # stick to `dataset_count` for compatibility + ready_datasets = [d for d in self.datasets if d.result_state == ResultState.ready and d.is_visible] + return len(ready_datasets) @property def model_count(self) -> int: - return len(self.models) + # Only ready and visible models count.
+ # stick to `model_count` for compatibility + ready_models = [model for model in self.models if model.result_state == ResultState.ready and model.is_visible] + return len(ready_models) + + @property + def total_asset_count(self) -> int: + return sum([dataset.asset_count for dataset in self.datasets if dataset.asset_count]) + + @property + def training_tasks(self) -> List[Task]: + return [task for task in self.tasks if task.type == TaskType.training] + + @property + def running_task_count(self) -> int: + return sum([task.state == TaskState.running for task in self.training_tasks]) + + @property + def total_task_count(self) -> int: + return len(self.training_tasks) @property def referenced_dataset_ids(self) -> List[int]: @@ -122,7 +157,14 @@ def referenced_dataset_ids(self) -> List[int]: - datasets and models of current iteration - all the training dataset of all the iterations """ - project_dataset_ids = [self.testing_dataset_id, self.mining_dataset_id, self.initial_training_dataset_id] + testing_dataset_ids = [int(i) for i in self.testing_dataset_ids.split(",")] if self.testing_dataset_ids else [] + project_dataset_ids = [ + self.validation_dataset_id, + self.mining_dataset_id, + self.initial_training_dataset_id, + self.candidate_training_dataset_id, + *testing_dataset_ids, + ] current_iteration_dataset_ids = self.current_iteration.referenced_dataset_ids if self.current_iteration else [] all_iterations_training_dataset_ids = [i.training_input_dataset_id for i in self.iterations] dataset_ids = filter( @@ -140,3 +182,7 @@ def referenced_model_ids(self) -> List[int]: current_iteration_model_ids + [self.initial_model_id] + all_iterations_training_model_ids, # type: ignore ) return list(set(model_ids)) + + @property + def training_targets(self) -> List[str]: + return json.loads(self.training_keywords) if self.training_keywords else [] diff --git a/ymir/backend/src/ymir_app/app/models/task.py b/ymir/backend/src/ymir_app/app/models/task.py index 28514aebda..f23b03c530 100644 --- a/ymir/backend/src/ymir_app/app/models/task.py +++ b/ymir/backend/src/ymir_app/app/models/task.py @@ -32,6 +32,9 @@ class Task(Base): user_id = Column(Integer, index=True, nullable=False) project_id = Column(Integer, index=True, nullable=False) + dataset_id = Column(Integer, index=True, nullable=True) + model_stage_id = Column(Integer, index=True, nullable=True) + is_terminated = Column(Boolean, default=False, nullable=False) is_deleted = Column(Boolean, default=False, nullable=False) last_message_datetime = Column(DATETIME(fsp=6), default=datetime.utcnow, nullable=False) diff --git a/ymir/backend/src/ymir_app/app/models/user.py b/ymir/backend/src/ymir_app/app/models/user.py index 4cb2108e3d..fe8b2a0837 100644 --- a/ymir/backend/src/ymir_app/app/models/user.py +++ b/ymir/backend/src/ymir_app/app/models/user.py @@ -3,6 +3,7 @@ from sqlalchemy import Boolean, Column, DateTime, Integer, String from app.db.base_class import Base +from app.config import settings class User(Base): @@ -15,6 +16,9 @@ class User(Base): hashed_password = Column(String(200), nullable=False) state = Column(Integer, index=True, default=1) role = Column(Integer, index=True, default=1) + organization = Column(String(settings.STRING_LEN_LIMIT)) + scene = Column(String(settings.LONG_STRING_LEN_LIMIT)) + is_deleted = Column(Boolean(), default=False) last_login_datetime = Column(DateTime, nullable=True) create_datetime = Column(DateTime, default=datetime.utcnow, nullable=False) diff --git a/ymir/backend/src/ymir_app/app/schemas/__init__.py 
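
testing_dataset_ids above is persisted as a comma-separated string rather than a link table, so referenced_dataset_ids has to parse it by hand. The parsing in isolation (the function name is illustrative):

from typing import List, Optional

def parse_testing_dataset_ids(raw: Optional[str]) -> List[int]:
    # "7,8,9" -> [7, 8, 9]; None or "" -> []
    return [int(i) for i in raw.split(",")] if raw else []

assert parse_testing_dataset_ids("7,8,9") == [7, 8, 9]
assert parse_testing_dataset_ids(None) == []
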
b/ymir/backend/src/ymir_app/app/schemas/__init__.py index 26fa4c4d1f..b3818840e4 100644 --- a/ymir/backend/src/ymir_app/app/schemas/__init__.py +++ b/ymir/backend/src/ymir_app/app/schemas/__init__.py @@ -5,10 +5,12 @@ DatasetImport, DatasetOut, DatasetPaginationOut, - DatasetsOut, DatasetUpdate, - ImportStrategy, + DatasetsOut, DatasetsFusionParameter, + DatasetsAnalysesOut, + DatasetInfoOut, + ImportStrategy, ) from .dataset_group import ( DatasetGroupOut, @@ -16,7 +18,6 @@ DatasetGroupUpdate, DatasetGroupPaginationOut, ) -from .graph import Graph, GraphOut from .image import ( DockerImage, DockerImageCreate, @@ -29,6 +30,7 @@ from .image_relationship import ImageRelationshipsCreate, ImageRelationshipsOut from .inference import InferenceCreate, InferenceOut from .iteration import IterationsOut, IterationOut, IterationCreate, IterationUpdate +from .iteration_step import IterationStepOut, IterationStepsOut from .keyword import ( KeywordOut, KeywordsCreate, @@ -44,7 +46,9 @@ ModelPaginationOut, ModelsOut, ModelUpdate, + StageChange, ) +from .model_stage import ModelStage, ModelStageOut, ModelStagesOut, ModelStageCreate, ModelStageUpdate from .model_group import ( ModelGroupOut, ModelGroupCreate, @@ -69,6 +73,7 @@ StatsPopularKeywordsOut, StatsPopularModelsOut, StatsProjectsCountOut, + StatsMetricsQueryOut, ) from .sys_info import SysInfo, SysInfoOut from .task import ( diff --git a/ymir/backend/src/ymir_app/app/schemas/asset.py b/ymir/backend/src/ymir_app/app/schemas/asset.py index 8257196e82..e9abc20b78 100644 --- a/ymir/backend/src/ymir_app/app/schemas/asset.py +++ b/ymir/backend/src/ymir_app/app/schemas/asset.py @@ -25,9 +25,11 @@ class AssetUpdate(AssetBase): class Asset(AssetBase): url: str - annotations: Optional[List[Dict]] metadata: Optional[Dict] keywords: Optional[List[str]] + gt: Optional[List[Dict]] + pred: Optional[List[Dict]] + cks: Optional[Dict] class AssetOut(Common): diff --git a/ymir/backend/src/ymir_app/app/schemas/dataset.py b/ymir/backend/src/ymir_app/app/schemas/dataset.py index 534eeeb1bb..3628ecee02 100644 --- a/ymir/backend/src/ymir_app/app/schemas/dataset.py +++ b/ymir/backend/src/ymir_app/app/schemas/dataset.py @@ -1,8 +1,8 @@ import enum import json -from typing import Any, Dict, List, Optional +from typing import Any, Dict, List, Optional, Union -from pydantic import BaseModel, Field, validator +from pydantic import BaseModel, Field, validator, root_validator from app.constants.state import ResultState, TaskType from app.schemas.common import ( @@ -19,6 +19,7 @@ class ImportStrategy(enum.IntEnum): no_annotations = 1 ignore_unknown_annotations = 2 stop_upon_unknown_annotations = 3 + add_unknown_annotations = 4 class MergeStrategy(enum.IntEnum): @@ -36,8 +37,6 @@ class DatasetBase(BaseModel): # task_id haven't created yet # user_id can be parsed from token keywords: Optional[str] - ignored_keywords: Optional[str] - negative_info: Optional[str] asset_count: Optional[int] keyword_count: Optional[int] @@ -74,6 +73,7 @@ class DatasetCreate(DatasetBase): hash: str = Field(description="related task hash") task_id: int user_id: int + description: Optional[str] class Config: use_enum_values = True @@ -84,8 +84,6 @@ class DatasetUpdate(BaseModel): description: Optional[str] result_state: Optional[ResultState] keywords: Optional[str] - ignored_keywords: Optional[str] - negative_info: Optional[str] asset_count: Optional[int] keyword_count: Optional[int] @@ -104,14 +102,16 @@ class Config: orm_mode = True +class DatasetInDB(DatasetInDBBase): + pass + + # Properties to return 
to caller class Dataset(DatasetInDBBase): keywords: Optional[str] - ignored_keywords: Optional[str] - negative_info: Optional[str] # make sure all the json dumped value is unpacked before returning to caller - @validator("keywords", "ignored_keywords", "negative_info") + @validator("keywords") def unpack(cls, v: Optional[str]) -> Dict[str, int]: if v is None: return {} @@ -135,6 +135,65 @@ class DatasetsOut(Common): result: List[Dataset] +class DatasetAnnotationHist(BaseModel): + quality: List[Dict] + area: List[Dict] + area_ratio: List[Dict] + + +class DatasetAnnotation(BaseModel): + keywords: Dict[str, int] + negative_assets_count: int + tags_count_total: Dict # box tags in first level + tags_count: Dict # box tags in second level + + hist: Optional[DatasetAnnotationHist] + annos_count: Optional[int] + ave_annos_count: Optional[float] + + eval_class_ids: Optional[List] + + +class DatasetInfo(DatasetInDBBase): + gt: Optional[DatasetAnnotation] + pred: Optional[DatasetAnnotation] + + keywords: Optional[Any] + cks_count: Optional[Dict] + cks_count_total: Optional[Dict] + + total_assets_count: Optional[int] + + # make sure all the json dumped value is unpacked before returning to caller + @validator("keywords") + def unpack(cls, v: Optional[Union[str, Dict]]) -> Dict[str, int]: + if v is None: + return {} + if isinstance(v, str): + return json.loads(v) + return v + + +class DatasetInfoOut(Common): + result: DatasetInfo + + +class DatasetHist(BaseModel): + bytes: List[Dict] + area: List[Dict] + quality: List[Dict] + hw_ratio: List[Dict] + + +class DatasetAnalysis(DatasetInfo): + total_assets_mbytes: Optional[int] + hist: Optional[DatasetHist] + + +class DatasetsAnalysesOut(Common): + result: List[DatasetAnalysis] + + class DatasetPaginationOut(Common): result: DatasetPagination @@ -154,14 +213,65 @@ class DatasetsFusionParameter(RequestParameterBase): sampling_count: int = 0 + description: Optional[str] = Field(description="description for fusion result") + class DatasetEvaluationCreate(BaseModel): project_id: int - gt_dataset_id: int - other_dataset_ids: List[int] + dataset_ids: List[int] confidence_threshold: float + iou_threshold: float + require_average_iou: bool = False + need_pr_curve: bool = True + main_ck: Optional[str] = None class DatasetEvaluationOut(Common): # dict of dataset_id to evaluation result result: Dict[int, Dict] + + +class MultiDatasetsWithProjectID(BaseModel): + project_id: int + dataset_ids: List[int] + + +class DatasetCheckDuplicationOut(Common): + result: int + + +class DatasetMergeCreate(BaseModel): + project_id: int + dest_group_id: Optional[int] + dest_group_name: Optional[str] + include_datasets: List[int] + exclude_datasets: Optional[List[int]] + merge_strategy: MergeStrategy = Field( + MergeStrategy.prefer_newest, description="strategy to merge multiple datasets" + ) + description: Optional[str] = Field(description="description for merge result") + + @root_validator + def confine_parameters(cls, values: Any) -> Any: + if values.get("dest_group_id") is None and values.get("dest_group_name") is None: + raise ValueError("dest_group_id and dest_group_name cannot both be None") + return values + + +class DatasetFilterCreate(BaseModel): + project_id: int + dataset_id: int + include_keywords: Optional[List[str]] + exclude_keywords: Optional[List[str]] + sampling_count: Optional[int] + description: Optional[str] = Field(description="description for filter result") + + @root_validator + def confine_parameters(cls, values: Any) -> Any: + if ( + 
values.get("include_keywords") is None + and values.get("exclude_keywords") is None + and values.get("sampling_count") is None + ): + raise ValueError("include_keywords, exclude_keywords and sampling_count cannot all be None") + return values diff --git a/ymir/backend/src/ymir_app/app/schemas/graph.py b/ymir/backend/src/ymir_app/app/schemas/graph.py deleted file mode 100644 index 99162b847e..0000000000 --- a/ymir/backend/src/ymir_app/app/schemas/graph.py +++ /dev/null @@ -1,42 +0,0 @@ -from enum import IntEnum -from typing import Dict, List, Optional - -from pydantic import BaseModel, Field - -from .common import Common - - -class NodeType(IntEnum): - dataset = 1 - model = 2 - - -class Node(BaseModel): - id: int - name: str - hash: str - type: int - proprieties: Optional[Dict] = None - - -class Task(BaseModel): - id: int - name: Optional[str] - hash: Optional[str] - type: Optional[int] - proprieties: Optional[Dict] = None - - -class Edge(BaseModel): - source: str = Field(description="node hash") - target: str = Field(description="node hash") - task: Task - - -class Graph(BaseModel): - nodes: List[Node] - edges: List[Edge] - - -class GraphOut(Common): - result: Graph diff --git a/ymir/backend/src/ymir_app/app/schemas/image.py b/ymir/backend/src/ymir_app/app/schemas/image.py index 99844c8cc7..789803701b 100644 --- a/ymir/backend/src/ymir_app/app/schemas/image.py +++ b/ymir/backend/src/ymir_app/app/schemas/image.py @@ -18,6 +18,7 @@ class DockerImageBase(BaseModel): hash: Optional[str] url: Optional[str] description: Optional[str] + enable_livecode: Optional[bool] = False class DockerImageCreate(DockerImageBase): @@ -32,6 +33,7 @@ class DockerImageUpdate(BaseModel): name: Optional[str] description: Optional[str] is_shared: Optional[bool] + enable_livecode: Optional[bool] class DockerImageInDBBase(IdModelMixin, DateTimeModelMixin, IsDeletedModelMixin, DockerImageBase): diff --git a/ymir/backend/src/ymir_app/app/schemas/inference.py b/ymir/backend/src/ymir_app/app/schemas/inference.py index d8ccfea1dc..048c6c0e4e 100644 --- a/ymir/backend/src/ymir_app/app/schemas/inference.py +++ b/ymir/backend/src/ymir_app/app/schemas/inference.py @@ -7,7 +7,8 @@ class InferenceBase(BaseModel): docker_image: str - model_id: int + project_id: int + model_stage_id: int image_urls: List[str] docker_image_config: Dict = Field(description="inference docker image runtime configuration") @@ -41,7 +42,7 @@ class Annotation(BaseModel): class InferenceResult(BaseModel): - model_id: int + model_stage_id: int annotations: List[Annotation] diff --git a/ymir/backend/src/ymir_app/app/schemas/iteration.py b/ymir/backend/src/ymir_app/app/schemas/iteration.py index 3ced60bb61..57c7f32b77 100644 --- a/ymir/backend/src/ymir_app/app/schemas/iteration.py +++ b/ymir/backend/src/ymir_app/app/schemas/iteration.py @@ -1,8 +1,7 @@ from typing import List, Optional - from pydantic import BaseModel -from app.constants.state import IterationStage +from app.constants.state import IterationStage, ResultState, TaskType from app.schemas.common import ( Common, DateTimeModelMixin, @@ -16,12 +15,14 @@ class IterationBase(BaseModel): previous_iteration: int description: Optional[str] current_stage: Optional[IterationStage] + mining_dataset_id: Optional[int] mining_input_dataset_id: Optional[int] mining_output_dataset_id: Optional[int] label_output_dataset_id: Optional[int] training_input_dataset_id: Optional[int] training_output_model_id: Optional[int] - testing_dataset_id: Optional[int] + training_output_model_stage_id: Optional[int] + 
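
The confine_parameters validators above enforce an at-least-one-of rule across optional fields. A minimal pydantic-v1 reproduction of the DatasetMergeCreate variant, trimmed to the two fields involved:

from typing import Any, Optional
from pydantic import BaseModel, ValidationError, root_validator

class MergeParams(BaseModel):
    dest_group_id: Optional[int]
    dest_group_name: Optional[str]

    @root_validator
    def confine_parameters(cls, values: Any) -> Any:
        # Reject payloads that name neither a destination group id nor a name.
        if values.get("dest_group_id") is None and values.get("dest_group_name") is None:
            raise ValueError("dest_group_id and dest_group_name cannot both be None")
        return values

MergeParams(dest_group_name="merged")  # ok
try:
    MergeParams()  # neither field given
except ValidationError as exc:
    print(exc)
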
validation_dataset_id: Optional[int] user_id: int project_id: int @@ -33,12 +34,14 @@ class IterationCreate(BaseModel): description: Optional[str] project_id: int current_stage: Optional[IterationStage] = IterationStage.prepare_mining + mining_dataset_id: Optional[int] mining_input_dataset_id: Optional[int] mining_output_dataset_id: Optional[int] label_output_dataset_id: Optional[int] training_input_dataset_id: Optional[int] training_output_model_id: Optional[int] - testing_dataset_id: Optional[int] + training_output_model_stage_id: Optional[int] + validation_dataset_id: Optional[int] # Properties that can be changed @@ -50,13 +53,33 @@ class IterationUpdate(BaseModel): label_output_dataset_id: Optional[int] training_input_dataset_id: Optional[int] training_output_model_id: Optional[int] - testing_dataset_id: Optional[int] + training_output_model_stage_id: Optional[int] + validation_dataset_id: Optional[int] class Config: use_enum_values = True +class IterationStepLite(BaseModel): + """ + Copied from iteration_step, to avoid circular importing + """ + + id: int + name: str + task_type: TaskType + task_id: Optional[int] + is_finished: Optional[bool] + state: Optional[ResultState] + percent: Optional[float] + + class Config: + orm_mode = True + + class IterationInDBBase(IdModelMixin, DateTimeModelMixin, IsDeletedModelMixin, IterationBase): + current_step: Optional[IterationStepLite] + class Config: orm_mode = True @@ -81,3 +104,19 @@ class IterationPagination(BaseModel): class IterationPaginationOut(Common): result: IterationPagination + + +class MiningRatio(BaseModel): + class_name: Optional[str] + processed_assets_count: int + total_assets_count: int + + +class IterationMiningProgress(BaseModel): + total_mining_ratio: MiningRatio + class_wise_mining_ratio: List[MiningRatio] + negative_ratio: MiningRatio + + +class IterationMiningProgressOut(Common): + result: IterationMiningProgress diff --git a/ymir/backend/src/ymir_app/app/schemas/iteration_step.py b/ymir/backend/src/ymir_app/app/schemas/iteration_step.py new file mode 100644 index 0000000000..1ea64d5dde --- /dev/null +++ b/ymir/backend/src/ymir_app/app/schemas/iteration_step.py @@ -0,0 +1,53 @@ +from typing import Dict, List, Optional +from pydantic import BaseModel + +from app.constants.state import TaskType, ResultState +from app.schemas.common import ( + Common, + DateTimeModelMixin, + IdModelMixin, + IsDeletedModelMixin, +) + + +class IterationStepBase(BaseModel): + name: str + task_type: TaskType + iteration_id: int + task_id: Optional[int] + is_finished: Optional[bool] + state: Optional[ResultState] + percent: Optional[float] + + +class IterationStepCreate(BaseModel): + name: str + task_type: TaskType + iteration_id: int + + +class IterationStepUpdate(BaseModel): + is_finished: Optional[bool] + task_id: Optional[int] + serialized_presetting: str + + +class IterationStepInDBBase(IdModelMixin, DateTimeModelMixin, IsDeletedModelMixin, IterationStepBase): + percent: Optional[float] + state: Optional[ResultState] + presetting: Optional[Dict] + + class Config: + orm_mode = True + + +class IterationStep(IterationStepInDBBase): + pass + + +class IterationStepOut(Common): + result: IterationStep + + +class IterationStepsOut(Common): + result: List[IterationStep] diff --git a/ymir/backend/src/ymir_app/app/schemas/keyword.py b/ymir/backend/src/ymir_app/app/schemas/keyword.py index 1bb8aa6f70..ddab1a8c35 100644 --- a/ymir/backend/src/ymir_app/app/schemas/keyword.py +++ b/ymir/backend/src/ymir_app/app/schemas/keyword.py @@ -34,3 +34,11 @@ 
class KeywordsCreateResult(BaseModel): class KeywordsCreateOut(Common): result: KeywordsCreateResult + + +class KeywordsInput(Common): + keywords: List[SingleLabel] + + +class KeywordsCheckDupOut(Common): + result: List[str] diff --git a/ymir/backend/src/ymir_app/app/schemas/model.py b/ymir/backend/src/ymir_app/app/schemas/model.py index 4f251c0168..d908244a0a 100644 --- a/ymir/backend/src/ymir_app/app/schemas/model.py +++ b/ymir/backend/src/ymir_app/app/schemas/model.py @@ -1,4 +1,5 @@ from typing import Any, List, Optional +import json from pydantic import BaseModel, Field, root_validator, validator from app.config import settings @@ -9,6 +10,7 @@ IdModelMixin, IsDeletedModelMixin, ) +from app.schemas.model_stage import ModelStageInDBBase from app.schemas.task import TaskInternal @@ -21,6 +23,7 @@ class ModelBase(BaseModel): source: TaskType description: Optional[str] map: Optional[float] = Field(description="Mean Average Precision") + keywords: Optional[str] result_state: ResultState = ResultState.processing model_group_id: int project_id: int @@ -51,11 +54,13 @@ def gen_import_type(cls, v: TaskType, values: Any) -> TaskType: class ModelCreate(ModelBase): task_id: int user_id: int + description: Optional[str] class ModelUpdate(BaseModel): name: str description: Optional[str] + keywords: Optional[str] class ModelInDBBase(IdModelMixin, DateTimeModelMixin, IsDeletedModelMixin, ModelBase): @@ -64,6 +69,8 @@ class ModelInDBBase(IdModelMixin, DateTimeModelMixin, IsDeletedModelMixin, Model version_num: int related_task: Optional[TaskInternal] is_visible: bool + related_stages: List[ModelStageInDBBase] + recommended_stage: Optional[int] = None class Config: orm_mode = True @@ -71,6 +78,7 @@ class Config: # Properties to return to caller class Model(ModelInDBBase): + keywords: Optional[str] url: Optional[str] = None @root_validator @@ -80,6 +88,13 @@ def make_up_url(cls, values: Any) -> Any: values["url"] = get_model_url(values["hash"]) return values + + # unpack json dumped keywords before returning to caller + @validator("keywords") + def unpack(cls, v: Optional[str]) -> List[str]: + if v is None: + return [] + return json.loads(v) + class ModelPagination(BaseModel): total: int @@ -96,3 +111,7 @@ class ModelsOut(Common): class ModelPaginationOut(Common): result: ModelPagination + + +class StageChange(BaseModel): + stage_id: int diff --git a/ymir/backend/src/ymir_app/app/schemas/model_stage.py b/ymir/backend/src/ymir_app/app/schemas/model_stage.py new file mode 100644 index 0000000000..f581145420 --- /dev/null +++ b/ymir/backend/src/ymir_app/app/schemas/model_stage.py @@ -0,0 +1,55 @@ +from pydantic import BaseModel, Field +from typing import List, Optional + +from app.schemas.common import ( + Common, + DateTimeModelMixin, + IdModelMixin, + IsDeletedModelMixin, +) + + +class ModelStageBase(BaseModel): + name: str = Field(description="Model stage name") + map: Optional[float] = Field(description="Mean Average Precision") + timestamp: int = Field(description="Timestamp") + model_id: int + + +class ModelStageCreate(ModelStageBase): + pass + + +class ModelStageUpdate(ModelStageBase): + pass + + +class ModelStageInDBBase(IdModelMixin, DateTimeModelMixin, IsDeletedModelMixin, ModelStageBase): + class Config: + orm_mode = True + + +class Model(BaseModel): + """ + A replica of the model schema, to avoid circular imports + """ + + id: int + hash: str + group_name: str + version_num: int + + class Config: + orm_mode = True + + +class ModelStage(ModelStageInDBBase): + model: Model + + +class 
ModelStageOut(Common): + result: ModelStage + + +class ModelStagesOut(Common): + result: List[ModelStage] diff --git a/ymir/backend/src/ymir_app/app/schemas/project.py b/ymir/backend/src/ymir_app/app/schemas/project.py index a0f4436bfa..b90a24e4d6 100644 --- a/ymir/backend/src/ymir_app/app/schemas/project.py +++ b/ymir/backend/src/ymir_app/app/schemas/project.py @@ -1,7 +1,7 @@ import json from typing import List, Optional -from pydantic import BaseModel, Field, validator +from pydantic import BaseModel, validator from app.constants.state import MiningStrategy, TrainingType from app.schemas.common import ( @@ -17,19 +17,22 @@ class ProjectBase(BaseModel): - name: str = Field(description="Project Name") + name: str description: Optional[str] mining_strategy: MiningStrategy = MiningStrategy.chunk chunk_size: Optional[int] = 0 - training_type: TrainingType = TrainingType.object_detect - + enable_iteration: Optional[bool] = True iteration_target: Optional[int] map_target: Optional[float] training_dataset_count_target: Optional[int] + is_example: Optional[bool] = False + training_type: TrainingType = TrainingType.object_detect + candidate_training_dataset_id: Optional[int] + # Sufficient properties to create a project class ProjectCreate(ProjectBase): @@ -51,10 +54,13 @@ class ProjectUpdate(BaseModel): mining_strategy: MiningStrategy = MiningStrategy.chunk chunk_size: Optional[int] mining_dataset_id: Optional[int] - testing_dataset_id: Optional[int] + validation_dataset_id: Optional[int] + testing_dataset_ids: Optional[str] description: Optional[str] initial_model_id: Optional[int] + initial_model_stage_id: Optional[int] initial_training_dataset_id: Optional[int] + candidate_training_dataset_id: Optional[int] training_keywords: Optional[List[str]] @@ -75,17 +81,19 @@ def pack_keywords(cls, v: Optional[List[str]]) -> Optional[str]: class ProjectInDBBase(IdModelMixin, DateTimeModelMixin, IsDeletedModelMixin, ProjectBase): training_dataset_group_id: Optional[int] mining_dataset_id: Optional[int] - testing_dataset_id: Optional[int] + validation_dataset_id: Optional[int] initial_model_id: Optional[int] + initial_model_stage_id: Optional[int] initial_training_dataset_id: Optional[int] current_iteration: Optional[Iteration] training_dataset_group: Optional[DatasetGroup] - testing_dataset: Optional[Dataset] + validation_dataset: Optional[Dataset] mining_dataset: Optional[Dataset] referenced_model_ids: List[int] referenced_dataset_ids: List[int] + testing_dataset_ids: Optional[str] class Config: orm_mode = True @@ -97,11 +105,20 @@ class Project(ProjectInDBBase): model_count: int = 0 training_keywords: List[str] current_iteration_id: Optional[int] + total_asset_count: int = 0 + running_task_count: int = 0 + total_task_count: int = 0 @validator("training_keywords", pre=True) def unpack_keywords(cls, v: str) -> List[str]: return json.loads(v) + @validator("enable_iteration", pre=True) + def make_up_default_value(cls, v: Optional[bool]) -> bool: + if v is None: + return True + return v + class ProjectOut(Common): result: Project diff --git a/ymir/backend/src/ymir_app/app/schemas/stats.py b/ymir/backend/src/ymir_app/app/schemas/stats.py index badae580eb..2fc1d3eb6a 100644 --- a/ymir/backend/src/ymir_app/app/schemas/stats.py +++ b/ymir/backend/src/ymir_app/app/schemas/stats.py @@ -47,3 +47,12 @@ class StatsProjectsCountOut(Common): class StatsModelmAPsOut(Common): result: Dict[str, List[Tuple[int, float]]] + + +class StatsMetricsQueryPoint(BaseModel): + legend: str + count: int + + +class 
StatsMetricsQueryOut(Common): + result: List[StatsMetricsQueryPoint] diff --git a/ymir/backend/src/ymir_app/app/schemas/sys_info.py b/ymir/backend/src/ymir_app/app/schemas/sys_info.py index bade7ec4d6..e576666d5e 100644 --- a/ymir/backend/src/ymir_app/app/schemas/sys_info.py +++ b/ymir/backend/src/ymir_app/app/schemas/sys_info.py @@ -5,6 +5,7 @@ class SysInfoBase(BaseModel): gpu_count: int + openpai_enabled: bool class SysInfo(SysInfoBase): diff --git a/ymir/backend/src/ymir_app/app/schemas/task.py b/ymir/backend/src/ymir_app/app/schemas/task.py index fa0fbaedc8..11c16ec1eb 100644 --- a/ymir/backend/src/ymir_app/app/schemas/task.py +++ b/ymir/backend/src/ymir_app/app/schemas/task.py @@ -1,16 +1,18 @@ +import enum import json from datetime import datetime from typing import Any, Dict, List, Optional, Union from pydantic import BaseModel, EmailStr, Field, validator, root_validator -from app.constants.state import TaskState, TaskType, ResultState, ResultType, IterationStage +from app.constants.state import AnnotationType, TaskState, TaskType, ResultState, ResultType, IterationStage from app.schemas.common import ( Common, DateTimeModelMixin, IdModelMixin, IsDeletedModelMixin, ) +from id_definition.task_id import TaskId class TaskBase(BaseModel): @@ -22,6 +24,20 @@ class Config: use_enum_values = True +class TrainingDatasetsStrategy(enum.IntEnum): + stop = 0 + as_training = 1 # use duplicated assets as training assets + as_validation = 2 # use duplicated assets as validation assets + + +class LongsideResizeParameter(BaseModel): + dest_size: int + + +class TaskPreprocess(BaseModel): + longside_resize: LongsideResizeParameter + + class TaskParameter(BaseModel): dataset_id: int keywords: Optional[List[str]] @@ -29,16 +45,19 @@ class TaskParameter(BaseModel): # label extra_url: Optional[str] labellers: Optional[List[EmailStr]] - keep_annotations: Optional[bool] + annotation_type: Optional[AnnotationType] = None # training validation_dataset_id: Optional[int] network: Optional[str] backbone: Optional[str] hyperparameter: Optional[str] + strategy: Optional[TrainingDatasetsStrategy] = TrainingDatasetsStrategy.stop + preprocess: Optional[TaskPreprocess] = Field(description="preprocess to apply to related dataset") # mining & dataset_infer model_id: Optional[int] + model_stage_id: Optional[int] mining_algorithm: Optional[str] top_k: Optional[int] generate_annotations: Optional[bool] @@ -60,6 +79,8 @@ class TaskCreate(TaskBase): iteration_stage: Optional[IterationStage] parameters: TaskParameter = Field(description="task specific parameters") docker_image_config: Optional[Dict] = Field(description="docker runtime configuration") + preprocess: Optional[TaskPreprocess] = Field(description="preprocess to apply to related dataset") + result_description: Optional[str] = Field(description="description for task result, not task itself") @validator("docker_image_config") def dumps_docker_image_config(cls, v: Optional[Union[str, Dict]], values: Dict[str, Any]) -> Optional[str]: @@ -70,6 +91,18 @@ def dumps_docker_image_config(cls, v: Optional[Union[str, Dict]], values: Dict[s else: return v + @root_validator(pre=True) + def tuck_preprocess_into_parameters(cls, values: Any) -> Any: + """ + For the frontend, preprocess is a separate task configuration; + however, the underlying service reads preprocess settings from task_parameter, + so we tuck preprocess into task_parameter here + """ + preprocess = values.get("preprocess") + if preprocess: + values["parameters"]["preprocess"] = preprocess + return values + class Config: 
use_enum_values = True @@ -87,12 +120,12 @@ class TaskInDBBase(IdModelMixin, DateTimeModelMixin, IsDeletedModelMixin, TaskBa state: Optional[TaskState] = TaskState.pending error_code: Optional[str] duration: Optional[int] = Field(0, description="task process time in seconds") - percent: Optional[float] = Field(0, description="from 0 to 1") + percent: Optional[float] = Field(0, ge=0, le=1) parameters: Optional[str] = Field(description="json dumped input parameters when creating task") config: Optional[str] = Field(description="json dumped docker runtime configuration") user_id: int = Field(description="task owner's user_id") - last_message_datetime: datetime = None # type: ignore + last_message_datetime: Optional[datetime] = None is_terminated: bool = False @@ -116,21 +149,17 @@ class Config: class TaskInternal(TaskInDBBase): - parameters: Optional[str] - config: Optional[str] + parameters: Optional[Any] + config: Optional[Any] state: TaskState result_type: ResultType = ResultType.no_result - @validator("parameters") - def loads_parameters(cls, v: str) -> Dict[str, Any]: - if not v: - return {} - return json.loads(v) - - @validator("config") - def loads_config(cls, v: str) -> Dict[str, Any]: + @validator("parameters", "config") + def ensure_dict(cls, v: Optional[Union[Dict, str]]) -> Dict[str, Any]: if not v: return {} + if isinstance(v, dict): + return v return json.loads(v) @validator("result_type", pre=True, always=True) @@ -145,6 +174,8 @@ def gen_result_type(cls, v: Any, values: Any) -> Optional[ResultType]: TaskType.import_data, TaskType.copy_data, TaskType.data_fusion, + TaskType.filter, + TaskType.merge, ]: return ResultType.dataset else: @@ -154,8 +185,18 @@ class Config: use_enum_values = True -class TaskResult(BaseModel): +class DatasetResult(BaseModel): + id: int + dataset_group_id: int + result_state: ResultState + + class Config: + orm_mode = True + + +class ModelResult(BaseModel): id: int + model_group_id: int result_state: ResultState class Config: @@ -163,8 +204,8 @@ class Config: class Task(TaskInternal): - result_model: Optional[TaskResult] - result_dataset: Optional[TaskResult] + result_model: Optional[ModelResult] + result_dataset: Optional[DatasetResult] @root_validator def ensure_terminate_state(cls, values: Any) -> Any: @@ -220,8 +261,8 @@ class TaskUpdateStatus(BaseModel): @classmethod def from_monitor_event(cls, msg: str) -> "TaskUpdateStatus": payload = json.loads(msg) - user_id = int(payload["task_extra_info"]["user_id"]) event = payload["percent_result"] + user_id = int(TaskId.from_task_id(event["task_id"]).user_id) return cls( user_id=user_id, hash=event["task_id"], @@ -239,8 +280,8 @@ class TaskResultUpdateMessage(BaseModel): percent: float state: int result_state: Optional[int] - result_model: Optional[TaskResult] - result_dataset: Optional[TaskResult] + result_model: Optional[ModelResult] + result_dataset: Optional[DatasetResult] @root_validator(pre=True) def gen_result_state(cls, values: Any) -> Any: @@ -270,3 +311,16 @@ class TaskPagination(BaseModel): class TaskPaginationOut(Common): result: TaskPagination + + +class PaiTaskStatus(BaseModel): + position: int + total_pending_task: int + + +class PaiTask(Task): + pai_status: Optional[PaiTaskStatus] + + +class PaiTaskOut(Common): + result: PaiTask diff --git a/ymir/backend/src/ymir_app/app/schemas/token.py b/ymir/backend/src/ymir_app/app/schemas/token.py index 097fd0bf54..ddcf07ba88 100644 --- a/ymir/backend/src/ymir_app/app/schemas/token.py +++ b/ymir/backend/src/ymir_app/app/schemas/token.py @@ -1,3 +1,5 
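
Two validator patterns from task.py above are worth seeing in isolation: the pre-root-validator that rewrites the raw payload before field validation, and the ensure_dict validator that tolerates either persisted JSON strings or already-parsed dicts. A standalone sketch combining both (TaskPayload and its field set are hypothetical reductions):

import json
from typing import Any, Dict, Optional, Union
from pydantic import BaseModel, root_validator, validator

class TaskPayload(BaseModel):
    parameters: Dict[str, Any]
    preprocess: Optional[Dict[str, Any]] = None
    config: Optional[Any] = None

    @root_validator(pre=True)
    def tuck_preprocess_into_parameters(cls, values: Any) -> Any:
        # pre=True receives the raw input dict, so `parameters` can still
        # be mutated before per-field validation runs.
        preprocess = values.get("preprocess")
        if preprocess:
            values["parameters"]["preprocess"] = preprocess
        return values

    @validator("config")
    def ensure_dict(cls, v: Optional[Union[Dict, str]]) -> Dict[str, Any]:
        # None or "" -> {}; dict passes through; str is parsed as JSON.
        if not v:
            return {}
        if isinstance(v, dict):
            return v
        return json.loads(v)

t = TaskPayload(
    parameters={"dataset_id": 1},
    preprocess={"longside_resize": {"dest_size": 512}},
    config='{"gpu_count": 1}',
)
assert t.parameters["preprocess"]["longside_resize"]["dest_size"] == 512
assert t.config == {"gpu_count": 1}
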
@@ +from typing import Optional + from pydantic import BaseModel from .common import Common @@ -17,3 +19,4 @@ class TokenOut(Common): class TokenPayload(BaseModel): id: int role: str + version: Optional[str] diff --git a/ymir/backend/src/ymir_app/app/schemas/user.py b/ymir/backend/src/ymir_app/app/schemas/user.py index 2d0a792d44..dc1cf37ff6 100644 --- a/ymir/backend/src/ymir_app/app/schemas/user.py +++ b/ymir/backend/src/ymir_app/app/schemas/user.py @@ -32,6 +32,8 @@ class UserBase(BaseModel): phone: Optional[str] = None avatar: Optional[str] = None state: UserState = UserState.registered + organization: Optional[str] = None + scene: Optional[str] = None # Properties to receive via API on creation diff --git a/ymir/backend/src/ymir_app/app/utils/cache.py b/ymir/backend/src/ymir_app/app/utils/cache.py index 5e56f8d1ca..3e82b264a8 100644 --- a/ymir/backend/src/ymir_app/app/utils/cache.py +++ b/ymir/backend/src/ymir_app/app/utils/cache.py @@ -9,7 +9,7 @@ class CacheClient: - def __init__(self, redis_uri: str, user_id: int): + def __init__(self, *, redis_uri: str = settings.BACKEND_REDIS_URL, user_id: int): self.prefix = "cache" self.user_id = user_id self.conn = self._get_redis_con(redis_uri) diff --git a/ymir/backend/src/ymir_app/app/utils/clickhouse.py b/ymir/backend/src/ymir_app/app/utils/clickhouse.py deleted file mode 100644 index ce9ede02bb..0000000000 --- a/ymir/backend/src/ymir_app/app/utils/clickhouse.py +++ /dev/null @@ -1,347 +0,0 @@ -from dataclasses import dataclass -from datetime import datetime -from typing import Any, Dict, List, Optional, Tuple, Union, Type - -from clickhouse_driver import Client -from fastapi.logger import logger - -from app.api.errors.errors import FailedToConnectClickHouse -from app.constants.state import TaskType, TrainingType -from app.config import settings -from app.utils.data import groupby - - -@dataclass -class ModelwithmAP: - user_id: int - model_id: int - keyword: str - mAP: float - rank: int - - @classmethod - def from_clickhouse(cls, res: Tuple) -> "ModelwithmAP": - return cls(*res) - - -@dataclass -class TimeBasedCount: - type_: str - count: int - time: datetime - - @classmethod - def from_clickhouse(cls, res: Tuple) -> "TimeBasedCount": - return cls(*res) - - -class YmirClickHouse: - def __init__(self, host: str = settings.CLICKHOUSE_URI): - self.client = Client(host=host) - - def execute(self, query: str, params: Optional[Any] = None) -> Any: - try: - records = self.client.execute(query, params) - except Exception as e: - print(e) - raise FailedToConnectClickHouse() - return records - - def save_project_parameter( - self, - dt: datetime, - user_id: int, - id_: int, - name: str, - training_type: str, - training_keywords: List[str], - ) -> Any: - return self.execute( - "INSERT INTO project VALUES", - [[dt, user_id, id_, name, training_type, training_keywords]], - ) - - def save_task_parameter( - self, - dt: datetime, - user_id: int, - project_id: int, - name: str, - hash_: str, - type_: str, - dataset_ids: List[int], - model_ids: List[int], - keywords: List[str], - ) -> Any: - return self.execute( - "INSERT INTO task_create VALUES", - [[dt, user_id, project_id, name, hash_, type_, dataset_ids, model_ids, keywords]], - ) - - def save_model_result( - self, - dt: datetime, - user_id: int, - project_id: int, - group_id: int, - id_: int, - name: str, - hash_: str, - map_: float, - keywords: List[str], - ) -> Any: - return self.execute( - "INSERT INTO model VALUES", - [[dt, user_id, project_id, group_id, id_, name, hash_, map_, keywords]], - ) - - 
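
The CacheClient change above makes redis_uri keyword-only with a settings-backed default, which is what lets a defaulted parameter sit ahead of the still-required user_id. The pattern in isolation (DEFAULT_URL stands in for settings.BACKEND_REDIS_URL):

DEFAULT_URL = "redis://localhost:6379/0"  # stand-in for settings.BACKEND_REDIS_URL

class KwOnlyClient:
    def __init__(self, *, redis_uri: str = DEFAULT_URL, user_id: int):
        # The bare `*` forces keyword arguments, so a parameter with a
        # default may precede a required one without a syntax error.
        self.redis_uri = redis_uri
        self.user_id = user_id

c1 = KwOnlyClient(user_id=42)                                   # default URI
c2 = KwOnlyClient(user_id=1, redis_uri="redis://cache:6379/1")  # explicit URI
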
def save_dataset_keyword( - self, dt: datetime, user_id: int, project_id: int, group_id: int, dataset_id: int, keywords: List[str] - ) -> Any: - """ - for keywords recommendation - """ - return self.execute( - "INSERT INTO dataset_keywords VALUES", [[dt, user_id, project_id, group_id, dataset_id, keywords]] - ) - - def get_popular_items(self, user_id: int, column: str, limit: int = 10) -> Any: - """ - Get most popular datasets, models or keywords - """ - sql = f"""\ -SELECT - {column} AS item, - count({column}) AS ref_count -FROM task_create -ARRAY JOIN {column} -WHERE user_id = %(user_id)s -GROUP BY {column} -ORDER BY ref_count DESC -LIMIT %(limit)s""" - return self.execute(sql, {"user_id": user_id, "limit": limit}) - - def get_models_order_by_map(self, user_id: int, keywords: Optional[List[str]] = None, limit: int = 10) -> Any: - """ - Get models of highest mAP score, partitioned by keywords - """ - sql = """\ -SELECT - user_id, - id, - keyword_ids, - map, - RANK() OVER (PARTITION BY user_id, keyword_ids ORDER BY map DESC) AS ranking -FROM -( - SELECT - user_id, - id, - keyword_ids, - map - FROM model - ARRAY JOIN keyword_ids - WHERE user_id = %(user_id)s -) -LIMIT %(limit)s""" - records = self.execute(sql, {"user_id": user_id, "limit": limit}) - models = [ModelwithmAP.from_clickhouse(record) for record in records] - return {keyword: [[m.model_id, m.mAP] for m in models_] for keyword, models_ in groupby(models, "keyword")} - - def get_recommend_keywords(self, user_id: int, dataset_ids: List[int], limit: int = 10) -> Any: - sql = """\ -SELECT - keyword_ids, - sum(keyword_count) AS ref_count -FROM -( - SELECT - keyword_ids, - count(keyword_ids) AS keyword_count - FROM dataset_keywords -ARRAY JOIN keyword_ids - WHERE user_id = %(user_id)s AND dataset_id IN (%(dataset_ids)s) - GROUP BY - dataset_id, - keyword_ids -) -GROUP BY keyword_ids -ORDER BY ref_count DESC -LIMIT %(limit)s""" - return self.execute(sql, {"user_id": user_id, "dataset_ids": dataset_ids, "limit": limit}) - - def get_task_count( - self, - user_id: int, - precision: str, - start_at: datetime, - end_at: datetime, - limit: int = 10, - ) -> Dict: - """ - Get tasks distribution across given precision - - user_id: task owner - precision: day, week or month - limit: data points count - """ - step = 1 - if precision == "month": - sql = f"""\ -WITH - toDate(0) AS start_date, - toRelativeMonthNum(start_date) AS relative_month_of_start_date -SELECT - type, - task_count, - addMonths(start_date, relative_month - relative_month_of_start_date) AS time -FROM -( - SELECT - toRelativeMonthNum(created_time) AS relative_month, - type, - count(type) AS task_count - FROM task_create - WHERE user_id = %(user_id)s - GROUP BY - type, - relative_month - ORDER BY relative_month ASC WITH FILL - FROM toRelativeMonthNum(toDate(%(start_at)s)) - TO toRelativeMonthNum(toDate(%(end_at)s)) STEP {step} -) -ORDER BY time ASC""" - else: - if precision == "week": - step = 7 - sql = f"""\ -SELECT - type, - count(type) as task_count, - toDate(toStartOfInterval(created_time, INTERVAL 1 {precision})) AS time -FROM task_create -WHERE user_id = %(user_id)s -GROUP BY - type, - time -ORDER BY time ASC WITH FILL - FROM toDate(%(start_at)s) - TO toDate(%(end_at)s) STEP {step}""" - - records = self.execute( - sql, - { - "user_id": user_id, - "start_at": start_at, - "end_at": end_at, - "limit": limit, - }, - ) - return prepare_time_based_count(records, limit, TaskType) - - def get_project_count( - self, - user_id: int, - precision: str, - start_at: datetime, - end_at: 
datetime, - limit: int = 10, - ) -> Dict: - """ - Get projects distribution across given precision - - user_id: projects owner - precision: day, week or month - limit: data points count - """ - step = 1 - if precision == "month": - sql = f"""\ -WITH - toDate(0) AS start_date, - toRelativeMonthNum(start_date) AS relative_month_of_start_date -SELECT - training_type, - project_count, - addMonths(start_date, relative_month - relative_month_of_start_date) AS time -FROM -( - SELECT - toRelativeMonthNum(created_time) AS relative_month, - training_type, - count(training_type) AS project_count - FROM project - WHERE user_id = %(user_id)s - GROUP BY - training_type, - relative_month - ORDER BY relative_month ASC WITH FILL - FROM toRelativeMonthNum(toDate(%(start_at)s)) - TO toRelativeMonthNum(toDate(%(end_at)s)) STEP {step} -) -ORDER BY time ASC""" - elif precision == "week": - sql = f"""\ -WITH - toDate(0) AS start_date, - toRelativeWeekNum(start_date) AS relative_week_of_start_date -SELECT - training_type, - project_count, - addWeeks(start_date, relative_week - relative_week_of_start_date) AS time -FROM -( - SELECT - toRelativeWeekNum(created_time) AS relative_week, - training_type, - count(training_type) AS project_count - FROM project - WHERE user_id = %(user_id)s - GROUP BY - training_type, - relative_week - ORDER BY relative_week ASC WITH FILL - FROM toRelativeWeekNum(toDate(%(start_at)s)) - TO toRelativeWeekNum(toDate(%(end_at)s)) STEP {step} -) -ORDER BY time ASC""" - else: - sql = f"""\ -SELECT - training_type, - count(training_type) as project_count, - toDate(toStartOfInterval(created_time, INTERVAL 1 {precision})) AS time -FROM project -WHERE user_id = %(user_id)s -GROUP BY - training_type, - time -ORDER BY time ASC WITH FILL - FROM toDate(%(start_at)s) - TO toDate(%(end_at)s) STEP {step}""" - - records = self.execute( - sql, - { - "user_id": user_id, - "start_at": start_at, - "end_at": end_at, - "limit": limit, - }, - ) - return prepare_time_based_count(records, limit, TrainingType) - - def close(self) -> None: - logger.debug("clickhouse client closed") - - -def prepare_time_based_count(records: List, limit: int, typer: Type[Union[TaskType, TrainingType]]) -> Dict: - times = [] - defaults = {type_.value: 0 for type_ in typer} - stats = [] - for dt, records_ in groupby([TimeBasedCount.from_clickhouse(r) for r in records], "time"): - times.append(dt) - count = dict(defaults) - count.update({typer[record.type_].value: record.count for record in records_ if record.type_}) - stats.append(count) - return {"records": stats[-limit:], "timestamps": times[-limit:]} diff --git a/ymir/backend/src/ymir_app/app/utils/files.py b/ymir/backend/src/ymir_app/app/utils/files.py index 02a22cc295..5fe9a8ab45 100644 --- a/ymir/backend/src/ymir_app/app/utils/files.py +++ b/ymir/backend/src/ymir_app/app/utils/files.py @@ -68,7 +68,7 @@ def save_file_content(url: Union[AnyHttpUrl, str], output_filename: Union[Path, # if file is hosted by nginx on the same host, just copy it file_path = Path(NGINX_DATA_PATH) / url - shutil.copy(file_path, output_filename) + shutil.move(str(file_path), output_filename) def download_file(url: AnyHttpUrl, output_filename: str) -> None: @@ -95,39 +95,6 @@ def decompress_zip(zip_file_path: Union[str, Path], output_dir: Union[str, Path] zip_ref.extractall(str(output_dir)) -def locate_dir(p: Union[str, Path], target: str) -> Path: - """ - Locate specifc target dirs - """ - for _p in Path(p).iterdir(): - if _p.is_dir() and _p.name.lower() == target: - return _p - for __p in _p.iterdir(): 
- if __p.is_dir() and __p.name.lower() == target: - return __p - # Only search 3rd depth when no result was found in 2nd depth. - for _p in Path(p).iterdir(): - for __p in _p.iterdir(): - for ___p in __p.iterdir(): - if ___p.is_dir() and ___p.name.lower() == target: - return ___p - raise FileNotFoundError() - - -def prepare_imported_dataset_dir(url: str, output_dir: Union[str, Path]) -> str: - with NamedTemporaryFile("wb") as tmp: - save_file_content(url, tmp.name) - logging.info("[import dataset] url content cached to %s", tmp.name) - decompress_zip(tmp.name, output_dir) - - image_dir = locate_dir(output_dir, "images") - annotation_dir = locate_dir(output_dir, "annotations") - if image_dir.parent != annotation_dir.parent: - logging.error("[import dataset] image(%s) and annotation(%s) not in the same dir", image_dir, annotation_dir) - raise InvalidFileStructure() - return str(image_dir.parent) - - def save_file( url: Union[AnyHttpUrl, str], output_dir: Union[str, Path], @@ -148,6 +115,64 @@ def save_files(urls: List[Union[AnyHttpUrl, str]], output_basedir: Union[str, Pa return output_dir, {filename.name: url for filename, url in zip(res, urls)} +def locate_dir(p: Union[str, Path], targets: List[str]) -> Path: + """ + Locate specific target dirs + """ + for _p in Path(p).iterdir(): + if not _p.is_dir(): + continue + if _p.name.lower() in targets: + return _p + for __p in _p.iterdir(): + if not __p.is_dir(): + continue + if __p.name.lower() in targets: + return __p + # Only search 3rd depth when no result was found in 2nd depth. + for _p in Path(p).iterdir(): + if not _p.is_dir(): + continue + for __p in _p.iterdir(): + if not __p.is_dir(): + continue + for ___p in __p.iterdir(): + if ___p.is_dir() and ___p.name.lower() in targets: + return ___p + raise FileNotFoundError + + +def locate_annotation_dir(p: Path, targets: List[str]) -> Optional[Path]: + """ + annotation_dir (gt or pred) must be a sibling of asset_dir + p: asset_dir.parent + targets: ["Annotations", "gt"] or ["pred"] + """ + for _p in Path(p).iterdir(): + if not _p.is_dir(): + continue + if _p.name.lower() in targets: + return _p + return None + + +def locate_ymir_dataset_dirs(path: Path) -> Tuple[Path, Optional[Path], Optional[Path]]: + # only `asset_dir` (images) is required + # both `gt_dir` and `pred_dir` are optional + asset_dir = locate_dir(path, ["images", "jpegimages"]) + gt_dir = locate_annotation_dir(asset_dir.parent, ["gt", "annotations"]) + pred_dir = locate_annotation_dir(asset_dir.parent, ["pred"]) + return asset_dir, gt_dir, pred_dir + + +def prepare_downloaded_paths(url: str, output_dir: Union[str, Path]) -> Tuple[Path, Optional[Path], Optional[Path]]: + with NamedTemporaryFile("wb") as tmp: + save_file_content(url, tmp.name) + logging.info("[import dataset] url content cached to %s", tmp.name) + decompress_zip(tmp.name, output_dir) + return locate_ymir_dataset_dirs(Path(output_dir)) + + def is_relative_to(path_long: Union[str, Path], path_short: Union[str, Path]) -> bool: """ mimic the behavior of Path.is_relative_to in Python 3.9 @@ -156,11 +181,10 @@ def is_relative_to(path_long: Union[str, Path], path_short: Union[str, Path]) -> return Path(path_short) in Path(path_long).parents -def verify_import_path(src_path: Union[str, Path]) -> None: - src_path = Path(src_path) - annotation_path = src_path / "annotations" - if not (src_path.is_dir() and annotation_path.is_dir()): - raise InvalidFileStructure() - if not is_relative_to(annotation_path, settings.SHARED_DATA_DIR): - logger.error("import path (%s) not 
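
The locate_* helpers above expect the unpacked archive to hold an asset dir named "images" or "jpegimages", with optional sibling "gt"/"annotations" and "pred" dirs. A runnable sketch of that layout plus a depth-1 simplification of the lookup (the real locate_dir searches up to three levels; paths here are throwaway temp dirs):

import tempfile
from pathlib import Path
from typing import List, Optional

# Throwaway skeleton matching what locate_ymir_dataset_dirs() accepts.
root = Path(tempfile.mkdtemp())
(root / "images").mkdir()  # required asset dir
(root / "gt").mkdir()      # optional ground truth, sibling of the asset dir
(root / "pred").mkdir()    # optional predictions, sibling of the asset dir

def find(p: Path, names: List[str]) -> Optional[Path]:
    return next((d for d in p.iterdir() if d.is_dir() and d.name.lower() in names), None)

asset_dir = find(root, ["images", "jpegimages"])
assert asset_dir is not None
gt_dir = find(asset_dir.parent, ["gt", "annotations"])
pred_dir = find(asset_dir.parent, ["pred"])
assert gt_dir is not None and pred_dir is not None
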
within shared_dir (%s)" % (annotation_path, settings.SHARED_DATA_DIR)) +def locate_import_paths(src_path: Union[str, Path]) -> Tuple[Path, Optional[Path], Optional[Path]]: + asset_dir, gt_dir, pred_dir = locate_ymir_dataset_dirs(Path(src_path)) + + if not is_relative_to(asset_dir, settings.SHARED_DATA_DIR): + logger.error("import path (%s) not within shared_dir (%s)" % (asset_dir, settings.SHARED_DATA_DIR)) raise InvalidFileStructure() + return asset_dir, gt_dir, pred_dir diff --git a/ymir/backend/src/ymir_app/app/utils/graph.py b/ymir/backend/src/ymir_app/app/utils/graph.py deleted file mode 100644 index 049bdb9855..0000000000 --- a/ymir/backend/src/ymir_app/app/utils/graph.py +++ /dev/null @@ -1,142 +0,0 @@ -import json -from typing import Dict, Generator, List, Optional, Tuple, Union - -from redis import StrictRedis -from redisgraph import Edge, Graph, Node, Path - - -class YmirNode(Node): - @classmethod - def from_dict(cls, node: Dict) -> Node: - """ - required keys for node: - - id: model_id or dataset_id - - label: `Model` or `Dataset` - """ - node["type"] = 1 if node["label"] == "Dataset" else 2 - return cls(node["id"], label=node["label"], properties=node) - - -class YmirTask(Edge): - @classmethod - def from_dicts(cls, source: Dict, target: Dict, task: Dict) -> Edge: - src_node = YmirNode.from_dict(source) - dest_node = YmirNode.from_dict(target) - return cls(src_node, "Task", dest_node, task["id"], properties=task) - - -class GraphClient: - def __init__(self, redis_uri: Optional[str]): - self.redis_con = self._get_redis_con(redis_uri) - self._graph = None - self._user_id = None # type: Optional[int] - - @property - def user_id(self) -> Optional[int]: - return self._user_id - - @user_id.setter - def user_id(self, user_id: int) -> None: - self._user_id = user_id - - @property - def graph(self) -> Graph: - return self._get_graph() - - def _get_redis_con(self, redis_uri: Optional[str]) -> StrictRedis: - if redis_uri: - redis_con = StrictRedis.from_url(redis_uri) - else: - redis_con = StrictRedis() - return redis_con - - def _get_graph(self) -> Graph: - name = f"graph{self.user_id:0>4}" - graph = Graph(name, self.redis_con) - return graph - - def query(self, query: str) -> Graph: - """ - Query against user's graph, - typically return history of specific node - """ - return self.graph.query(query) - - def add_node(self, node: Union[YmirNode, Dict]) -> Graph: - """ - Append a single node to user's graph: - node should be Dataset or Model - """ - if isinstance(node, dict): - node = YmirNode.from_dict(node) - self.graph.add_node(node) - self.graph.commit() - return node - - def add_relationship(self, source: Dict, target: Dict, task: Dict) -> Graph: - """ - Append a new node to user's graph with relationship: - node should be Dataset or Model, - relationship is corresponding Task - """ - relationship = YmirTask.from_dicts(source, target, task) - - s = relationship.src_node - t = relationship.dest_node - query = f"""\ -MERGE (source:{s.label} {s.toString()}) -MERGE (target:{t.label} {t.toString()}) -MERGE (source) -[task:TASK {relationship.toString()}]-> (target) -RETURN source, task, target""" - return self.query(query) - - def query_history(self, source: Dict, max_hops: int) -> Dict: - """ - Get all the related node pointing to source node within max_hops - - source: label, id, hash - """ - nodes = [] - edges = [] - for nodes_, edges_ in self.query_path(source["hash"], max_hops): - nodes += nodes_ - edges += edges_ - - nodes = remove_duplicated_dict(nodes) - edges = 
remove_duplicated_dict(edges) - return { - "nodes": nodes, - "edges": edges, - } - - def query_path(self, hash: str, max_hops: int) -> Generator: - # todo - # Cannot explicitly use `:Task` label, don't know why - q = f"""\ -MATCH p = (s) <-[task*1..{max_hops}]- (e) -WHERE s.hash = "{hash}" -RETURN p""" - res = self.query(q) - for record in res.result_set: - for path in record: - yield self.parse_path(path) - - def parse_path(self, path: Path) -> Tuple[List, List]: - nodes = {n.id: n.properties for n in path.nodes()} - edges = [ - { - "source": nodes[e.src_node]["hash"], - "target": nodes[e.dest_node]["hash"], - "task": e.properties, - } - for e in path.edges() - ] - return list(nodes.values()), edges - - def close(self) -> None: - print("bye") - - -def remove_duplicated_dict(li: List[Dict]) -> List: - tmp = {json.dumps(i, sort_keys=True) for i in li} - return [json.loads(i) for i in tmp] diff --git a/ymir/backend/src/ymir_app/app/utils/ymir_controller.py b/ymir/backend/src/ymir_app/app/utils/ymir_controller.py index de438a3902..eaccd4b533 100644 --- a/ymir/backend/src/ymir_app/app/utils/ymir_controller.py +++ b/ymir/backend/src/ymir_app/app/utils/ymir_controller.py @@ -7,13 +7,16 @@ import grpc from fastapi.logger import logger -from google.protobuf import json_format # type: ignore +from google.protobuf.json_format import MessageToDict +from google.protobuf.text_format import MessageToString from app.config import settings -from app.constants.state import TaskType +from app.constants.state import TaskType, AnnotationType from app.schemas.dataset import ImportStrategy, MergeStrategy -from common_utils.labels import UserLabels +from app.schemas.task import TrainingDatasetsStrategy +from common_utils.labels import UserLabels, userlabels_to_proto from id_definition.task_id import TaskId +from mir.protos import mir_command_pb2 as mir_cmd_pb from proto import backend_pb2 as mirsvrpb from proto import backend_pb2_grpc as mir_grpc @@ -31,12 +34,33 @@ class ExtraRequestType(enum.IntEnum): evaluate = 603 check_repo = 604 fix_repo = 605 + get_cmd_version = 606 MERGE_STRATEGY_MAPPING = { - MergeStrategy.stop_upon_conflict: mirsvrpb.STOP, - MergeStrategy.prefer_newest: mirsvrpb.HOST, - MergeStrategy.prefer_oldest: mirsvrpb.HOST, + MergeStrategy.stop_upon_conflict: mirsvrpb.MergeStrategy.STOP, + MergeStrategy.prefer_newest: mirsvrpb.MergeStrategy.HOST, + MergeStrategy.prefer_oldest: mirsvrpb.MergeStrategy.HOST, +} + + +TRAINING_DATASET_STRATEGY_MAPPING = { + TrainingDatasetsStrategy.stop: mirsvrpb.MergeStrategy.STOP, + TrainingDatasetsStrategy.as_training: mirsvrpb.MergeStrategy.HOST, + TrainingDatasetsStrategy.as_validation: mirsvrpb.MergeStrategy.GUEST, +} + + +IMPORTING_STRATEGY_MAPPING = { + ImportStrategy.no_annotations: mirsvrpb.UnknownTypesStrategy.UTS_IGNORE, + ImportStrategy.ignore_unknown_annotations: mirsvrpb.UnknownTypesStrategy.UTS_IGNORE, + ImportStrategy.stop_upon_unknown_annotations: mirsvrpb.UnknownTypesStrategy.UTS_STOP, + ImportStrategy.add_unknown_annotations: mirsvrpb.UnknownTypesStrategy.UTS_ADD, +} + +ANNOTATION_TYPE_MAPPING = { + AnnotationType.gt: mirsvrpb.AnnotationType.GT, + AnnotationType.pred: mirsvrpb.AnnotationType.PRED, } @@ -71,7 +95,7 @@ class ControllerRequest: task_id: Optional[str] = None args: Optional[Dict] = None req: Optional[mirsvrpb.GeneralReq] = None - task_parameters: Optional[str] = None + archived_task_parameters: Optional[str] = None def __post_init__(self) -> None: user_hash = gen_user_hash(self.user_id) @@ -79,7 +103,7 @@ def __post_init__(self) -> 
None: task_hash = self.task_id or gen_task_hash(self.user_id, self.project_id) request = mirsvrpb.GeneralReq( - user_id=user_hash, repo_id=repo_hash, task_id=task_hash, task_parameters=self.task_parameters + user_id=user_hash, repo_id=repo_hash, task_id=task_hash, task_parameters=self.archived_task_parameters ) method_name = "prepare_" + self.type.name @@ -94,85 +118,97 @@ def prepare_create_project(self, request: mirsvrpb.GeneralReq, args: Dict) -> mi return request def prepare_training(self, request: mirsvrpb.GeneralReq, args: Dict) -> mirsvrpb.GeneralReq: + request.in_class_ids[:] = args["class_ids"] train_task_req = mirsvrpb.TaskReqTraining() datasets = itertools.chain( - gen_typed_datasets(mirsvrpb.TvtTypeTraining, [args["dataset_hash"]]), - gen_typed_datasets(mirsvrpb.TvtTypeValidation, [args["validation_dataset_hash"]]), + gen_typed_datasets(mir_cmd_pb.TvtTypeTraining, [args["dataset_hash"]]), + gen_typed_datasets(mir_cmd_pb.TvtTypeValidation, [args["validation_dataset_hash"]]), ) for dataset in datasets: train_task_req.in_dataset_types.append(dataset) - train_task_req.in_class_ids[:] = args["class_ids"] - if "model_hash" in args: - request.model_hash = args["model_hash"] + if args.get("preprocess"): + train_task_req.preprocess_config = args["preprocess"] req_create_task = mirsvrpb.ReqCreateTask() - req_create_task.task_type = mirsvrpb.TaskTypeTraining + req_create_task.task_type = mir_cmd_pb.TaskType.TaskTypeTraining req_create_task.training.CopyFrom(train_task_req) - request.req_type = mirsvrpb.TASK_CREATE + if args.get("model_hash"): + request.model_hash = args["model_hash"] + request.model_stage = args["model_stage_name"] + request.req_type = mirsvrpb.RequestType.TASK_CREATE request.singleton_op = args["docker_image"] request.docker_image_config = args["docker_config"] # stop if training_dataset and validation_dataset share any assets - request.merge_strategy = mirsvrpb.STOP + request.merge_strategy = TRAINING_DATASET_STRATEGY_MAPPING[args["strategy"]] request.req_create_task.CopyFrom(req_create_task) return request def prepare_mining(self, request: mirsvrpb.GeneralReq, args: Dict) -> mirsvrpb.GeneralReq: + request.in_dataset_ids[:] = [args["dataset_hash"]] mine_task_req = mirsvrpb.TaskReqMining() - if args.get("top_k", None): + if args.get("top_k"): mine_task_req.top_k = args["top_k"] - mine_task_req.in_dataset_ids[:] = [args["dataset_hash"]] mine_task_req.generate_annotations = args["generate_annotations"] req_create_task = mirsvrpb.ReqCreateTask() - req_create_task.task_type = mirsvrpb.TaskTypeMining + req_create_task.task_type = mir_cmd_pb.TaskType.TaskTypeMining req_create_task.mining.CopyFrom(mine_task_req) - request.req_type = mirsvrpb.TASK_CREATE + request.req_type = mirsvrpb.RequestType.TASK_CREATE request.singleton_op = args["docker_image"] request.docker_image_config = args["docker_config"] request.model_hash = args["model_hash"] + request.model_stage = args["model_stage_name"] request.req_create_task.CopyFrom(req_create_task) return request def prepare_import_data(self, request: mirsvrpb.GeneralReq, args: Dict) -> mirsvrpb.GeneralReq: - importing_request = mirsvrpb.TaskReqImporting() - importing_request.asset_dir = args["asset_dir"] + import_dataset_request = mirsvrpb.TaskReqImportDataset() + + import_dataset_request.asset_dir = args["asset_dir"] strategy = args.get("strategy") or ImportStrategy.ignore_unknown_annotations if strategy != ImportStrategy.no_annotations: - importing_request.annotation_dir = args["annotation_dir"] - if strategy == 
ImportStrategy.ignore_unknown_annotations: - importing_request.name_strategy_ignore = True - else: - importing_request.name_strategy_ignore = False + if args.get("gt_dir"): + import_dataset_request.gt_dir = args["gt_dir"] + if args.get("pred_dir"): + import_dataset_request.pred_dir = args["pred_dir"] + import_dataset_request.clean_dirs = args["clean_dirs"] + + import_dataset_request.unknown_types_strategy = IMPORTING_STRATEGY_MAPPING[strategy] req_create_task = mirsvrpb.ReqCreateTask() - req_create_task.task_type = mirsvrpb.TaskTypeImportData - req_create_task.importing.CopyFrom(importing_request) + req_create_task.task_type = mir_cmd_pb.TaskType.TaskTypeImportData + req_create_task.import_dataset.CopyFrom(import_dataset_request) - request.req_type = mirsvrpb.TASK_CREATE + request.req_type = mirsvrpb.RequestType.TASK_CREATE request.req_create_task.CopyFrom(req_create_task) return request def prepare_label(self, request: mirsvrpb.GeneralReq, args: Dict) -> mirsvrpb.GeneralReq: + request.in_dataset_ids[:] = [args["dataset_hash"]] + request.in_class_ids[:] = args["class_ids"] label_request = mirsvrpb.TaskReqLabeling() label_request.project_name = f"label_${args['dataset_name']}" - label_request.dataset_id = args["dataset_hash"] label_request.labeler_accounts[:] = args["labellers"] - label_request.in_class_ids[:] = args["class_ids"] - label_request.export_annotation = args["keep_annotations"] + + # pre annotation + if args.get("annotation_type"): + label_request.annotation_type = ANNOTATION_TYPE_MAPPING[args["annotation_type"]] + if args.get("extra_url"): label_request.expert_instruction_url = args["extra_url"] req_create_task = mirsvrpb.ReqCreateTask() - req_create_task.task_type = mirsvrpb.TaskTypeLabel + req_create_task.task_type = mir_cmd_pb.TaskType.TaskTypeLabel req_create_task.labeling.CopyFrom(label_request) - request.req_type = mirsvrpb.TASK_CREATE + request.req_type = mirsvrpb.RequestType.TASK_CREATE request.req_create_task.CopyFrom(req_create_task) return request def prepare_copy_data(self, request: mirsvrpb.GeneralReq, args: Dict) -> mirsvrpb.GeneralReq: + request.in_dataset_ids[:] = [args["src_resource_id"]] copy_request = mirsvrpb.TaskReqCopyData() strategy = args.get("strategy") or ImportStrategy.ignore_unknown_annotations if strategy is ImportStrategy.ignore_unknown_annotations: @@ -186,19 +222,19 @@ def prepare_copy_data(self, request: mirsvrpb.GeneralReq, args: Dict) -> mirsvrp copy_request.src_user_id = args["src_user_id"] copy_request.src_repo_id = args["src_repo_id"] - copy_request.src_dataset_id = args["src_resource_id"] req_create_task = mirsvrpb.ReqCreateTask() - req_create_task.task_type = mirsvrpb.TaskTypeCopyData + req_create_task.task_type = mir_cmd_pb.TaskType.TaskTypeCopyData req_create_task.copy.CopyFrom(copy_request) - request.req_type = mirsvrpb.TASK_CREATE + request.req_type = mirsvrpb.RequestType.TASK_CREATE request.req_create_task.CopyFrom(req_create_task) return request def prepare_inference(self, request: mirsvrpb.GeneralReq, args: Dict) -> mirsvrpb.GeneralReq: request.req_type = mirsvrpb.CMD_INFERENCE request.model_hash = args["model_hash"] + request.model_stage = args["model_stage_name"] request.asset_dir = args["asset_dir"] request.singleton_op = args["docker_image"] request.docker_image_config = args["docker_config"] @@ -207,7 +243,7 @@ def prepare_inference(self, request: mirsvrpb.GeneralReq, args: Dict) -> mirsvrp def prepare_add_label(self, request: mirsvrpb.GeneralReq, args: Dict) -> mirsvrpb.GeneralReq: request.check_only = args["dry_run"] 
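
Each of these `prepare_*` builders is selected by name: `ControllerRequest.__post_init__` computes `"prepare_" + self.type.name` and dispatches through `getattr`. A self-contained sketch of that convention, with an illustrative enum and plain-dict payloads standing in for the real protobuf requests:

```python
import enum
from typing import Dict

class ReqType(enum.Enum):
    training = 1
    mining = 2

class RequestBuilder:
    def build(self, req_type: ReqType, args: Dict) -> Dict:
        prepare = getattr(self, "prepare_" + req_type.name)  # e.g. prepare_training
        return prepare(args)

    def prepare_training(self, args: Dict) -> Dict:
        return {"req_type": "TASK_CREATE", "task_type": "training", **args}

    def prepare_mining(self, args: Dict) -> Dict:
        return {"req_type": "TASK_CREATE", "task_type": "mining", **args}

print(RequestBuilder().build(ReqType.training, {"dataset_hash": "t0001"}))
# {'req_type': 'TASK_CREATE', 'task_type': 'training', 'dataset_hash': 't0001'}
```

Adding a new request type then only requires a matching enum member and `prepare_<name>` method.
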
request.req_type = mirsvrpb.CMD_LABEL_ADD - request.label_collection.CopyFrom(args["labels"].to_proto()) + request.label_collection.CopyFrom(userlabels_to_proto(args["labels"])) return request def prepare_get_label(self, request: mirsvrpb.GeneralReq, args: Dict) -> mirsvrpb.GeneralReq: @@ -230,41 +266,39 @@ def prepare_get_gpu_info(self, request: mirsvrpb.GeneralReq, args: Dict) -> mirs return request def prepare_data_fusion(self, request: mirsvrpb.GeneralReq, args: Dict) -> mirsvrpb.GeneralReq: - data_fusion_request = mirsvrpb.TaskReqFusion() - data_fusion_request.in_dataset_ids[:] = args["include_datasets"] - data_fusion_request.merge_strategy = MERGE_STRATEGY_MAPPING[args["include_strategy"]] + request.in_dataset_ids[:] = args["include_datasets"] + request.merge_strategy = MERGE_STRATEGY_MAPPING[args.get("strategy", MergeStrategy.stop_upon_conflict)] if args.get("exclude_datasets"): - data_fusion_request.ex_dataset_ids[:] = args["exclude_datasets"] + request.ex_dataset_ids[:] = args["exclude_datasets"] if args.get("include_class_ids"): - data_fusion_request.in_class_ids[:] = args["include_class_ids"] + request.in_class_ids[:] = args["include_class_ids"] if args.get("exclude_class_ids"): - data_fusion_request.ex_class_ids[:] = args["exclude_class_ids"] + request.ex_class_ids[:] = args["exclude_class_ids"] if args.get("sampling_count"): - data_fusion_request.count = args["sampling_count"] + request.sampling_count = args["sampling_count"] else: # not sampling - data_fusion_request.rate = 1 + request.sampling_rate = 1 req_create_task = mirsvrpb.ReqCreateTask() - req_create_task.task_type = mirsvrpb.TaskTypeFusion - req_create_task.fusion.CopyFrom(data_fusion_request) + req_create_task.task_type = mir_cmd_pb.TaskType.TaskTypeFusion - request.req_type = mirsvrpb.TASK_CREATE + request.req_type = mirsvrpb.RequestType.TASK_CREATE request.req_create_task.CopyFrom(req_create_task) return request def prepare_import_model(self, request: mirsvrpb.GeneralReq, args: Dict) -> mirsvrpb.GeneralReq: - model_importing = mirsvrpb.TaskReqModelImporting() - model_importing.model_package_path = args["model_package_path"] + import_model_request = mirsvrpb.TaskReqImportModel() + import_model_request.model_package_path = args["model_package_path"] req_create_task = mirsvrpb.ReqCreateTask() - req_create_task.task_type = mirsvrpb.TaskTypeImportModel - req_create_task.model_importing.CopyFrom(model_importing) + req_create_task.task_type = mir_cmd_pb.TaskType.TaskTypeImportModel + req_create_task.import_model.CopyFrom(import_model_request) - request.req_type = mirsvrpb.TASK_CREATE + request.req_type = mirsvrpb.RequestType.TASK_CREATE request.req_create_task.CopyFrom(req_create_task) return request @@ -273,13 +307,13 @@ def prepare_copy_model(self, request: mirsvrpb.GeneralReq, args: Dict) -> mirsvr copy_request = mirsvrpb.TaskReqCopyData() copy_request.src_user_id = args["src_user_id"] copy_request.src_repo_id = args["src_repo_id"] - copy_request.src_dataset_id = args["src_resource_id"] + request.in_dataset_ids[:] = [args["src_resource_id"]] req_create_task = mirsvrpb.ReqCreateTask() - req_create_task.task_type = mirsvrpb.TaskTypeCopyModel + req_create_task.task_type = mir_cmd_pb.TaskType.TaskTypeCopyModel req_create_task.copy.CopyFrom(copy_request) - request.req_type = mirsvrpb.TASK_CREATE + request.req_type = mirsvrpb.RequestType.TASK_CREATE request.req_create_task.CopyFrom(req_create_task) return request @@ -289,14 +323,15 @@ def prepare_dataset_infer(self, request: mirsvrpb.GeneralReq, args: Dict) -> mir return 
self.prepare_mining(request, args) def prepare_evaluate(self, request: mirsvrpb.GeneralReq, args: Dict) -> mirsvrpb.GeneralReq: - evaluate_config = mirsvrpb.EvaluateConfig() + evaluate_config = mir_cmd_pb.EvaluateConfig() evaluate_config.conf_thr = args["confidence_threshold"] - evaluate_config.iou_thrs_interval = "0.5:1:0.05" - evaluate_config.need_pr_curve = False + evaluate_config.iou_thrs_interval = args["iou_thrs_interval"] + evaluate_config.need_pr_curve = args["need_pr_curve"] + if args.get("main_ck"): + evaluate_config.main_ck = args["main_ck"] request.req_type = mirsvrpb.CMD_EVALUATE - request.singleton_op = args["gt_dataset_hash"] - request.in_dataset_ids[:] = args["other_dataset_hashes"] + request.in_dataset_ids[:] = [args["dataset_hash"]] request.evaluate_config.CopyFrom(evaluate_config) return request @@ -308,22 +343,31 @@ def prepare_fix_repo(self, request: mirsvrpb.GeneralReq, args: Dict) -> mirsvrpb request.req_type = mirsvrpb.CMD_REPO_CLEAR return request + def prepare_get_cmd_version(self, request: mirsvrpb.GeneralReq, args: Dict) -> mirsvrpb.GeneralReq: + request.req_type = mirsvrpb.CMD_VERSIONS_GET + return request + class ControllerClient: def __init__(self, channel: str = settings.GRPC_CHANNEL) -> None: - self.channel = grpc.insecure_channel(channel) - self.stub = mir_grpc.mir_controller_serviceStub(self.channel) + self.channel_ep = channel def close(self) -> None: - self.channel.close() + pass - def send(self, req: mirsvrpb.GeneralReq) -> Dict: + def send(self, req: mirsvrpb.GeneralReq, verbose: bool = True) -> Dict: logger.info("[controller] request: %s", req.req) - resp = self.stub.data_manage_request(req.req) + with grpc.insecure_channel(self.channel_ep) as channel: + stub = mir_grpc.mir_controller_serviceStub(channel) + resp = stub.data_manage_request(req.req) + if resp.code != 0: raise ValueError(f"gRPC error. 
response: {resp.code} {resp.message}") - logger.info("[controller] response: %s", resp) - return json_format.MessageToDict( + msg = "[controller] successfully get response" + if verbose: + msg = "%s: %s" % (msg, MessageToString(resp, as_one_line=True)) + logger.info(msg) + return MessageToDict( resp, preserving_proto_field_name=True, use_integers_for_enums=True, @@ -340,11 +384,16 @@ def add_labels(self, user_id: int, new_labels: UserLabels, dry_run: bool) -> Dic def get_labels_of_user(self, user_id: int) -> UserLabels: req = ControllerRequest(ExtraRequestType.get_label, user_id) - resp = self.send(req) + resp = self.send(req, verbose=False) # if not set labels, lost the key label_collection if not resp.get("label_collection"): raise ValueError(f"Missing labels for user {user_id}") - return UserLabels.parse_obj(dict(labels=resp["label_collection"]["labels"])) + return UserLabels.parse_obj( + dict( + labels=resp["label_collection"]["labels"], + ymir_version=resp["label_collection"]["ymir_version"], + ) + ) def create_task( self, @@ -353,7 +402,7 @@ def create_task( task_id: str, task_type: TaskType, args: Optional[Dict], - task_parameters: Optional[str], + archived_task_parameters: Optional[str], ) -> Dict: req = ControllerRequest( type=TaskType(task_type), @@ -361,7 +410,7 @@ def create_task( project_id=project_id, task_id=task_id, args=args, - task_parameters=task_parameters, + archived_task_parameters=archived_task_parameters, ) return self.send(req) @@ -425,6 +474,7 @@ def call_inference( user_id: int, project_id: int, model_hash: Optional[str], + model_stage_name: Optional[str], asset_dir: str, docker_image: Optional[str], docker_config: Optional[str], @@ -437,6 +487,7 @@ def call_inference( project_id=project_id, args={ "model_hash": model_hash, + "model_stage_name": model_stage_name, "asset_dir": asset_dir, "docker_image": docker_image, "docker_config": docker_config, @@ -449,10 +500,10 @@ def create_data_fusion( user_id: int, project_id: int, task_id: str, - task_parameters: Optional[Dict], + args: Optional[Dict], ) -> Dict: req = ControllerRequest( - type=TaskType.data_fusion, user_id=user_id, project_id=project_id, task_id=task_id, args=task_parameters + type=TaskType.data_fusion, user_id=user_id, project_id=project_id, task_id=task_id, args=args ) return self.send(req) @@ -471,23 +522,29 @@ def evaluate_dataset( self, user_id: int, project_id: int, - task_id: str, + user_labels: UserLabels, confidence_threshold: float, - gt_dataset_hash: str, - other_dataset_hashes: List[str], + iou_thrs_interval: str, + need_pr_curve: bool, + main_ck: Optional[str], + dataset_hash: str, ) -> Dict: req = ControllerRequest( type=ExtraRequestType.evaluate, user_id=user_id, project_id=project_id, - task_id=task_id, args={ "confidence_threshold": confidence_threshold, - "gt_dataset_hash": gt_dataset_hash, - "other_dataset_hashes": other_dataset_hashes, + "dataset_hash": dataset_hash, + "iou_thrs_interval": iou_thrs_interval, + "need_pr_curve": need_pr_curve, + "main_ck": main_ck, }, ) - return self.send(req) + resp = self.send(req) + evaluation_result = resp["evaluation"] + convert_class_id_to_keyword(evaluation_result, user_labels) + return {dataset_hash: evaluation_result} def check_repo_status(self, user_id: int, project_id: int) -> bool: req = ControllerRequest( @@ -505,3 +562,63 @@ def fix_repo(self, user_id: int, project_id: int) -> Dict: project_id=project_id, ) return self.send(req) + + def merge_datasets( + self, + user_id: int, + project_id: int, + task_id: str, + dataset_hashes: 
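
`send` above logs responses compactly via `MessageToString(..., as_one_line=True)` and hands callers plain dicts via `MessageToDict(..., preserving_proto_field_name=True, use_integers_for_enums=True)`. Both helpers accept any generated message; a stock protobuf type stands in for `GeneralReq` here, since the project's generated modules aren't assumed available:

```python
from google.protobuf.descriptor_pb2 import FileDescriptorProto
from google.protobuf.json_format import MessageToDict
from google.protobuf.text_format import MessageToString

msg = FileDescriptorProto(name="demo.proto", package="demo")
print(MessageToString(msg, as_one_line=True))
# name: "demo.proto" package: "demo"
print(MessageToDict(msg, preserving_proto_field_name=True, use_integers_for_enums=True))
# {'name': 'demo.proto', 'package': 'demo'}
```
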
Optional[List[str]], + ex_dataset_hashes: Optional[List[str]], + merge_strategy: Optional[MergeStrategy] = None, + ) -> Dict: + req = ControllerRequest( + type=TaskType.data_fusion, + user_id=user_id, + project_id=project_id, + task_id=task_id, + args={ + "include_datasets": dataset_hashes, + "exclude_datasets": ex_dataset_hashes, + "strategy": merge_strategy, + }, + ) + return self.send(req) + + def filter_dataset( + self, + user_id: int, + project_id: int, + task_id: str, + dataset_hash: str, + class_ids: Optional[List[int]], + ex_class_ids: Optional[List[int]], + sampling_count: Optional[int] = None, + ) -> Dict: + req = ControllerRequest( + type=TaskType.data_fusion, + user_id=user_id, + project_id=project_id, + task_id=task_id, + args={ + "include_datasets": [dataset_hash], + "include_class_ids": class_ids, + "exclude_class_ids": ex_class_ids, + "sampling_count": sampling_count, + }, + ) + return self.send(req) + + def get_cmd_version(self) -> List[str]: + req = ControllerRequest(type=ExtraRequestType.get_cmd_version, user_id=0) + resp = self.send(req) + return resp["sandbox_versions"] + + +def convert_class_id_to_keyword(obj: Dict, user_labels: UserLabels) -> None: + if isinstance(obj, dict): + for key, value in obj.items(): + if key == "ci_evaluations": + obj[key] = {user_labels.main_name_for_id(k): v for k, v in value.items()} + else: + convert_class_id_to_keyword(obj[key], user_labels) diff --git a/ymir/backend/src/ymir_app/app/utils/ymir_viz.py b/ymir/backend/src/ymir_app/app/utils/ymir_viz.py index a3352cd0dd..f47145edfa 100644 --- a/ymir/backend/src/ymir_app/app/utils/ymir_viz.py +++ b/ymir/backend/src/ymir_app/app/utils/ymir_viz.py @@ -1,113 +1,99 @@ -from dataclasses import asdict, dataclass -from typing import Dict, List, Optional +from dataclasses import asdict, InitVar +import json +from typing import Any, Dict, List, Optional import requests +from requests.exceptions import ConnectionError, Timeout from fastapi.logger import logger -from pydantic import BaseModel - -from app.api.errors.errors import DatasetEvaluationNotFound, ModelNotFound, ModelNotReady +from pydantic import BaseModel, dataclasses, validator, root_validator + +from app.api.errors.errors import ( + DatasetEvaluationNotFound, + DatasetEvaluationMissingAnnotation, + DatasetIndexNotReady, + ModelNotFound, + FailedToParseVizResponse, + VizError, + VizTimeOut, +) from app.config import settings from common_utils.labels import UserLabels -from id_definition.error_codes import VizErrorCode +from id_definition.error_codes import VizErrorCode, CMDResponseCode -@dataclass -class Asset: - url: str - hash: str - annotations: List[Dict] - keywords: List[str] - metadata: Dict +@dataclasses.dataclass +class DatasetAnnotation: + keywords: Dict[str, int] + class_ids_count: Dict[str, int] + negative_assets_count: int + + tags_count_total: Dict + tags_count: Dict + + hist: Optional[Dict] + annos_count: Optional[int] + ave_annos_count: Optional[float] + + eval_class_ids: Optional[List] @classmethod - def from_viz_res(cls, asset_id: str, res: Dict, user_labels: UserLabels) -> "Asset": - annotations = [ - { - "box": annotation["box"], - "keyword": user_labels.get_main_names(annotation["class_id"])[0], - } - for annotation in res["annotations"] - ] - keywords = user_labels.get_main_names(class_ids=res["class_ids"]) - keywords = list(filter(None, keywords)) - metadata = { - "height": res["metadata"]["height"], - "width": res["metadata"]["width"], - "channel": res["metadata"]["image_channels"], - "timestamp": 
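
`convert_class_id_to_keyword` above rewrites an evaluation payload in place, recursing until it reaches a `ci_evaluations` mapping whose keys are class ids. A runnable sketch of the same walk, with a hard-coded `NAMES` table standing in for `UserLabels.main_name_for_id`:

```python
from typing import Dict

NAMES = {1: "cat", 2: "dog"}  # illustrative stand-in for UserLabels

def convert(obj: Dict) -> None:
    if isinstance(obj, dict):
        for key, value in obj.items():
            if key == "ci_evaluations":
                obj[key] = {NAMES[int(k)]: v for k, v in value.items()}
            else:
                convert(value)  # keep descending through nested dicts

report = {"iou_0.5": {"ci_evaluations": {1: {"ap": 0.8}, 2: {"ap": 0.6}}}}
convert(report)
print(report)
# {'iou_0.5': {'ci_evaluations': {'cat': {'ap': 0.8}, 'dog': {'ap': 0.6}}}}
```
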
int(res["metadata"]["timestamp"]["start"]), + def from_dict(cls, data: Dict, total_assets_count: int, user_labels: UserLabels) -> "DatasetAnnotation": + ave_annos_count = round(data["annos_count"] / total_assets_count, 2) if total_assets_count else None + keywords = { + user_labels.main_name_for_id(int(class_id)): count for class_id, count in data["class_ids_count"].items() } + eval_class_ids = user_labels.main_name_for_ids(data["eval_class_ids"]) if data.get("eval_class_ids") else None return cls( - get_asset_url(asset_id), - asset_id, - annotations, - keywords, # type: ignore - metadata, + keywords=keywords, + class_ids_count=data["class_ids_count"], + negative_assets_count=data["negative_assets_count"], + tags_count_total=data["tags_count_total"], + tags_count=data["tags_count"], + hist=data.get("annos_hist") or None, + annos_count=data.get("annos_count"), + ave_annos_count=ave_annos_count, + eval_class_ids=eval_class_ids, ) -@dataclass -class Assets: - items: List - total: int +@dataclasses.dataclass +class DatasetInfo: + gt: Optional[DatasetAnnotation] + pred: Optional[DatasetAnnotation] - @classmethod - def from_viz_res(cls, res: Dict, user_labels: UserLabels) -> "Assets": - assets = [ - { - "url": get_asset_url(asset["asset_id"]), - "hash": asset["asset_id"], - "keywords": user_labels.get_main_names(class_ids=asset["class_ids"]), - } - for asset in res["elements"] - ] + cks_count: Dict + cks_count_total: Dict - return cls(items=assets, total=res["total"]) + keywords: Dict + new_types_added: Optional[bool] + total_assets_count: int -@dataclass -class ModelMetaData: - hash: str - map: float - task_parameters: str - executor_config: str + hist: Optional[Dict] = None + total_assets_mbytes: Optional[int] = None - @classmethod - def from_viz_res(cls, res: Dict) -> "ModelMetaData": - return cls(res["model_id"], res["model_mAP"], res["task_parameters"], res["executor_config"]) - - -class VizDataset(BaseModel): - """ - Interface dataclass of VIZ output, defined as DatasetResult in doc: - https://github.com/IndustryEssentials/ymir/blob/master/ymir/backend/src/ymir_viz/doc/ymir_viz_API.yaml - """ - - total_images_cnt: int - class_ids_count: Dict[int, int] - ignored_labels: Dict[str, int] - negative_info: Dict[str, int] - - -@dataclass -class DatasetMetaData: - keywords: Dict[str, int] - ignored_keywords: Dict[str, int] - negative_info: Dict[str, int] - asset_count: int - keyword_count: int + repo_index_ready: Optional[bool] = None @classmethod - def from_viz_res(cls, res: Dict, user_labels: UserLabels) -> "DatasetMetaData": - viz_dataset = VizDataset(**res) + def from_dict(cls, res: Dict, user_labels: UserLabels) -> "DatasetInfo": + total_assets_count = res["total_assets_count"] + gt = DatasetAnnotation.from_dict(res["gt"], total_assets_count, user_labels) if res.get("gt") else None + pred = DatasetAnnotation.from_dict(res["pred"], total_assets_count, user_labels) if res.get("pred") else None keywords = { - user_labels.get_main_names(class_id)[0]: count for class_id, count in viz_dataset.class_ids_count.items() + "gt": gt.keywords if gt else {}, + "pred": pred.keywords if pred else {}, } return cls( + gt=gt, + pred=pred, + cks_count=res["cks_count"], + cks_count_total=res["cks_count_total"], keywords=keywords, - ignored_keywords=viz_dataset.ignored_labels, - negative_info=viz_dataset.negative_info, - asset_count=viz_dataset.total_images_cnt, - keyword_count=len(keywords), + new_types_added=res.get("new_types_added"), + total_assets_count=total_assets_count, + hist=res.get("assets_hist") or 
None, + total_assets_mbytes=res.get("total_assets_mbytes"), + repo_index_ready=res.get("query_context", {}).get("repo_index_ready"), ) @@ -117,11 +103,18 @@ class EvaluationScore(BaseModel): fn: int fp: int tp: int + pr_curve: List[Dict] + + +class CKEvaluation(BaseModel): + total: EvaluationScore + sub: Dict[str, EvaluationScore] class VizDatasetEvaluation(BaseModel): ci_evaluations: Dict[int, EvaluationScore] # class_id -> scores ci_averaged_evaluation: EvaluationScore + ck_evaluations: Dict[str, CKEvaluation] class VizDatasetEvaluationResult(BaseModel): @@ -134,82 +127,342 @@ class VizDatasetEvaluationResult(BaseModel): iou_averaged_evaluation: VizDatasetEvaluation +class ViewerAssetRequest(BaseModel): + """ + Payload for viewer GET /assets + """ + + class_ids: Optional[str] + current_asset_id: Optional[str] + cm_types: Optional[str] + cks: Optional[str] + tags: Optional[str] + annotation_types: Optional[str] + limit: Optional[int] + offset: Optional[int] + + @validator("class_ids", "cm_types", "cks", "tags", "annotation_types", pre=True) + def make_str(cls, v: Any) -> Optional[str]: + if v is None: + return v + if isinstance(v, str): + return v + return ",".join(map(str, v)) + + +@dataclasses.dataclass +class ViewerAssetAnnotation: + box: Dict + class_id: int + cm: int + tags: Dict + keyword: Optional[str] = None + user_labels: InitVar[UserLabels] = None + + def __post_init__(self, user_labels: UserLabels) -> None: + self.keyword = user_labels.main_name_for_id(self.class_id) + + +@dataclasses.dataclass +class ViewerAsset: + asset_id: str + class_ids: List[int] + metadata: Dict + gt: List + pred: List + cks: Dict + url: Optional[str] = None + hash: Optional[str] = None + keywords: Optional[List[str]] = None + user_labels: InitVar[UserLabels] = None + + def __post_init__(self, user_labels: UserLabels) -> None: + self.url = get_asset_url(self.asset_id) + self.hash = self.asset_id + self.keywords = user_labels.main_name_for_ids(self.class_ids) + self.gt = [ + ViewerAssetAnnotation( + box=i["box"], + class_id=i["class_id"], + cm=i["cm"], + tags=i["tags"], + user_labels=user_labels, + ) + for i in self.gt + ] + self.pred = [ + ViewerAssetAnnotation( + box=i["box"], + class_id=i["class_id"], + cm=i["cm"], + tags=i["tags"], + user_labels=user_labels, + ) + for i in self.pred + ] + + +@dataclasses.dataclass +class ViewerAssetsResponse: + total_assets_count: int + elements: List[Dict] + total: Optional[int] = None + items: Optional[List] = None + user_labels: InitVar[UserLabels] = None + + def __post_init__(self, user_labels: UserLabels) -> None: + self.total = self.total_assets_count + self.items = [ + ViewerAsset( + asset_id=i["asset_id"], + class_ids=i["class_ids"], + metadata=i["metadata"], + gt=i["gt"], + pred=i["pred"], + cks=i["cks"], + user_labels=user_labels, + ) + for i in self.elements + ] + + @classmethod + def from_dict(cls, data: Dict, user_labels: UserLabels) -> "ViewerAssetsResponse": + return cls(data["total_assets_count"], data["elements"], user_labels=user_labels) + + +class ViewerModelInfoResponse(BaseModel): + hash: str + map: float + task_parameters: str + executor_config: Dict + model_stages: Dict + best_stage_name: str + keywords: Optional[str] + + @root_validator(pre=True) + def make_up_fields(cls, values: Any) -> Any: + keywords = values["executor_config"].get("class_names") + values.update( + hash=values["model_hash"], + map=values["mean_average_precision"], + model_stages=values["stages"], + keywords=json.dumps(keywords) if keywords else None, + ) + return values + + 
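
`ViewerAssetRequest.make_str` above normalizes list-valued filters into the comma-separated strings the viewer expects in its query string. A trimmed, runnable version of the same pattern, assuming pydantic v1 (as the `@validator` syntax implies); `AssetQuery` is an illustrative model name:

```python
from typing import Any, Optional
from pydantic import BaseModel, validator

class AssetQuery(BaseModel):
    class_ids: Optional[str]
    limit: Optional[int]

    @validator("class_ids", pre=True)
    def make_str(cls, v: Any) -> Optional[str]:
        if v is None or isinstance(v, str):
            return v
        return ",".join(map(str, v))  # [1, 2, 3] -> "1,2,3"

print(AssetQuery(class_ids=[1, 2, 3], limit=20).dict(exclude_none=True))
# {'class_ids': '1,2,3', 'limit': 20}
```
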
class VizClient: - def __init__(self, *, host: str = settings.VIZ_HOST): - self.host = host + def __init__( + self, user_id: Optional[int] = None, project_id: Optional[int] = None, user_labels: Optional[UserLabels] = None + ) -> None: self.session = requests.Session() - self._user_id = None # type: Optional[str] - self._project_id = None # type: Optional[str] - self._branch_id = None # type: Optional[str] - self._url_prefix = None # type: Optional[str] + self._user_id = f"{user_id:0>4}" if user_id else None + self._project_id = f"{project_id:0>6}" if project_id else None + self._user_labels = user_labels + self._host = f"http://127.0.0.1:{settings.VIEWER_HOST_PORT}" + self._url_prefix = ( + f"{self._host}/api/v1/users/{self._user_id}/repo/{self._project_id}" if user_id and project_id else None + ) def initialize( self, *, user_id: int, project_id: int, - branch_id: str, + user_labels: Optional[UserLabels] = None, ) -> None: self._user_id = f"{user_id:0>4}" self._project_id = f"{project_id:0>6}" - self._branch_id = branch_id - self._url_prefix = f"http://{self.host}/v1/users/{self._user_id}/repositories/{self._project_id}/branches/{self._branch_id}" # noqa: E501 + self._url_prefix = f"{self._host}/api/v1/users/{self._user_id}/repo/{self._project_id}" # noqa: E501 + + if user_labels: + self._user_labels = user_labels def get_assets( self, *, + dataset_hash: str, + asset_hash: Optional[str] = None, keyword_id: Optional[int] = None, + keyword_ids: Optional[List[int]] = None, + cm_types: Optional[List[str]] = None, + cks: Optional[List[str]] = None, + tags: Optional[List[str]] = None, + annotation_types: Optional[List[str]] = None, offset: int = 0, limit: int = 20, - user_labels: UserLabels, - ) -> Assets: - url = f"{self._url_prefix}/assets" - payload = {"class_id": keyword_id, "limit": limit, "offset": offset} - resp = self.session.get(url, params=payload, timeout=settings.VIZ_TIMEOUT) - if not resp.ok: - logger.error("[viz] failed to get assets info: %s", resp.content) - resp.raise_for_status() - res = resp.json()["result"] - logger.info("[viz] get_assets response: %s", res) - return Assets.from_viz_res(res, user_labels) - - def get_asset( + ) -> Dict: + """ + viewer: GET /assets + """ + url = f"{self._url_prefix}/branch/{dataset_hash}/assets" + params = ViewerAssetRequest( + class_ids=keyword_ids, + cm_types=cm_types, + cks=cks, + tags=tags, + annotation_types=annotation_types, + current_asset_id=asset_hash, + limit=limit, + offset=offset, + ).dict(exclude_none=True) + + resp = self.get_resp(url, params=params) + res = self.parse_resp(resp) + assets = ViewerAssetsResponse.from_dict(res, self._user_labels) + return asdict(assets) + + def get_model_info(self, branch_id: str) -> Dict: + """ + viewer: GET /model_info + """ + url = f"{self._url_prefix}/branch/{branch_id}/model_info" + resp = self.get_resp(url) + res = self.parse_resp(resp) + model_info = ViewerModelInfoResponse.parse_obj(res).dict() + return model_info + + def get_dataset_info( + self, dataset_hash: str, user_labels: Optional[UserLabels] = None, check_index_status: bool = False + ) -> Dict: + """ + viewer: GET /dataset_meta_count + """ + user_labels = user_labels or self._user_labels + url = f"{self._url_prefix}/branch/{dataset_hash}/dataset_meta_count" + resp = self.get_resp(url) + res = self.parse_resp(resp) + dataset_info = DatasetInfo.from_dict(res, user_labels=user_labels) + if check_index_status and not dataset_info.repo_index_ready: + logger.error("[viewer] dataset index not ready, try again later") + raise 
DatasetIndexNotReady() + return asdict(dataset_info) + + def get_dataset_analysis( self, - *, - asset_id: str, - user_labels: UserLabels, - ) -> Optional[Dict]: - url = f"{self._url_prefix}/assets/{asset_id}" - resp = self.session.get(url, timeout=settings.VIZ_TIMEOUT) - if not resp.ok: - logger.error("[viz] failed to get asset info: %s", resp.content) - return None - res = resp.json()["result"] - return asdict(Asset.from_viz_res(asset_id, res, user_labels)) - - def get_model(self) -> ModelMetaData: - url = f"{self._url_prefix}/models" - resp = self.session.get(url, timeout=settings.VIZ_TIMEOUT) + dataset_hash: str, + keyword_ids: Optional[List[int]] = None, + require_hist: bool = False, + ) -> Dict: + """ + viewer: GET /dataset_stats + """ + url = f"{self._url_prefix}/branch/{dataset_hash}/dataset_stats" + + params = { + "require_assets_hist": require_hist, + "require_annos_hist": require_hist, + } # type: Dict + if keyword_ids: + params["class_ids"] = ",".join(str(k) for k in keyword_ids) + + resp = self.get_resp(url, params=params) res = self.parse_resp(resp) - return ModelMetaData.from_viz_res(res) + dataset_info = DatasetInfo.from_dict(res, self._user_labels) + return asdict(dataset_info) - def get_dataset(self, user_labels: UserLabels) -> DatasetMetaData: - url = f"{self._url_prefix}/datasets" - resp = self.session.get(url, timeout=settings.VIZ_TIMEOUT) + def get_negative_count(self, dataset_hash: str, keyword_ids: List[int]) -> int: + url = f"{self._url_prefix}/branch/{dataset_hash}/dataset_stats" + params = {"class_ids": ",".join(str(k) for k in keyword_ids)} + resp = self.get_resp(url, params=params) res = self.parse_resp(resp) - return DatasetMetaData.from_viz_res(res, user_labels) + dataset_info = DatasetInfo.from_dict(res, self._user_labels) + return dataset_info.gt.negative_assets_count if dataset_info.gt else 0 - def get_evaluations(self, user_labels: UserLabels) -> Dict: - url = f"{self._url_prefix}/evaluations" - resp = self.session.get(url, timeout=settings.VIZ_TIMEOUT) + def get_class_wise_count(self, dataset_hash: str) -> Dict[str, int]: + url = f"{self._url_prefix}/branch/{dataset_hash}/dataset_meta_count" + resp = self.get_resp(url) res = self.parse_resp(resp) - evaluations = { - dataset_hash: VizDatasetEvaluationResult(**evaluation).dict() for dataset_hash, evaluation in res.items() + dataset_info = DatasetInfo.from_dict(res, user_labels=self._user_labels) + return dataset_info.gt.keywords if dataset_info.gt else {} + + def check_duplication(self, dataset_hashes: List[str], main_dataset_hash: Optional[str] = None) -> Dict: + """ + viewer: GET /dataset_duplication + """ + url = f"{self._url_prefix}/dataset_duplication" + params = { + "candidate_dataset_ids": ",".join(dataset_hashes), + "corrodee_dataset_ids": main_dataset_hash, } - convert_class_id_to_keyword(evaluations, user_labels) - return evaluations + resp = self.get_resp(url, params=params) + duplicated_stats = self.parse_resp(resp) + return duplicated_stats - def parse_resp(self, resp: requests.Response) -> Dict: + def send_metrics( + self, + metrics_group: str, + id: str, + create_time: int, + keyword_ids: List[int], + extra_data: Optional[Dict] = None, + ) -> None: + url = f"{self._host}/api/v1/user_metrics/{metrics_group}" + payload = extra_data or {} + payload.update( + { + "id": id, + "create_time": create_time, + "user_id": self._user_id, + "project_id": self._project_id, + "class_ids": ",".join(map(str, keyword_ids)), + } + ) + self.post(url, payload) + + def query_metrics( + self, + metrics_group: str, 
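
`get_resp` and `post`, defined just below, wrap every outbound `requests` call so that transport failures surface as domain errors rather than raw `requests` exceptions. A minimal sketch of that translation; the exception classes here are stand-ins for the app's `VizError`/`VizTimeOut`:

```python
import requests
from requests.exceptions import ConnectionError, Timeout

class VizError(Exception): ...     # stand-in for the app's error type
class VizTimeOut(Exception): ...   # stand-in for the app's timeout type

def get_resp(session: requests.Session, url: str, params=None, timeout: int = 30) -> requests.Response:
    try:
        return session.get(url, params=params, timeout=timeout)
    except ConnectionError:        # connection refused, DNS failure, ...
        raise VizError()
    except Timeout:                # connect/read timeout
        raise VizTimeOut()
```

Callers can then handle one stable error vocabulary regardless of the HTTP client underneath.
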
+ user_id: int, + query_field: str, + bucket: str, + unit: str = "", + limit: int = 10, + keyword_ids: Optional[List[int]] = None, + ) -> Dict: + url = f"{self._host}/api/v1/user_metrics/{metrics_group}" + params = { + "user_id": f"{user_id:0>4}", + "query_field": query_field, + "bucket": bucket, + "unit": unit, + "limit": limit, + } + if keyword_ids: + params["class_ids"] = ",".join(map(str, keyword_ids)) + resp = self.get_resp(url, params=params) + return self.parse_resp(resp) + + def post(self, url: str, payload: Optional[Dict], timeout: int = settings.VIZ_TIMEOUT) -> requests.Response: + logger.info("[viewer] request url %s and payload %s", url, payload) + try: + resp = self.session.post(url, data=payload, timeout=timeout) + except ConnectionError: + raise VizError() + except Timeout: + raise VizTimeOut() + else: + return resp + + def get_resp( + self, + url: str, + params: Optional[Dict] = None, + timeout: int = settings.VIZ_TIMEOUT, + ) -> requests.Response: + logger.info("[viewer] request url %s and params %s", url, params) + if params: + params = {k: v for k, v in params.items() if v is not None} + try: + resp = self.session.get(url, params=params, timeout=timeout) + except ConnectionError: + raise VizError() + except Timeout: + raise VizTimeOut() + else: + return resp + + def parse_resp(self, resp: requests.Response) -> Any: """ response falls in three categories: 1. valid result @@ -217,28 +470,27 @@ def parse_resp(self, resp: requests.Response) -> Dict: 3. model not ready, try to get model later """ if resp.ok: - return resp.json()["result"] - elif resp.status_code == 400: - logger.error("[viz] failed to get model info: %s", resp.content) + res = resp.json()["result"] + logger.info("[viewer] successful response: %s", res) + return res + + logger.error("[viewer] error response: %s", resp.content) + if resp.status_code == 400: error_code = resp.json()["code"] if error_code == VizErrorCode.MODEL_NOT_EXISTS: + logger.error("[viewer] model not found") raise ModelNotFound() elif error_code == VizErrorCode.DATASET_EVALUATION_NOT_EXISTS: + logger.error("[viewer] dataset evaluation not found") raise DatasetEvaluationNotFound() - raise ModelNotReady() + elif error_code == CMDResponseCode.RC_CMD_NO_ANNOTATIONS: + logger.error("[viewer] missing annotations for dataset evaluation") + raise DatasetEvaluationMissingAnnotation() + raise FailedToParseVizResponse() def close(self) -> None: self.session.close() def get_asset_url(asset_id: str) -> str: - return f"{settings.NGINX_PREFIX}/ymir-assets/{asset_id}" - - -def convert_class_id_to_keyword(obj: Dict, user_labels: UserLabels) -> None: - if isinstance(obj, dict): - for key, value in obj.items(): - if key == "ci_evaluations": - obj[key] = {user_labels.get_main_names(k)[0]: v for k, v in value.items()} - else: - convert_class_id_to_keyword(obj[key], user_labels) + return f"{settings.NGINX_PREFIX}/ymir-assets/{asset_id[-2:]}/{asset_id}" diff --git a/ymir/backend/src/ymir_app/prestart.sh b/ymir/backend/src/ymir_app/prestart.sh index ab4784797a..837ce1e2e8 100644 --- a/ymir/backend/src/ymir_app/prestart.sh +++ b/ymir/backend/src/ymir_app/prestart.sh @@ -1,16 +1,20 @@ #! 
/usr/bin/env bash +set -e + +# Make sure existing mir repos are up to date +python app/check_mir_repo_version.py # Let the DB start python app/backend_pre_start.py +# Get current alembic version if possible +base_alembic_revision=$(alembic current) + # Run migrations for MySQL alembic upgrade head -# Run migrations for clickhouse -python app/init_clickhouse.py - # Create initial data in DB -python app/initial_data.py +python app/initial_data.py ${base_alembic_revision} # Clean legacy tasks python app/clean_tasks.py diff --git a/ymir/backend/src/ymir_app/tests/api/test_dataset.py b/ymir/backend/src/ymir_app/tests/api/test_dataset.py index 5cd5ce0cc5..2551f3f5a4 100644 --- a/ymir/backend/src/ymir_app/tests/api/test_dataset.py +++ b/ymir/backend/src/ymir_app/tests/api/test_dataset.py @@ -29,11 +29,6 @@ def mock_db(mocker): return mocker.Mock() -@pytest.fixture(scope="function") -def mock_graph_db(mocker): - return mocker.Mock() - - @pytest.fixture(scope="function") def mock_viz(mocker): return mocker.Mock() @@ -64,7 +59,7 @@ def test_list_datasets_not_found(self, client: TestClient, normal_user_token_hea r = client.get( f"{settings.API_V1_STR}/datasets/batch", headers=normal_user_token_headers, - params={"ids": "1000,2000,3000"}, + params={"project_id": 233, "ids": "1000,2000,3000"}, ) assert r.status_code == 404 @@ -82,7 +77,7 @@ def test_list_datasets_given_ids( r = client.get( f"{settings.API_V1_STR}/datasets/batch", headers=normal_user_token_headers, - params={"ids": ids}, + params={"project_id": group.project_id, "ids": ids}, ) datasets = r.json()["result"] assert len(datasets) == 3 @@ -254,7 +249,7 @@ def test_create_dataset_fusion_succeed( "dataset_group_id": dataset_group_obj.id, "main_dataset_id": dataset_obj.id, "include_datasets": [], - "include_strategy": 1, + "strategy": 1, "exclude_datasets": [], "include_labels": [], "exclude_labels": [], diff --git a/ymir/backend/src/ymir_app/tests/api/test_graphs.py b/ymir/backend/src/ymir_app/tests/api/test_graphs.py deleted file mode 100644 index 88a00d3617..0000000000 --- a/ymir/backend/src/ymir_app/tests/api/test_graphs.py +++ /dev/null @@ -1,26 +0,0 @@ -from typing import Dict - -from fastapi.testclient import TestClient - -from app.api.api_v1.api import graphs as m -from app.config import settings - - -class TestGetGraph: - def test_get_graph_for_model_node( - self, - client: TestClient, - normal_user_token_headers: Dict[str, str], - mocker, - ): - crud = mocker.Mock() - crud.model.get.return_value = mocker.Mock() - mocker.patch.object(m, "crud", return_value=crud) - r = client.get( - f"{settings.API_V1_STR}/graphs/", - headers=normal_user_token_headers, - params={"type": "model", "id": 1}, - ) - result = r.json()["result"] - assert "nodes" in result - assert "edges" in result diff --git a/ymir/backend/src/ymir_app/tests/api/test_inferences.py b/ymir/backend/src/ymir_app/tests/api/test_inferences.py index b15b9d6482..3f7b32af83 100644 --- a/ymir/backend/src/ymir_app/tests/api/test_inferences.py +++ b/ymir/backend/src/ymir_app/tests/api/test_inferences.py @@ -7,18 +7,19 @@ from app.api.errors.errors import ( FailedtoDownloadError, InvalidInferenceConfig, - ModelNotFound, + ModelStageNotFound, ) from app.config import settings from tests.utils.images import create_docker_image_and_configs -from tests.utils.models import create_model +from tests.utils.models import create_model_stage from tests.utils.utils import random_lower_string, random_url class TestPostInference: def test_call_inference_missing_model(self, client: TestClient, 
normal_user_token_headers: Dict[str, str], mocker): j = { - "model_id": random.randint(1000, 2000), + "project_id": random.randint(100, 200), + "model_stage_id": random.randint(1000, 2000), "docker_image": random_lower_string(), "image_urls": [random_url()], "docker_image_config": {"mock_docker_image_config": "mock_docker_image_config"}, @@ -28,7 +29,7 @@ def test_call_inference_missing_model(self, client: TestClient, normal_user_toke headers=normal_user_token_headers, json=j, ) - assert r.json()["code"] == ModelNotFound.code + assert r.json()["code"] == ModelStageNotFound.code def test_call_inference_invalid_docker( self, @@ -38,9 +39,10 @@ def test_call_inference_invalid_docker( normal_user_token_headers: Dict[str, str], mocker, ): - model = create_model(db, user_id) + model_stage = create_model_stage(db, user_id) j = { - "model_id": model.id, + "project_id": random.randint(100, 200), + "model_stage_id": model_stage.id, "docker_image": random_lower_string(), "image_urls": [random_url()], "docker_image_config": {"mock_docker_image_config": "mock_docker_image_config"}, @@ -60,10 +62,11 @@ def test_call_inference_download_error( normal_user_token_headers: Dict[str, str], mocker, ): - model = create_model(db, user_id) + model_stage = create_model_stage(db, user_id) image, config = create_docker_image_and_configs(db, image_type=9) j = { - "model_id": model.id, + "project_id": random.randint(100, 200), + "model_stage_id": model_stage.id, "docker_image": image.url, "image_urls": [random_url()], "docker_image_config": {"mock_docker_image_config": "mock_docker_image_config"}, diff --git a/ymir/backend/src/ymir_app/tests/api/test_keywords.py b/ymir/backend/src/ymir_app/tests/api/test_keywords.py index eda381c0de..133137e396 100644 --- a/ymir/backend/src/ymir_app/tests/api/test_keywords.py +++ b/ymir/backend/src/ymir_app/tests/api/test_keywords.py @@ -30,6 +30,7 @@ def test_create_keyword(self, client: TestClient, normal_user_token_headers: Dic json=j, ) res = r.json() + print(f"res: {res}") assert sorted(res["result"]["failed"]) == ['kitten', 'tabby'] @@ -46,4 +47,5 @@ def test_update_keyword( json={"aliases": ["kitten", "tabby"]}, ) res = r.json() + print(f"res: {res}") assert sorted(res["result"]["failed"]) == ["kitten", "tabby"] diff --git a/ymir/backend/src/ymir_app/tests/api/test_stats.py b/ymir/backend/src/ymir_app/tests/api/test_stats.py index c35562660f..1bdbd6c6bf 100644 --- a/ymir/backend/src/ymir_app/tests/api/test_stats.py +++ b/ymir/backend/src/ymir_app/tests/api/test_stats.py @@ -6,30 +6,31 @@ class TestGetStats: - def test_get_stats_for_model( + def test_keywords_recommend( self, client: TestClient, normal_user_token_headers: Dict[str, str], mocker, ): r = client.get( - f"{settings.API_V1_STR}/stats/models/hot", + f"{settings.API_V1_STR}/stats/keywords/recommend", + params={"dataset_ids": "1"}, headers=normal_user_token_headers, ) res = r.json() assert res["code"] == 0 assert res["result"] - def test_get_stats_for_dataset( + def test_get_project_count( self, client: TestClient, normal_user_token_headers: Dict[str, str], mocker, ): r = client.get( - f"{settings.API_V1_STR}/stats/datasets/hot", + f"{settings.API_V1_STR}/stats/projects/count", headers=normal_user_token_headers, - params={"q": "dataset"}, + params={"precision": "day"}, ) res = r.json() assert res["code"] == 0 diff --git a/ymir/backend/src/ymir_app/tests/api/test_task.py b/ymir/backend/src/ymir_app/tests/api/test_task.py index 994701f2a8..79146701d3 100644 --- a/ymir/backend/src/ymir_app/tests/api/test_task.py +++ 
b/ymir/backend/src/ymir_app/tests/api/test_task.py @@ -45,21 +45,11 @@ def mock_db(mocker): return mocker.Mock() -@pytest.fixture(scope="function") -def mock_graph_db(mocker): - return mocker.Mock() - - @pytest.fixture(scope="function") def mock_viz(mocker): return mocker.Mock() -@pytest.fixture(scope="function") -def mock_clickhouse(mocker): - return mocker.Mock() - - class TestListTasks: def test_list_tasks_succeed( self, diff --git a/ymir/backend/src/ymir_app/tests/conftest.py b/ymir/backend/src/ymir_app/tests/conftest.py index f2fd9990e5..410616e250 100644 --- a/ymir/backend/src/ymir_app/tests/conftest.py +++ b/ymir/backend/src/ymir_app/tests/conftest.py @@ -83,33 +83,27 @@ def fake_viz_client() -> Generator: "keywords": [], "metadata": {}, } - assets = Mock(total=1, items=[asset], keywords={}, negative_info={}) + assets = dict(total=1, items=[asset], keywords={}) + dataset_analysis = dict( + keywords={"gt": ["a"], "pred": ["x"]}, + cks_count={}, + cks_count_total={}, + total_assets_mbytes=20, + total_assets_count=400, + gt=None, + pred=None, + hist={ + "asset_bytes": [], + "asset_area": [], + "asset_quality": [], + "asset_hw_ratio": [], + }, + ) + metric_records = [{"legend": "0", "user_id": "1", "count": 95}, {"legend": "1", "user_id": "1", "count": 20}] client.get_assets.return_value = assets client.get_asset.return_value = asset - yield client - finally: - client.close() - - -def fake_graph_client() -> Generator: - try: - client = Mock() - nodes = [ - { - "id": 1, - "name": "n1", - "hash": "h1", - "type": 1 - }, - { - "id": 2, - "name": "n2", - "hash": "h2", - "type": 2 - }, - ] - edges = [{"target": "h1", "source": "h2", "task": {"id": 1}}] - client.query_history.return_value = {"nodes": nodes, "edges": edges} + client.get_dataset_analysis.return_value = dataset_analysis + client.query_metrics.return_value = metric_records yield client finally: client.close() @@ -142,20 +136,9 @@ def fake_cache_client() -> Generator: client.close() -def fake_clickhouse_client() -> Generator: - try: - client = Mock() - client.get_popular_items.return_value = [(1, 1), (2, 2)] - yield client - finally: - client.close() - - app.dependency_overrides[deps.get_controller_client] = fake_controller_client app.dependency_overrides[deps.get_viz_client] = fake_viz_client -app.dependency_overrides[deps.get_graph_client_of_user] = fake_graph_client app.dependency_overrides[deps.get_cache] = fake_cache_client -app.dependency_overrides[deps.get_clickhouse_client] = fake_clickhouse_client @pytest.fixture(scope="module") diff --git a/ymir/backend/src/ymir_app/tests/crud/test_model.py b/ymir/backend/src/ymir_app/tests/crud/test_model.py index 071b19766b..195c64e1e4 100644 --- a/ymir/backend/src/ymir_app/tests/crud/test_model.py +++ b/ymir/backend/src/ymir_app/tests/crud/test_model.py @@ -115,3 +115,15 @@ def test_update_model_state(self, db: Session) -> None: crud.model.update_state(db, model_id=model.id, new_state=ResultState.ready) assert model.result_state == ResultState.ready + + +class TestUpdateModelRecommondedStage: + def test_update_model_recommonded_stage(self, db: Session) -> None: + user_id = randint(100, 200) + project_id = randint(1000, 2000) + stage_id = randint(1, 100) + model = create_model(db, user_id, project_id) + assert model.result_state == ResultState.processing + + crud.model.update_recommonded_stage(db, model_id=model.id, stage_id=stage_id) + assert model.recommended_stage == stage_id diff --git a/ymir/backend/src/ymir_app/tests/libs/test_datasets.py 
b/ymir/backend/src/ymir_app/tests/libs/test_datasets.py index 6f9875f55f..f9bef2c84e 100644 --- a/ymir/backend/src/ymir_app/tests/libs/test_datasets.py +++ b/ymir/backend/src/ymir_app/tests/libs/test_datasets.py @@ -5,28 +5,39 @@ class TestImportDatasetPaths: - def test_import_dataset_paths(self, mocker: Any) -> None: - mocker.patch.object(m, "verify_import_path", return_value=True) - input_path = random_lower_string() + def test_import_dataset_paths(self, mocker: Any, tmp_path: Any) -> None: + input_path = tmp_path + m.settings.SHARED_DATA_DIR = str(tmp_path) + (tmp_path / "images").mkdir() + (tmp_path / "pred").mkdir() p = m.ImportDatasetPaths(input_path, random_lower_string()) - assert p.annotation_dir == f"{input_path}/annotations" - assert p.asset_dir == f"{input_path}/images" + assert p.pred_dir == str(input_path / "pred") + assert p.asset_dir == str(input_path / "images") + assert p.gt_dir is None class TestEvaluateDataset: - def test_evaluate_dataset(self, mocker: Any) -> None: + def test_evaluate_datasets(self, mocker: Any) -> None: user_id = randint(100, 200) project_id = randint(1000, 2000) confidence_threshold = 0.233 - ctrl = mocker.Mock() - viz = mocker.Mock() - viz.get_evaluations.return_value = {} + iou = 0.5 + require_average_iou = True + need_pr_curve = True + ctrl = mocker.Mock(evaluate_dataset=mocker.Mock(return_value={})) user_labels = mocker.Mock() - gt_dataset = mocker.Mock() - other_datasets = [mocker.Mock()] - m.evaluate_dataset( - ctrl, viz, user_id, project_id, user_labels, confidence_threshold, gt_dataset, other_datasets + datasets_mapping = {"a": 1, "b": 2} + m.evaluate_datasets( + ctrl, + user_id, + project_id, + user_labels, + confidence_threshold, + iou, + require_average_iou, + need_pr_curve, + "main_ck", + datasets_mapping, ) ctrl.evaluate_dataset.assert_called() - viz.get_evaluations.assert_called() diff --git a/ymir/backend/src/ymir_app/tests/libs/test_iterations.py b/ymir/backend/src/ymir_app/tests/libs/test_iterations.py new file mode 100644 index 0000000000..b2159022b4 --- /dev/null +++ b/ymir/backend/src/ymir_app/tests/libs/test_iterations.py @@ -0,0 +1,34 @@ +from random import randint +from typing import Any + +from sqlalchemy.orm import Session + +from app.libs import iterations as m +from tests.utils.datasets import create_dataset_record +from tests.utils.projects import create_project_record +from tests.utils.iterations import create_iteration_record + + +class TestCalculateMiningProgress: + def test_calculate_mining_progress(self, db: Session, mocker: Any) -> None: + project = create_project_record(db) + dataset = create_dataset_record(db, user_id=project.user_id, project_id=project.id) + iteration = create_iteration_record(db, project.user_id, project.id, mining_dataset_id=dataset.id) + + id_for_names = mocker.Mock(return_value=([randint(1, 10)] * 3,)) + user_labels = mocker.Mock(id_for_names=id_for_names) + + mocker.patch.object(m, "VizClient") + res = m.calculate_mining_progress(db, user_labels, project.user_id, project.id, iteration.id) + assert "total_mining_ratio" in res + assert "class_wise_mining_ratio" in res + assert "negative_ratio" in res + + +class TestGetTrainingClasses: + def test_get_training_classes(self, db: Session, mocker: Any) -> None: + project = create_project_record(db) + id_for_names = mocker.Mock(return_value=([randint(1, 10)] * 3,)) + user_labels = mocker.Mock(id_for_names=id_for_names) + classes = m.get_training_classes(db, project.id, user_labels) + assert list(classes.keys()) == project.training_targets diff --git 
a/ymir/backend/src/ymir_app/tests/libs/test_tasks.py b/ymir/backend/src/ymir_app/tests/libs/test_tasks.py index 7e7a0c6e37..a55690a5c5 100644 --- a/ymir/backend/src/ymir_app/tests/libs/test_tasks.py +++ b/ymir/backend/src/ymir_app/tests/libs/test_tasks.py @@ -19,7 +19,7 @@ def test_normalize_task_parameters_succeed(self, mocker: Any) -> Any: params = { "keywords": "cat,dog,boy".split(","), "dataset_id": 1, - "model_id": 233, + "model_stage_id": 233, "name": random_lower_string(5), "else": None, } @@ -55,21 +55,7 @@ def test_normalize_task_parameters_succeed(self, mocker: Any) -> Any: assert res["class_ids"] == [0, 1, 2] assert "dataset_hash" in res assert "model_hash" in res - - -class TestWriteClickhouseMetrics: - def test_write_clickhouse_metrics(self, mocker: Any) -> None: - ch = mocker.Mock() - mocker.patch.object(m, "YmirClickHouse", return_value=ch) - task_info = mocker.Mock(type=TaskType.training.value) - dataset_id = randint(100, 200) - dataset_group_id = randint(1000, 2000) - model_id = randint(10000, 20000) - keywords = [random_lower_string() for _ in range(3)] - - m.write_clickhouse_metrics(task_info, dataset_group_id, dataset_id, model_id, keywords) - ch.save_task_parameter.assert_called() - ch.save_dataset_keyword.assert_called() + assert "model_stage_name" in res class TestCreateSingleTask: @@ -77,7 +63,6 @@ def test_create_single_task(self, db: Session, mocker: Any) -> None: mocker.patch.object(m, "normalize_parameters") ctrl = mocker.Mock() mocker.patch.object(m, "ControllerClient", return_value=ctrl) - mocker.patch.object(m, "YmirClickHouse") user_id = randint(100, 200) project_id = randint(1000, 2000) user_labels = mocker.Mock() @@ -102,21 +87,21 @@ def test_task_result_propriety(self, db: Session, mocker: Any) -> None: ctrl = mocker.Mock() mocker.patch.object(m, "ControllerClient", return_value=ctrl) - viz = mocker.Mock() + viz = mocker.MagicMock() mocker.patch.object(m, "VizClient", return_value=viz) tr = m.TaskResult(db, task_in_db) ctrl.get_labels_of_user.assert_not_called() - viz.get_model.assert_not_called() - viz.get_dataset.assert_not_called() + viz.get_model_info.assert_not_called() + viz.get_dataset_info.assert_not_called() tr.user_labels ctrl.get_labels_of_user.assert_called() - tr.model_info - viz.get_model.assert_called() - tr.dataset_info - viz.get_dataset.assert_called() + tr.model_info() + viz.get_model_info.assert_called() + tr.dataset_info() + viz.get_dataset_info.assert_called() def test_get_dest_group_info_is_dataset(self, db: Session, mocker: Any) -> None: user_id = randint(100, 200) @@ -159,8 +144,8 @@ def test_get_dest_group_info_is_model(self, db: Session, mocker: Any) -> None: class TestShouldRetry: @pytest.mark.asyncio() async def test_should_retry(self, mocker: Any) -> None: - resp = mocker.Mock(ok=False) + resp = mocker.AsyncMock(ok=False) assert await m.should_retry(resp) - resp = mocker.Mock(ok=True) + resp = mocker.AsyncMock(ok=True) assert not await m.should_retry(resp) diff --git a/ymir/backend/src/ymir_app/tests/utils/iterations.py b/ymir/backend/src/ymir_app/tests/utils/iterations.py index 1ec77bb7d4..01fa1de891 100644 --- a/ymir/backend/src/ymir_app/tests/utils/iterations.py +++ b/ymir/backend/src/ymir_app/tests/utils/iterations.py @@ -12,12 +12,15 @@ def create_iteration_record( project_id: Optional[int] = None, iteration_round: Optional[int] = None, previous_iteration: Optional[int] = None, + mining_dataset_id: Optional[int] = None, ): j = { "project_id": project_id or randint(1000, 2000), "iteration_round": iteration_round or 1, 
"previous_iteration": previous_iteration or 0, } + if mining_dataset_id: + j["mining_dataset_id"] = mining_dataset_id in_ = schemas.IterationCreate(**j) record = crud.iteration.create_with_user_id(db, obj_in=in_, user_id=user_id) return record diff --git a/ymir/backend/src/ymir_app/tests/utils/models.py b/ymir/backend/src/ymir_app/tests/utils/models.py index 8d9b0454c3..50c89d1c52 100644 --- a/ymir/backend/src/ymir_app/tests/utils/models.py +++ b/ymir/backend/src/ymir_app/tests/utils/models.py @@ -1,3 +1,4 @@ +import time from typing import Optional from random import randint @@ -47,3 +48,12 @@ def create_model( ) model = crud.model.create_with_version(db, obj_in=model_in) return model + + +def create_model_stage( + db: Session, user_id: int, group_id: Optional[int] = None, project_id: Optional[int] = None +) -> models.ModelStage: + model = create_model(db, user_id, group_id, project_id) + stage_in = schemas.ModelStageCreate(name=random_lower_string(), map=0.1, timestamp=time.time(), model_id=model.id) + stage = crud.model_stage.create(db, obj_in=stage_in) + return stage diff --git a/ymir/backend/src/ymir_app/tests/utils/projects.py b/ymir/backend/src/ymir_app/tests/utils/projects.py index b439ea8d26..01defe25d5 100644 --- a/ymir/backend/src/ymir_app/tests/utils/projects.py +++ b/ymir/backend/src/ymir_app/tests/utils/projects.py @@ -1,5 +1,5 @@ from random import randint -from typing import Optional +from typing import Optional, List from sqlalchemy.orm import Session @@ -12,10 +12,12 @@ def create_project_record( db: Session, user_id: Optional[int] = None, name: Optional[str] = None, + training_keywords: Optional[List[str]] = None, ): name = name or random_lower_string() user_id = user_id or randint(1, 20) - j = {"name": name, "training_keywords": [random_lower_string() for _ in range(3)]} + training_keywords = training_keywords or [random_lower_string() for _ in range(3)] + j = {"name": name, "training_keywords": training_keywords} in_ = schemas.ProjectCreate(**j) record = crud.project.create_project(db, obj_in=in_, user_id=user_id) diff --git a/ymir/backend/src/ymir_app/tests/utils/test_controller.py b/ymir/backend/src/ymir_app/tests/utils/test_controller.py index f32f7c2d75..4fa07d7d8d 100644 --- a/ymir/backend/src/ymir_app/tests/utils/test_controller.py +++ b/ymir/backend/src/ymir_app/tests/utils/test_controller.py @@ -33,8 +33,8 @@ def test_training(self): "docker_config": "{}", }, ) - assert ret.req.req_type == m.mirsvrpb.TASK_CREATE - assert ret.req.req_create_task.task_type == m.mirsvrpb.TaskTypeTraining + assert ret.req.req_type == m.mirsvrpb.RequestType.TASK_CREATE + assert ret.req.req_create_task.task_type == m.mir_cmd_pb.TaskType.TaskTypeTraining def test_mining(self): task_type = m.TaskType.mining @@ -48,14 +48,15 @@ def test_mining(self): "dataset_hash": random_lower_string(), "top_k": 1000, "model_hash": random_lower_string(), + "model_stage_name": random_lower_string(), "generate_annotations": True, "strategy": MergeStrategy.prefer_newest, "docker_image": "yolov4-training:test", "docker_config": "{}", }, ) - assert ret.req.req_type == m.mirsvrpb.TASK_CREATE - assert ret.req.req_create_task.task_type == m.mirsvrpb.TaskTypeMining + assert ret.req.req_type == m.mirsvrpb.RequestType.TASK_CREATE + assert ret.req.req_create_task.task_type == m.mir_cmd_pb.TaskType.TaskTypeMining def test_label(self): task_type = m.TaskType.label @@ -72,12 +73,12 @@ def test_label(self): "labellers": [], "class_ids": [1, 2], "extra_url": random_url(), - "keep_annotations": True, + 
"annotation_type": 2, }, ) - assert ret.req.req_type == m.mirsvrpb.TASK_CREATE - assert ret.req.req_create_task.task_type == m.mirsvrpb.TaskTypeLabel - assert ret.req.req_create_task.labeling.export_annotation + assert ret.req.req_type == m.mirsvrpb.RequestType.TASK_CREATE + assert ret.req.req_create_task.task_type == m.mir_cmd_pb.TaskType.TaskTypeLabel + assert ret.req.req_create_task.labeling.annotation_type == m.mirsvrpb.AnnotationType.PRED def test_copy_data(self): task_type = m.TaskType.copy_data @@ -93,8 +94,8 @@ def test_copy_data(self): "src_resource_id": random_lower_string(), }, ) - assert ret.req.req_type == m.mirsvrpb.TASK_CREATE - assert ret.req.req_create_task.task_type == m.mirsvrpb.TaskTypeCopyData + assert ret.req.req_type == m.mirsvrpb.RequestType.TASK_CREATE + assert ret.req.req_create_task.task_type == m.mir_cmd_pb.TaskType.TaskTypeCopyData def test_kill(self, mocker): task_type = m.ExtraRequestType.kill @@ -114,47 +115,49 @@ def test_kill(self, mocker): class TestControllerClient: - def test_close_controller(self, mocker): + def test_send_with_grpc_error(self, mocker): channel_str = random_lower_string() + mock_grpc = mocker.Mock() mocker.patch.object(m, "grpc", return_value=mock_grpc) - cc = m.ControllerClient(channel_str) - cc.channel = mock_channel = mocker.Mock() - cc.close() - mock_channel.close.assert_called() - def test_send_with_grpc_error(self, mocker): - channel_str = random_lower_string() + mock_mir_grpc = mocker.Mock() + mock_mir_grpc.mir_controller_serviceStub().data_manage_request.return_value = mocker.Mock(code=-1) + mocker.patch.object(m, "mir_grpc", mock_mir_grpc) + cc = m.ControllerClient(channel_str) - mock_stub = mocker.Mock() - mock_stub.data_manage_request.return_value = mocker.Mock(code=-1) - cc.stub = mock_stub req = mocker.Mock() with pytest.raises(ValueError): cc.send(req) def test_send(self, mocker): channel_str = random_lower_string() + + mock_grpc = mocker.Mock() + mocker.patch.object(m, "grpc", return_value=mock_grpc) + + mock_mir_grpc = mocker.Mock() + mock_mir_grpc.mir_controller_serviceStub().data_manage_request.return_value = mocker.Mock(code=0) + mocker.patch.object(m, "mir_grpc", mock_mir_grpc) + cc = m.ControllerClient(channel_str) - mock_stub = mocker.Mock() - mock_stub.data_manage_request.return_value = mocker.Mock(code=0) - cc.stub = mock_stub req = mocker.Mock() - mocker.patch.object(m, "json_format") + mocker.patch.object(m, "MessageToDict") + mocker.patch.object(m, "MessageToString") cc.send(req) - mock_stub.data_manage_request.assert_called() def test_inference(self, mocker): user_id = random.randint(1000, 9000) project_id = random.randint(1000, 9000) model_hash = random_lower_string() + model_stage = random_lower_string() asset_dir = random_lower_string() channel_str = random_lower_string() docker_image = random_lower_string() docker_config = random_lower_string() cc = m.ControllerClient(channel_str) cc.send = mock_send = mocker.Mock() - cc.call_inference(user_id, project_id, model_hash, asset_dir, docker_image, docker_config) + cc.call_inference(user_id, project_id, model_hash, model_stage, asset_dir, docker_image, docker_config) mock_send.assert_called() generated_req = mock_send.call_args[0][0].req assert generated_req.user_id == str(user_id) diff --git a/ymir/backend/src/ymir_app/tests/utils/test_files.py b/ymir/backend/src/ymir_app/tests/utils/test_files.py index d38af7c061..235f6dcda5 100644 --- a/ymir/backend/src/ymir_app/tests/utils/test_files.py +++ b/ymir/backend/src/ymir_app/tests/utils/test_files.py @@ -13,9 
+13,12 @@ def test_preprocess_dataset(self, tmp_path, mocker): mocker.patch.object(m, "download_file", return_value=b"") mocker.patch.object(m, "decompress_zip", return_value=None) mocker.patch.object(m, "locate_dir", return_value=Path("./a/b")) + mocker.patch.object(m, "locate_annotation_dir", return_value=None) url = random_url() - ret = m.prepare_imported_dataset_dir(url, output_dir) - assert ret == "a" + asset_dir, gt_dir, pred_dir = m.prepare_downloaded_paths(url, output_dir) + assert asset_dir == Path("a/b") + assert gt_dir is None + assert pred_dir is None class TestIsRelativeTo: @@ -27,13 +30,16 @@ def test_is_relative_to(self): class TestIsValidImportPath: - def test_verify_import_path(self, mocker, tmp_path): - anno_dir = tmp_path / "annotations" - anno_dir.mkdir() + def test_locate_import_paths(self, mocker, tmp_path): + asset_dir = tmp_path / "images" + asset_dir.mkdir() m.settings.SHARED_DATA_DIR = str(tmp_path) - assert m.verify_import_path(tmp_path) is None + ret_asset_dir, gt_dir, pred_dir = m.locate_import_paths(tmp_path) + assert ret_asset_dir == asset_dir + assert gt_dir is None + assert pred_dir is None - def test_invalid_import_path(self, mocker, tmp_path): + def test_locate_import_paths_error(self, mocker, tmp_path): m.settings.SHARED_DATA_DIR = str(tmp_path) - with pytest.raises(m.InvalidFileStructure): - m.verify_import_path(tmp_path) + with pytest.raises(FileNotFoundError): + m.locate_import_paths(tmp_path) diff --git a/ymir/backend/src/ymir_app/tests/utils/test_graph.py b/ymir/backend/src/ymir_app/tests/utils/test_graph.py deleted file mode 100644 index 1333b2b5db..0000000000 --- a/ymir/backend/src/ymir_app/tests/utils/test_graph.py +++ /dev/null @@ -1,51 +0,0 @@ -import random - -import pytest - -from app.utils import graph as m -from tests.utils.utils import random_lower_string - - -class TestYmirNode: - def test_create_ymir_node(self): - d = { - "id": random.randint(1000, 2000), - "name": random_lower_string(10), - "hash": random_lower_string(10), - "label": "Model", - } - node = m.YmirNode.from_dict(d) - assert node.label == "Model" - assert node.id == d["id"] - assert node.properties["name"] == d["name"] - assert node.properties["hash"] == d["hash"] - - -@pytest.fixture(autouse=True) -def mock_redis(mocker): - mocker.patch.object(m, "StrictRedis") - - -class TestGraphClient: - def test_query(self, mocker): - mock_graph = mocker.Mock() - mocker.patch.object(m, "Graph", return_value=mock_graph) - q = random_lower_string() - client = m.GraphClient(redis_uri=None) - client.user_id = 2 - client.query(q) - mock_graph.query.assert_called_with(q) - - def test_add_relationship(self, mocker): - mock_graph = mocker.Mock() - mocker.patch.object(m, "Graph", return_value=mock_graph) - - client = m.GraphClient(redis_uri=None) - client.user_id = 2 - client.add_relationship( - {"id": 1, "label": "Dataset"}, - {"id": 2, "label": "Model"}, - {"id": 3, "label": "Task"}, - ) - - mock_graph.query.assert_called() diff --git a/ymir/backend/src/ymir_app/tests/utils/test_viz.py b/ymir/backend/src/ymir_app/tests/utils/test_viz.py index 4bbcce0d15..bde0b72d2b 100644 --- a/ymir/backend/src/ymir_app/tests/utils/test_viz.py +++ b/ymir/backend/src/ymir_app/tests/utils/test_viz.py @@ -31,10 +31,22 @@ class TestAsset: def test_create_asset(self, mock_user_labels, mocker): asset_id = random_lower_string() res = { - "annotations": [ + "asset_id": asset_id, + "pred": [ { - "box": random_lower_string(10), + "box": {}, "class_id": random.randint(1, 20), + "cm": 1, + "tags": {}, + } + ], + "cks": 
[], + "gt": [ + { + "box": {}, + "class_id": random.randint(1, 20), + "cm": 1, + "tags": {}, } ], "class_ids": list(range(1, 20)), @@ -46,7 +58,15 @@ def test_create_asset(self, mock_user_labels, mocker): }, } - A = m.Asset.from_viz_res(asset_id, res, user_labels=mock_user_labels) + A = m.ViewerAsset( + res["asset_id"], + res["class_ids"], + res["metadata"], + res["gt"], + res["pred"], + res["cks"], + user_labels=mock_user_labels, + ) assert A.url == m.get_asset_url(asset_id) @@ -57,55 +77,114 @@ def test_assets(self, mock_user_labels): { "asset_id": random_lower_string(), "class_ids": [random.randint(1, 80) for _ in range(10)], + "pred": [ + { + "box": {}, + "class_id": random.randint(1, 20), + "cm": 1, + "tags": {}, + } + ], + "gt": [ + { + "box": {}, + "class_id": random.randint(1, 20), + "cm": 1, + "tags": {}, + } + ], + "metadata": { + "height": random.randint(100, 200), + "width": random.randint(100, 200), + "image_channels": random.randint(1, 3), + "timestamp": {"start": time.time()}, + }, + "cks": {}, } ], - "total": 124, + "total_assets_count": 124, } - AS = m.Assets.from_viz_res(res, mock_user_labels) + AS = m.ViewerAssetsResponse(res["total_assets_count"], res["elements"], user_labels=mock_user_labels) assert len(AS.items) == len(res["elements"]) class TestModel: def test_model(self): res = { - "model_id": random_lower_string(), - "model_mAP": random.randint(1, 100) / 100, + "model_hash": random_lower_string(), + "mean_average_precision": random.randint(1, 100) / 100, "task_parameters": "mock_task_parameters", - "executor_config": "mock_executor_config", + "executor_config": {"class_names": "a,b,c".split(",")}, + "stages": { + "epoch-1000": { + "mAP": -1, + "timestamp": 100000000, + }, + "epoch-2000": { + "mAP": 0.3, + "timestamp": 100000001, + }, + "epoch-3000": { + "mAP": 0.83, + "timestamp": 100000002, + }, + }, + "best_stage_name": "epoch-3000", } - M = m.ModelMetaData.from_viz_res(res) - assert M.hash == res["model_id"] - assert M.map == res["model_mAP"] + M = m.ViewerModelInfoResponse.parse_obj(res) + assert M.hash == res["model_hash"] + assert M.map == res["mean_average_precision"] assert M.task_parameters == res["task_parameters"] assert M.executor_config == res["executor_config"] + assert M.model_stages == res["stages"] + assert M.best_stage_name == res["best_stage_name"] class TestDataset: def test_dataset(self, mock_user_labels): res = { "class_ids_count": {3: 34}, - "ignored_labels": {"cat": 5}, - "negative_info": {"negative_images_cnt": 0, "project_negative_images_cnt": 0}, - "total_images_cnt": 1, + "new_types": {"cat": 5}, + "new_types_added": False, + "cks_count_total": {}, + "cks_count": {}, + "total_assets_count": 1, + "pred": { + "class_ids_count": {3: 34}, + "new_types": {"cat": 5}, + "new_types_added": False, + "negative_assets_count": 0, + "tags_count_total": {}, + "tags_count": {}, + "annos_count": 28, + "class_names_count": {"cat": 3}, + "hist": {"anno_area_ratio": [[{"x": 1, "y": 2}]], "anno_quality": [[{"x": 1, "y": 2}]]}, + }, + "gt": {}, + "hist": { + "asset_area": [[{"x": 1, "y": 2}]], + "asset_bytes": [[{"x": 1, "y": 2}]], + "asset_hw_ratio": [[{"x": 1, "y": 2}]], + "asset_quality": [[{"x": 1, "y": 2}]], + }, + "cks": {}, + "total_assets_mbytes": 10, + "total_assets_count": 1, } - M = m.DatasetMetaData.from_viz_res(res, mock_user_labels) - assert M.keyword_count == len(res["class_ids_count"]) - assert M.ignored_keywords == res["ignored_labels"] - assert M.negative_info["negative_images_cnt"] == res["negative_info"]["negative_images_cnt"] - assert 
M.negative_info["project_negative_images_cnt"] == res["negative_info"]["project_negative_images_cnt"] - assert M.asset_count == res["total_images_cnt"] + M = m.DatasetInfo.from_dict(res, mock_user_labels) + assert "gt" in M.keywords + assert "pred" in M.keywords + assert M.gt is None + assert M.pred class TestVizClient: def test_get_viz_client(self): - host = random_lower_string() - viz = m.VizClient(host=host) - assert viz.host == host + viz = m.VizClient() assert viz.session def test_get_assets(self, mock_user_labels, mocker): - host = random_lower_string() - viz = m.VizClient(host=host) + viz = m.VizClient() mock_session = mocker.Mock() resp = mocker.Mock() res = { @@ -113,9 +192,32 @@ def test_get_assets(self, mock_user_labels, mocker): { "asset_id": random_lower_string(), "class_ids": [random.randint(1, 80) for _ in range(10)], + "pred": [ + { + "box": {}, + "class_id": random.randint(1, 20), + "cm": 1, + "tags": {}, + } + ], + "gt": [ + { + "box": {}, + "class_id": random.randint(1, 20), + "cm": 1, + "tags": {}, + } + ], + "metadata": { + "height": random.randint(100, 200), + "width": random.randint(100, 200), + "image_channels": random.randint(1, 3), + "timestamp": {"start": time.time()}, + }, + "cks": {}, } ], - "total": random.randint(1000, 2000), + "total_assets_count": random.randint(1000, 2000), } resp.json.return_value = {"result": res} mock_session.get.return_value = resp @@ -126,61 +228,38 @@ def test_get_assets(self, mock_user_labels, mocker): viz.initialize( user_id=user_id, project_id=project_id, - branch_id=task_id, - ) - ret = viz.get_assets(user_labels=mock_user_labels) - assert isinstance(ret, m.Assets) - assert ret.total - assert ret.items - assert len(ret.items) == len(res["elements"]) - - def test_get_asset(self, mock_user_labels, mocker): - host = random_lower_string() - viz = m.VizClient(host=host) - mock_session = mocker.Mock() - resp = mocker.Mock() - res = { - "annotations": [ - { - "box": random_lower_string(10), - "class_id": random.randint(1, 80), - } - ], - "class_ids": list(range(1, 20)), - "metadata": { - "height": random.randint(100, 200), - "width": random.randint(100, 200), - "image_channels": random.randint(1, 3), - "timestamp": {"start": time.time()}, - }, - } - resp.json.return_value = {"result": res} - mock_session.get.return_value = resp - viz.session = mock_session - - user_id = random.randint(100, 200) - project_id = random.randint(100, 200) - task_id = random_lower_string() - asset_id = random_lower_string() - viz.initialize( - user_id=user_id, - project_id=project_id, - branch_id=task_id, + user_labels=mock_user_labels, ) - ret = viz.get_asset(asset_id=asset_id, user_labels=mock_user_labels) - assert isinstance(ret, dict) - assert ret["hash"] == asset_id + ret = viz.get_assets(dataset_hash=task_id) + assert isinstance(ret, Dict) + assert ret["total"] + assert ret["items"] + assert len(ret["items"]) == len(res["elements"]) - def test_get_model(self, mocker): - host = random_lower_string() - viz = m.VizClient(host=host) + def test_get_model_info(self, mocker): + viz = m.VizClient() mock_session = mocker.Mock() resp = mocker.Mock() res = { - "model_id": random_lower_string(), - "model_mAP": random.randint(1, 100) / 100, + "model_hash": random_lower_string(), + "mean_average_precision": random.randint(1, 100) / 100, "task_parameters": "mock_task_parameters", - "executor_config": "mock_executor_config", + "executor_config": {"class_names": "a,b,c".split(",")}, + "stages": { + "epoch-1000": { + "mAP": -1, + "timestamp": 100000000, + }, + 
"epoch-2000": { + "mAP": 0.3, + "timestamp": 100000001, + }, + "epoch-3000": { + "mAP": 0.83, + "timestamp": 100000002, + }, + }, + "best_stage_name": "epoch-3000", } resp.json.return_value = {"result": res} mock_session.get.return_value = resp @@ -189,24 +268,46 @@ def test_get_model(self, mocker): user_id = random.randint(100, 200) project_id = random.randint(100, 200) task_id = random_lower_string() - viz.initialize(user_id=user_id, project_id=project_id, branch_id=task_id) - ret = viz.get_model() - assert isinstance(ret, m.ModelMetaData) - assert ret.hash == res["model_id"] - assert ret.map == res["model_mAP"] - assert ret.task_parameters == res["task_parameters"] - assert ret.executor_config == res["executor_config"] - - def test_get_dataset(self, mock_user_labels, mocker): - host = random_lower_string() - viz = m.VizClient(host=host) + viz.initialize(user_id=user_id, project_id=project_id) + ret = viz.get_model_info(task_id) + assert isinstance(ret, Dict) + assert ret["hash"] == res["model_hash"] + assert ret["map"] == res["mean_average_precision"] + assert ret["task_parameters"] == res["task_parameters"] + assert ret["executor_config"] == res["executor_config"] + + def test_get_dataset_analysis(self, mock_user_labels, mocker): + viz = m.VizClient() mock_session = mocker.Mock() resp = mocker.Mock() res = { "class_ids_count": {3: 34}, - "ignored_labels": {"cat": 5}, - "negative_info": {"negative_images_cnt": 0, "project_negative_images_cnt": 0}, - "total_images_cnt": 1, + "new_types": {"cat": 5}, + "new_types_added": False, + "cks_count_total": {}, + "cks_count": {}, + "total_assets_count": 1, + "pred": { + "class_ids_count": {3: 34}, + "new_types": {"cat": 5}, + "new_types_added": False, + "tags_count_total": {}, + "tags_count": {}, + "negative_assets_count": 0, + "annos_count": 28, + "class_names_count": {"cat": 3}, + "hist": {"anno_area_ratio": [[{"x": 1, "y": 2}]], "anno_quality": [[{"x": 1, "y": 2}]]}, + }, + "gt": {}, + "hist": { + "asset_area": [[{"x": 1, "y": 2}]], + "asset_bytes": [[{"x": 1, "y": 2}]], + "asset_hw_ratio": [[{"x": 1, "y": 2}]], + "asset_quality": [[{"x": 1, "y": 2}]], + }, + "total_assets_mbytes": 10, + "total_assets_count": 1, + "cks": {}, } resp.json.return_value = {"result": res} mock_session.get.return_value = resp @@ -215,18 +316,16 @@ def test_get_dataset(self, mock_user_labels, mocker): user_id = random.randint(100, 200) project_id = random.randint(100, 200) task_id = random_lower_string() - viz.initialize(user_id=user_id, project_id=project_id, branch_id=task_id) - ret = viz.get_dataset(mock_user_labels) - assert isinstance(ret, m.DatasetMetaData) - assert ret.keyword_count == len(res["class_ids_count"]) - assert ret.ignored_keywords == res["ignored_labels"] - assert ret.negative_info["negative_images_cnt"] == res["negative_info"]["negative_images_cnt"] - assert ret.negative_info["project_negative_images_cnt"] == res["negative_info"]["project_negative_images_cnt"] - assert ret.asset_count == res["total_images_cnt"] + viz.initialize(user_id=user_id, project_id=project_id, user_labels=mock_user_labels) + ret = viz.get_dataset_analysis(dataset_hash=task_id) + assert isinstance(ret, Dict) + assert "gt" in ret["keywords"] + assert "pred" in ret["keywords"] + assert ret["gt"] is None + assert ret["pred"] def test_close(self, mocker): - host = random_lower_string() - viz = m.VizClient(host=host) + viz = m.VizClient() viz.session = mock_session = mocker.Mock() viz.close() diff --git a/ymir/backend/src/ymir_controller/client/grpc_client.py 
b/ymir/backend/src/ymir_controller/client/grpc_client.py deleted file mode 100644 index ea727d05dd..0000000000 --- a/ymir/backend/src/ymir_controller/client/grpc_client.py +++ /dev/null @@ -1,314 +0,0 @@ -import argparse -import logging -import sys -from typing import Any, Dict, Optional - -import grpc -from google.protobuf import json_format - -from controller.utils import invoker_call, revs -from proto import backend_pb2 -from proto import backend_pb2_grpc - - -class ControllerClient: - def __init__(self, channel: str, repo: str, user: str) -> None: - channel = grpc.insecure_channel(channel) - self.stub = backend_pb2_grpc.mir_controller_serviceStub(channel) - self.user = user - self.repo = repo - self.executor_config = '' - self.executor_name = '' - - def process_req(self, req: Any) -> Any: - resp = self.stub.data_manage_request(req) - if resp.code != 0: - raise ValueError(f"gRPC error. response: {resp.code} {resp.message}") - logging.info(json_format.MessageToDict(resp, preserving_proto_field_name=True, use_integers_for_enums=True)) - return resp - - -def _build_cmd_create_user_req(args: Dict) -> backend_pb2.GeneralReq: - return invoker_call.make_cmd_request(user_id=args["user"], task_id=args["tid"], req_type=backend_pb2.USER_CREATE) - - -def _build_cmd_create_repo_req(args: Dict) -> backend_pb2.GeneralReq: - return invoker_call.make_cmd_request(user_id=args["user"], - repo_id=args["repo"], - task_id=args["tid"], - req_type=backend_pb2.REPO_CREATE) - - -def _build_cmd_add_labels_req(args: Dict) -> backend_pb2.GeneralReq: - label_list = args["labels"].split(';') - - label = backend_pb2.Label() - label.id = -1 - label.name = label_list[0] - label.aliases.extend(label_list[1:]) - - label_collection = backend_pb2.LabelCollection() - label_collection.labels.append(label) - return invoker_call.make_cmd_request(user_id=args["user"], - repo_id=args["repo"], - task_id=args["tid"], - label_collection=label_collection, - req_type=backend_pb2.CMD_LABEL_ADD) - - -def _build_cmd_get_labels_req(args: Dict) -> backend_pb2.GeneralReq: - return invoker_call.make_cmd_request(user_id=args["user"], - repo_id=args["repo"], - task_id=args["tid"], - req_type=backend_pb2.CMD_LABEL_GET) - - -def _build_cmd_sampling_req(args: Dict) -> backend_pb2.GeneralReq: - return invoker_call.make_cmd_request(user_id=args["user"], - repo_id=args["repo"], - task_id=args["tid"], - in_dataset_ids=args['in_dataset_ids'], - sampling_count=args['sampling_count'], - sampling_rate=args['sampling_rate'], - req_type=backend_pb2.CMD_SAMPLING) - - -def call_cmd(client: ControllerClient, *, args: Any) -> Optional[str]: - args = vars(args) - req_name = "_build_cmd_{}_req".format(args['task_type']) - req_func = getattr(sys.modules[__name__], req_name) - req = req_func(args) - return client.process_req(req) - - -def _build_task_filter_req(args: Dict) -> backend_pb2.GeneralReq: - filter_request = backend_pb2.TaskReqFilter() - filter_request.in_dataset_ids[:] = args['in_dataset_ids'] - if args.get('in_class_ids', None): - filter_request.in_class_ids[:] = args['in_class_ids'] - if args.get('ex_class_ids', None): - filter_request.ex_class_ids[:] = args['ex_class_ids'] - - req_create_task = backend_pb2.ReqCreateTask() - req_create_task.task_type = backend_pb2.TaskTypeFilter - req_create_task.filter.CopyFrom(filter_request) - - return req_create_task - - -def _build_task_training_req(args: Dict) -> backend_pb2.GeneralReq: - train_task_req = backend_pb2.TaskReqTraining() - for in_dataset_id in args['in_dataset_ids']: - 
train_task_req.in_dataset_types.append(revs.build_tvt_dataset_id(in_dataset_id)) - train_task_req.in_class_ids[:] = args['in_class_ids'] - - req_create_task = backend_pb2.ReqCreateTask() - req_create_task.task_type = backend_pb2.TaskTypeTraining - req_create_task.no_task_monitor = args['no_task_monitor'] - req_create_task.training.CopyFrom(train_task_req) - - return req_create_task - - -def _build_task_mining_req(args: Dict) -> backend_pb2.GeneralReq: - mine_task_req = backend_pb2.TaskReqMining() - if args.get('top_k', None): - mine_task_req.top_k = args['top_k'] - mine_task_req.in_dataset_ids[:] = args['in_dataset_ids'] - if args.get('ex_dataset_ids', None): - mine_task_req.ex_dataset_ids[:] = args['ex_dataset_ids'] - - req_create_task = backend_pb2.ReqCreateTask() - req_create_task.task_type = backend_pb2.TaskTypeMining - req_create_task.mining.CopyFrom(mine_task_req) - - return req_create_task - - -def _build_task_importing_req(args: Dict) -> backend_pb2.GeneralReq: - importing_request = backend_pb2.TaskReqImporting() - importing_request.asset_dir = args['asset_dir'] - importing_request.annotation_dir = args['annotation_dir'] - importing_request.name_strategy_ignore = args['name_strategy_ignore'] - - req_create_task = backend_pb2.ReqCreateTask() - req_create_task.task_type = backend_pb2.TaskTypeImportData - req_create_task.no_task_monitor = args['no_task_monitor'] - req_create_task.importing.CopyFrom(importing_request) - - return req_create_task - - -def _build_task_labeling_req(args: Dict) -> backend_pb2.GeneralReq: - labeling_request = backend_pb2.TaskReqLabeling() - labeling_request.dataset_id = args['in_dataset_ids'][0] - labeling_request.labeler_accounts[:] = args['labeler_accounts'] - labeling_request.in_class_ids[:] = args['in_class_ids'] - labeling_request.expert_instruction_url = args['expert_instruction_url'] - labeling_request.project_name = args['project_name'] - if args['keep_annotation']: - labeling_request.export_annotation = True - - req_create_task = backend_pb2.ReqCreateTask() - req_create_task.task_type = backend_pb2.TaskTypeLabel - req_create_task.no_task_monitor = args['no_task_monitor'] - req_create_task.labeling.CopyFrom(labeling_request) - - return req_create_task - - -def _build_task_fusion_req(args: Dict) -> backend_pb2.GeneralReq: - fusion_request = backend_pb2.TaskReqFusion() - fusion_request.merge_strategy = backend_pb2.MergeStrategy.HOST - fusion_request.in_dataset_ids[:] = args['in_dataset_ids'] - if args.get('ex_dataset_ids', None): - fusion_request.ex_dataset_ids[:] = args['ex_dataset_ids'] - if args.get('in_class_ids', None): - fusion_request.in_class_ids[:] = args['in_class_ids'] - if args.get('ex_class_ids', None): - fusion_request.ex_class_ids[:] = args['ex_class_ids'] - if args.get('sampling_count', 0): - fusion_request.count = args['sampling_count'] - elif args.get('sampling_rate', 0.0): - fusion_request.rate = args['sampling_rate'] - - req_create_task = backend_pb2.ReqCreateTask() - req_create_task.task_type = backend_pb2.TaskTypeFusion - req_create_task.no_task_monitor = args['no_task_monitor'] - req_create_task.fusion.CopyFrom(fusion_request) - - return req_create_task - - -def _build_task_import_model_req(args: Dict) -> backend_pb2.GeneralReq: - req_create_task = backend_pb2.ReqCreateTask() - req_create_task.task_type = backend_pb2.TaskTypeImportModel - req_create_task.no_task_monitor = args['no_task_monitor'] - req_create_task.model_importing.model_package_path = args['model_package_path'] - - return req_create_task - - -def 
_get_executor_config(args: Any) -> str: - executor_config = '' - if args['executor_config']: - with open(args['executor_config'], 'r') as f: - executor_config = f.read() - return executor_config - - -def call_create_task(client: ControllerClient, *, args: Any) -> Optional[str]: - args = vars(args) - req_name = "_build_task_{}_req".format(args["task_type"]) - req_func = getattr(sys.modules[__name__], req_name) - task_req = req_func(args) - req = invoker_call.make_cmd_request(user_id=args["user"], - repo_id=args["repo"], - task_id=args["tid"], - model_hash=args["model_hash"], - req_type=backend_pb2.TASK_CREATE, - req_create_task=task_req, - executant_name=args['tid'], - merge_strategy=1, - docker_image_config=_get_executor_config(args), - singleton_op=args['executor_name'], - task_parameters=args['task_parameters']) - logging.info(json_format.MessageToDict(req, preserving_proto_field_name=True, use_integers_for_enums=True)) - return client.process_req(req) - - -# TODO (phoenix): check and fix this. -# def call_check_task_status(client: ControllerClient, *, args: Any) -> List: -# args = vars(args) -# task_info_req = backend_pb2.ReqGetTaskInfo() -# task_info_req.task_ids[:] = args["task_ids"] -# req = invoker_call.make_cmd_request(user_id=args["user"], -# repo_id=args["repo"], -# task_id=args["tid"], -# req_type=backend_pb2.TASK_INFO, -# task_info_req=task_info_req) -# return client.process_req(req) - - -def get_parser() -> Any: - parser = argparse.ArgumentParser(description="controler caller") - sub_parsers = parser.add_subparsers() - - common_group = parser.add_argument_group("common", "common parameters") - common_group.add_argument( - "-g", - "--grpc", - default="127.0.0.1:50066", - type=str, - help="grpc channel", - ) - common_group.add_argument("-u", "--user", type=str, help="default user") - common_group.add_argument("-r", "--repo", type=str, help="default mir repo") - common_group.add_argument("-t", "--tid", type=str, help="task id") - common_group.add_argument("--model_hash", type=str, help="model hash") - common_group.add_argument("--labels", type=str, help="labels to be added, seperated by comma.") - - # CMD CALL - parser_cmd_call = sub_parsers.add_parser("cmd_call", help="create sync cmd call") - parser_cmd_call.add_argument("--task_type", - choices=["create_user", "create_repo", "add_labels", "get_labels", "sampling"], - type=str, - help="task type") - parser_cmd_call.add_argument("--in_dataset_ids", nargs="*", type=str) - sampling_group = parser_cmd_call.add_mutually_exclusive_group() - sampling_group.add_argument("--sampling_count", type=int, help="sampling count") - sampling_group.add_argument("--sampling_rate", type=float, help="sampling rate") - parser_cmd_call.set_defaults(func=call_cmd) - - # CREATE TASK - parser_create_task = sub_parsers.add_parser("create_task", help="create a long-running task") - parser_create_task.add_argument( - "--task_type", - choices=["filter", "merge", "training", "mining", "importing", "labeling", "fusion", "import_model"], - type=str, - help="task type") - parser_create_task.add_argument("--in_class_ids", nargs="*", type=int) - parser_create_task.add_argument("--ex_class_ids", nargs="*", type=int) - parser_create_task.add_argument("--in_dataset_ids", nargs="*", type=str) - parser_create_task.add_argument("--ex_dataset_ids", nargs="*", type=str) - parser_create_task.add_argument("--asset_dir", type=str) - parser_create_task.add_argument("--annotation_dir", type=str) - parser_create_task.add_argument("--name_strategy_ignore", action="store_true") 
- parser_create_task.add_argument("--model_package_path", type=str) - parser_create_task.add_argument("--top_k", type=int) - parser_create_task.add_argument("--expert_instruction_url", type=str) - parser_create_task.add_argument("--labeler_accounts", nargs="*", type=str) - parser_create_task.add_argument("--project_name", type=str) - parser_create_task.add_argument("--executor_config", type=str, default='') - parser_create_task.add_argument("--executor_name", type=str, default='') - parser_create_task.add_argument("--keep_annotation", action="store_true") - parser_create_task.add_argument("--no_task_monitor", action="store_true") - sampling_group = parser_create_task.add_mutually_exclusive_group() - sampling_group.add_argument("--sampling_count", type=int, help="sampling count") - sampling_group.add_argument("--sampling_rate", type=float, help="sampling rate") - parser_create_task.add_argument("--task_parameters", type=str, default='') - parser_create_task.set_defaults(func=call_create_task) - - return parser - - -def run() -> None: - logging.basicConfig(stream=sys.stdout, - format='%(levelname)-8s: [%(asctime)s] %(filename)s:%(lineno)s:%(funcName)s(): %(message)s', - datefmt='%Y%m%d-%H:%M:%S', - level=logging.DEBUG) - logging.debug("in debug mode") - - parser = get_parser() - args = parser.parse_args() - if not hasattr(args, "func"): - print("invalid argument, try -h to get more info") - return - - logging.info(f"args: {args}") - client = ControllerClient(args.grpc, args.repo, args.user) - args.func(client, args=args) - - -if __name__ == "__main__": - run() diff --git a/ymir/backend/src/ymir_controller/controller/config/common_task.py b/ymir/backend/src/ymir_controller/controller/config/common_task.py index 553bcaf7fe..ed67c13e19 100644 --- a/ymir/backend/src/ymir_controller/controller/config/common_task.py +++ b/ymir/backend/src/ymir_controller/controller/config/common_task.py @@ -1,14 +1,16 @@ import os -from proto import backend_pb2 +from mir.protos import mir_command_pb2 as mir_cmd_pb # redis service BACKEND_REDIS_URL = os.environ.get("BACKEND_REDIS_URL", "redis://:@127.0.0.1:6379") IMAGE_CONFIG_PATH = { - backend_pb2.TaskType.TaskTypeTraining: "/img-man/training-template.yaml", - backend_pb2.TaskType.TaskTypeMining: "/img-man/mining-template.yaml", - backend_pb2.TaskType.TaskTypeInfer: "/img-man/infer-template.yaml", + mir_cmd_pb.TaskType.TaskTypeTraining: "/img-man/training-template.yaml", + mir_cmd_pb.TaskType.TaskTypeMining: "/img-man/mining-template.yaml", + mir_cmd_pb.TaskType.TaskTypeInfer: "/img-man/infer-template.yaml", } +IMAGE_LIVECODE_CONFIG_PATH = "/img-man/code-access.yaml" + MONITOR_URL = os.environ.get("MONITOR_URL", "http://127.0.0.1:9098") diff --git a/ymir/backend/src/ymir_controller/controller/config/label_task.py b/ymir/backend/src/ymir_controller/controller/config/label_task.py index 2020103772..c5de29084d 100644 --- a/ymir/backend/src/ymir_controller/controller/config/label_task.py +++ b/ymir/backend/src/ymir_controller/controller/config/label_task.py @@ -13,6 +13,7 @@ # task_monitor_file MONITOR_MAPPING_KEY = "monitor_mapping" LABEL_TASK_LOOP_SECONDS = int(env("LABEL_TASK_LOOP_SECONDS", 5 * 60)) +LABEL_TOOL_TIMEOUT = int(env("LABEL_TOOL_TIMEOUT", 600)) # end labelling model env # get label studio tasks's slice number diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_base.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_base.py index 524c38f2a5..642d5311e6 100644 --- 
a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_base.py +++ b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_base.py @@ -2,11 +2,13 @@ import os from abc import ABC, abstractmethod -from google.protobuf import json_format +from google.protobuf.json_format import MessageToDict +from google.protobuf.text_format import MessageToString from common_utils import labels -from controller.utils import checker, errors, metrics, utils +from controller.utils import errors, metrics, utils from id_definition.error_codes import CTLResponseCode +from mir.protos import mir_command_pb2 as mir_cmd_pb from proto import backend_pb2 @@ -26,55 +28,50 @@ def __init__(self, request: backend_pb2.GeneralReq, assets_config: dict, async_mode: bool = False, - work_dir: str = '') -> None: + work_dir: str = "") -> None: super().__init__() - # check sandbox_root + # check sandbox_root & task_id if not os.path.isdir(sandbox_root): raise errors.MirCtrError(CTLResponseCode.ARG_VALIDATION_FAILED, f"sandbox root {sandbox_root} not found, abort.") - self._sandbox_root = sandbox_root - ret = checker.check_request(request=request, prerequisites=[checker.Prerequisites.CHECK_TASK_ID]) - if (ret.code != CTLResponseCode.CTR_OK): - raise errors.MirCtrError(CTLResponseCode.ARG_VALIDATION_FAILED, f"task_id {request.task_id} error, abort.") + self._request = request + self._sandbox_root = sandbox_root self._task_id = request.task_id + self._assets_config = assets_config + self._async_mode = async_mode + self._work_dir = work_dir or self._prepare_work_dir() # check user_id - user_id = request.user_id - if user_id: - self._user_id = user_id - self._user_root = os.path.join(sandbox_root, user_id) - self._label_storage_file = os.path.join(self._user_root, labels.default_labels_file_name()) - self._user_labels = labels.get_user_labels_from_storage(self._label_storage_file) + self._user_id = request.user_id + self._user_root = "" + self._label_storage_file = "" + self._user_labels = None + if self._user_id: + self._user_root = os.path.join(sandbox_root, self._user_id) + self._label_storage_file = os.path.join(self._user_root, labels.ids_file_name()) + self._user_labels = labels.UserLabels(storage_file=self._label_storage_file) # check repo_id - repo_id = request.repo_id - if repo_id: - if user_id: - self._repo_id = repo_id - self._repo_root = os.path.join(self._user_root, repo_id) - else: - raise errors.MirCtrError(CTLResponseCode.ARG_VALIDATION_FAILED, - "repo id provided, but miss user id.") + self._repo_id = request.repo_id + self._repo_root = "" + if request.repo_id: + if not self._user_id or not self._user_root: + raise errors.MirCtrError(CTLResponseCode.ARG_VALIDATION_FAILED, "repo id provided, but user id missing.") - self._request = request - self._assets_config = assets_config - self._async_mode = async_mode - self._work_dir = work_dir or self.prepare_work_dir() + self._repo_id = request.repo_id + self._repo_root = os.path.join(self._user_root, request.repo_id) self._send_request_metrics() def _send_request_metrics(self) -> None: - # not record internal requests. - if self._request.req_type in [backend_pb2.RequestType.CMD_GPU_INFO_GET]: + # only record TASK_CREATE requests. + if self._request.req_type != backend_pb2.TASK_CREATE: return metrics_name = backend_pb2.RequestType.Name(self._request.req_type) + '.'
- if self._request.req_type == backend_pb2.TASK_CREATE: - metrics_name += backend_pb2.TaskType.Name(self._request.req_create_task.task_type) - else: - metrics_name += 'None' + metrics_name += mir_cmd_pb.TaskType.Name(self._request.req_create_task.task_type) metrics.send_counter_metrics(metrics_name) # functions about invoke and pre_invoke @@ -84,9 +81,12 @@ def server_invoke(self) -> backend_pb2.GeneralResp: response = self.pre_invoke() if response.code != CTLResponseCode.CTR_OK: + logging.info(f"pre_invoke fails: {response}") return response - return self.invoke() + response = self.invoke() + logging.info(self._parse_response(response)) + return response @abstractmethod def pre_invoke(self) -> backend_pb2.GeneralResp: @@ -96,21 +96,16 @@ def pre_invoke(self) -> backend_pb2.GeneralResp: def invoke(self) -> backend_pb2.GeneralResp: pass - def prepare_work_dir(self) -> str: - # Only create work_dir for specific tasks. - if self._request.req_type not in [ - backend_pb2.RequestType.TASK_CREATE, - backend_pb2.RequestType.CMD_EVALUATE, - backend_pb2.RequestType.CMD_FILTER, - backend_pb2.RequestType.CMD_MERGE, - backend_pb2.RequestType.CMD_INFERENCE, - backend_pb2.RequestType.CMD_SAMPLING, - ]: + def _need_work_dir(self) -> bool: + raise NotImplementedError + + def _prepare_work_dir(self) -> str: + if not self._need_work_dir(): return '' # Prepare working dir. if self._request.req_type == backend_pb2.RequestType.TASK_CREATE: - type_dir = backend_pb2.TaskType.Name(self._request.req_create_task.task_type) + type_dir = mir_cmd_pb.TaskType.Name(self._request.req_create_task.task_type) else: type_dir = backend_pb2.RequestType.Name(self._request.req_type) @@ -121,8 +116,22 @@ def prepare_work_dir(self) -> str: def __repr__(self) -> str: """show infos about this invoker and the request""" - req_info = json_format.MessageToDict(self._request, - preserving_proto_field_name=True, - use_integers_for_enums=True) + request = self._request + if request.req_type in [ + backend_pb2.RequestType.CMD_GPU_INFO_GET, + backend_pb2.RequestType.CMD_LABEL_ADD, + backend_pb2.RequestType.CMD_LABEL_GET, + backend_pb2.RequestType.CMD_PULL_IMAGE, + backend_pb2.RequestType.CMD_REPO_CHECK, + backend_pb2.RequestType.CMD_REPO_CLEAR, + backend_pb2.RequestType.CMD_TERMINATE, + backend_pb2.RequestType.CMD_VERSIONS_GET, + ]: + return f"task_id: {request.task_id} req_type: {request.req_type}" + + pb_dict = MessageToDict(request, preserving_proto_field_name=True, use_integers_for_enums=True) + return (f"{self.__class__}\n request: {pb_dict}\n assets_config: {self._assets_config}\n" + f" async_mode: {self._async_mode}\n work_dir: {self._work_dir}") - return f" request: \n {req_info} \n async_mode: {self._async_mode} \n work_dir: {self._work_dir}" + def _parse_response(self, response: backend_pb2.GeneralResp) -> str: + return f"task id: {self._request.task_id} response: {MessageToString(response, as_one_line=True)}" diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_branch_checkout.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_branch_checkout.py deleted file mode 100644 index 42e1384141..0000000000 --- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_branch_checkout.py +++ /dev/null @@ -1,27 +0,0 @@ -from controller.invoker.invoker_cmd_base import BaseMirControllerInvoker -from controller.utils import checker, utils -from id_definition.error_codes import CTLResponseCode -from proto import backend_pb2 - - -class BranchCheckoutInvoker(BaseMirControllerInvoker): - def 
pre_invoke(self) -> backend_pb2.GeneralResp: - return checker.check_request(request=self._request, - prerequisites=[ - checker.Prerequisites.CHECK_USER_ID, - checker.Prerequisites.CHECK_REPO_ID, - checker.Prerequisites.CHECK_REPO_ROOT_EXIST, - checker.Prerequisites.CHECK_SINGLETON_OP, - ], - mir_root=self._repo_root) - - def invoke(self) -> backend_pb2.GeneralResp: - expected_type = backend_pb2.RequestType.CMD_BRANCH_CHECKOUT - if self._request.req_type != expected_type: - return utils.make_general_response(CTLResponseCode.MIS_MATCHED_INVOKER_TYPE, - f"expected: {expected_type} vs actual: {self._request.req_type}") - - # invoke command - branch_id = self._request.singleton_op - command = [utils.mir_executable(), 'checkout', '--root', self._repo_root, branch_id] - return utils.run_command(command) diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_branch_commit.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_branch_commit.py index 114d021120..da66d85b83 100644 --- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_branch_commit.py +++ b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_branch_commit.py @@ -1,26 +1,22 @@ from controller.invoker.invoker_cmd_base import BaseMirControllerInvoker from controller.utils import checker, utils -from id_definition.error_codes import CTLResponseCode from proto import backend_pb2 class BranchCommitInvoker(BaseMirControllerInvoker): + def _need_work_dir(self) -> bool: + return False + def pre_invoke(self) -> backend_pb2.GeneralResp: - return checker.check_request(request=self._request, + return checker.check_invoker(invoker=self, prerequisites=[ checker.Prerequisites.CHECK_USER_ID, checker.Prerequisites.CHECK_REPO_ID, checker.Prerequisites.CHECK_REPO_ROOT_EXIST, checker.Prerequisites.CHECK_COMMIT_MESSAGE, - ], - mir_root=self._repo_root) + ]) def invoke(self) -> backend_pb2.GeneralResp: - expected_type = backend_pb2.RequestType.CMD_COMMIT - if self._request.req_type != expected_type: - return utils.make_general_response(CTLResponseCode.MIS_MATCHED_INVOKER_TYPE, - f"expected: {expected_type} vs actual: {self._request.req_type}") - # invoke command command = [utils.mir_executable(), 'commit', '--root', self._repo_root, '-m', self._request.commit_message] return utils.run_command(command) diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_branch_create.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_branch_create.py deleted file mode 100644 index 7760aa1697..0000000000 --- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_branch_create.py +++ /dev/null @@ -1,26 +0,0 @@ -from controller.invoker.invoker_cmd_base import BaseMirControllerInvoker -from controller.utils import checker, utils -from id_definition.error_codes import CTLResponseCode -from proto import backend_pb2 - - -class BranchCreateInvoker(BaseMirControllerInvoker): - def pre_invoke(self) -> backend_pb2.GeneralResp: - return checker.check_request(request=self._request, - prerequisites=[ - checker.Prerequisites.CHECK_USER_ID, - checker.Prerequisites.CHECK_REPO_ID, - checker.Prerequisites.CHECK_REPO_ROOT_EXIST, - checker.Prerequisites.CHECK_SINGLETON_OP, - ], - mir_root=self._repo_root) - - def invoke(self) -> backend_pb2.GeneralResp: - expected_type = backend_pb2.RequestType.CMD_BRANCH_CREATE - if self._request.req_type != expected_type: - return utils.make_general_response(CTLResponseCode.MIS_MATCHED_INVOKER_TYPE, - f"expected: {expected_type} vs actual: 
{self._request.req_type}") - - branch_id = self._request.singleton_op - command = [utils.mir_executable(), 'checkout', '--root', self._repo_root, '-b', branch_id] - return utils.run_command(command) diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_branch_delete.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_branch_delete.py deleted file mode 100644 index 84e3cf1bc7..0000000000 --- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_branch_delete.py +++ /dev/null @@ -1,26 +0,0 @@ -from controller.invoker.invoker_cmd_base import BaseMirControllerInvoker -from controller.utils import checker, utils -from id_definition.error_codes import CTLResponseCode -from proto import backend_pb2 - - -class BranchDeleteInvoker(BaseMirControllerInvoker): - def pre_invoke(self) -> backend_pb2.GeneralResp: - return checker.check_request(request=self._request, - prerequisites=[ - checker.Prerequisites.CHECK_USER_ID, - checker.Prerequisites.CHECK_REPO_ID, - checker.Prerequisites.CHECK_REPO_ROOT_EXIST, - checker.Prerequisites.CHECK_SINGLETON_OP, - ], - mir_root=self._repo_root) - - def invoke(self) -> backend_pb2.GeneralResp: - expected_type = backend_pb2.RequestType.CMD_BRANCH_DEL - if self._request.req_type != expected_type: - return utils.make_general_response(CTLResponseCode.MIS_MATCHED_INVOKER_TYPE, - f"expected: {expected_type} vs actual: {self._request.req_type}") - - force_flag = "-D" if self._request.force else "-d" - cmd = [utils.mir_executable(), 'branch', '--root', self._repo_root, force_flag, self._request.singleton_op] - return utils.run_command(cmd) diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_branch_list.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_branch_list.py deleted file mode 100644 index e1d33fe276..0000000000 --- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_branch_list.py +++ /dev/null @@ -1,35 +0,0 @@ -from controller.invoker.invoker_cmd_base import BaseMirControllerInvoker -from controller.utils import checker, utils -from id_definition.error_codes import CTLResponseCode -from proto import backend_pb2 - - -class BranchListInvoker(BaseMirControllerInvoker): - def pre_invoke(self) -> backend_pb2.GeneralResp: - return checker.check_request(request=self._request, - prerequisites=[ - checker.Prerequisites.CHECK_USER_ID, - checker.Prerequisites.CHECK_REPO_ID, - checker.Prerequisites.CHECK_REPO_ROOT_EXIST, - ], - mir_root=self._repo_root) - - def invoke(self) -> backend_pb2.GeneralResp: - expected_type = backend_pb2.RequestType.CMD_BRANCH_LIST - if self._request.req_type != expected_type: - return utils.make_general_response(CTLResponseCode.MIS_MATCHED_INVOKER_TYPE, - f"expected: {expected_type} vs actual: {self._request.req_type}") - - command = [utils.mir_executable(), 'branch', '--root', self._repo_root] - response = utils.run_command(command) - - if response.code == 0 and response.message: - message_lines = response.message.splitlines() - for message_line in message_lines: - # remove empty lines - message_line = message_line.strip() - if message_line: - response.ext_strs.append(message_line) - response.message = "" - - return response diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_evaluate.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_evaluate.py index 973898134a..9a0ccc7fe1 100644 --- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_evaluate.py +++ 
b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_evaluate.py @@ -1,75 +1,51 @@ from controller.invoker.invoker_cmd_base import BaseMirControllerInvoker -from controller.utils import checker, revs, utils +from controller.utils import checker, utils from id_definition.error_codes import CTLResponseCode +from mir.tools.det_eval_ctl_ops import det_evaluate_datasets +from mir.tools.revs_parser import parse_single_arg_rev from proto import backend_pb2 class EvaluateInvoker(BaseMirControllerInvoker): - """ - invoker for command evaluate - request.in_dataset_ids: predictions - request.singleton_op: ground truth - request.task_id: task hash for this evaluate command - request.evaluate_config.conf_thr: confidence threshold - request.evaluate_config.iou_thrs_interval: from:to:step, default is '0.5:1.0:0.05', end point excluded - """ + def _need_work_dir(self) -> bool: + return False + def pre_invoke(self) -> backend_pb2.GeneralResp: - checker_resp = checker.check_request(request=self._request, + checker_resp = checker.check_invoker(invoker=self, prerequisites=[ checker.Prerequisites.CHECK_USER_ID, checker.Prerequisites.CHECK_REPO_ID, checker.Prerequisites.CHECK_REPO_ROOT_EXIST, checker.Prerequisites.CHECK_TASK_ID, - checker.Prerequisites.CHECK_SINGLETON_OP, checker.Prerequisites.CHECK_IN_DATASET_IDS, - ], - mir_root=self._repo_root) + ]) if checker_resp.code != CTLResponseCode.CTR_OK: return checker_resp - conf_thr = self._request.evaluate_config.conf_thr - if conf_thr < 0 or conf_thr >= 1: + ec = self._request.evaluate_config + if ec.conf_thr < 0 or ec.conf_thr >= 1: return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, - f"invalid evaluate conf thr: {conf_thr:.2f}") + f"invalid evaluate conf thr: {ec.conf_thr:.2f}") - iou_thrs_interval: str = self._request.evaluate_config.iou_thrs_interval or '0.5:1.0:0.05' - iou_thrs_interval_list = [float(v) for v in iou_thrs_interval.split(':')] - if len(iou_thrs_interval_list) != 3: + if not ec.iou_thrs_interval: return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, - "invalid evaluate iou thrs interval: {}".format(iou_thrs_interval)) - for v in iou_thrs_interval_list: - if v < 0 or v > 1: - return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, - "invalid evaluate iou thrs interval: {}".format(iou_thrs_interval)) + "invalid evaluate iou thrs interval: {}".format(ec.iou_thrs_interval)) return utils.make_general_response(CTLResponseCode.CTR_OK, "") def invoke(self) -> backend_pb2.GeneralResp: - expected_type = backend_pb2.RequestType.CMD_EVALUATE - if self._request.req_type != expected_type: - return utils.make_general_response(CTLResponseCode.MIS_MATCHED_INVOKER_TYPE, - f"expected: {expected_type} vs actual: {self._request.req_type}") - - ec = self._request.evaluate_config - command = [ - utils.mir_executable(), - 'evaluate', - '--root', - self._repo_root, - '--dst-rev', - revs.join_tvt_branch_tid(branch_id=self._request.task_id, tid=self._request.task_id), - '--src-revs', - revs.build_src_revs(in_src_revs=self._request.in_dataset_ids, his_tid=self._request.his_task_id), - '--gt-rev', - revs.join_tvt_branch_tid(branch_id=self._request.singleton_op, tid=self._request.singleton_op), - '-w', - self._work_dir, - '--conf-thr', - f"{ec.conf_thr:.2f}", - '--iou-thrs', - ec.iou_thrs_interval, - ] - if ec.need_pr_curve: - command.append('--need-pr-curve') - - return utils.run_command(command) + rev_tid = parse_single_arg_rev(self._request.in_dataset_ids[0], need_tid=False) + + evaluation = 
det_evaluate_datasets(mir_root=self._repo_root, + gt_rev_tid=rev_tid, + pred_rev_tid=rev_tid, + evaluate_config=self._request.evaluate_config) + if not evaluation: + return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, "no result generated.") + response = backend_pb2.GeneralResp() + response.code = CTLResponseCode.CTR_OK + response.evaluation.CopyFrom(evaluation) + return response + + def _parse_response(self, response: backend_pb2.GeneralResp) -> str: + return f"Evaluation result config: {response.evaluation.config}" diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_filter.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_filter.py index b4924560f1..a656240ca9 100644 --- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_filter.py +++ b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_filter.py @@ -5,22 +5,22 @@ class FilterBranchInvoker(BaseMirControllerInvoker): + def _need_work_dir(self) -> bool: + return True + def pre_invoke(self) -> backend_pb2.GeneralResp: - return checker.check_request(request=self._request, + return checker.check_invoker(invoker=self, prerequisites=[ checker.Prerequisites.CHECK_USER_ID, checker.Prerequisites.CHECK_REPO_ID, checker.Prerequisites.CHECK_REPO_ROOT_EXIST, checker.Prerequisites.CHECK_DST_DATASET_ID, checker.Prerequisites.CHECK_SINGLE_IN_DATASET_ID, - ], - mir_root=self._repo_root) + ]) def invoke(self) -> backend_pb2.GeneralResp: - expected_type = backend_pb2.RequestType.CMD_FILTER - if self._request.req_type != expected_type: - return utils.make_general_response(CTLResponseCode.MIS_MATCHED_INVOKER_TYPE, - f"expected: {expected_type} vs actual: {self._request.req_type}") + if not self._user_labels: + return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, "invalid _user_labels") # invoke command filter_command = [ @@ -31,11 +31,11 @@ def invoke(self) -> backend_pb2.GeneralResp: ] if self._request.in_class_ids: - filter_command.append('-p') + filter_command.append('--cis') filter_command.append(';'.join( - self._user_labels.get_main_names(class_ids=list(self._request.in_class_ids)))) + self._user_labels.main_name_for_ids(class_ids=list(self._request.in_class_ids)))) if self._request.ex_class_ids: - filter_command.append('-P') + filter_command.append('--ex-cis') filter_command.append(';'.join( - self._user_labels.get_main_names(class_ids=list(self._request.ex_class_ids)))) + self._user_labels.main_name_for_ids(class_ids=list(self._request.ex_class_ids)))) return utils.run_command(filter_command) diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_gpu_info.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_gpu_info.py index 03cdb83f65..c0a1295710 100644 --- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_gpu_info.py +++ b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_gpu_info.py @@ -1,19 +1,17 @@ from controller.invoker.invoker_cmd_base import BaseMirControllerInvoker -from controller.utils import checker, utils, gpu_utils +from controller.utils import checker, gpu_utils from id_definition.error_codes import CTLResponseCode from proto import backend_pb2 class GPUInfoInvoker(BaseMirControllerInvoker): + def _need_work_dir(self) -> bool: + return False + def pre_invoke(self) -> backend_pb2.GeneralResp: - return checker.check_request(request=self._request, prerequisites=[checker.Prerequisites.CHECK_USER_ID]) + return checker.check_invoker(invoker=self, 
prerequisites=[checker.Prerequisites.CHECK_USER_ID]) def invoke(self) -> backend_pb2.GeneralResp: - expected_type = backend_pb2.RequestType.CMD_GPU_INFO_GET - if self._request.req_type != expected_type: - return utils.make_general_response(CTLResponseCode.MIS_MATCHED_INVOKER_TYPE, - f"expected: {expected_type} vs actual: {self._request.req_type}") - available_gpus = gpu_utils.GPUInfo.get_available_gpus() response = backend_pb2.GeneralResp() diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_inference.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_inference.py index 18fc78b795..e6e902d8b7 100644 --- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_inference.py +++ b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_inference.py @@ -1,8 +1,6 @@ import json -import logging import os import shutil -from typing import Dict import yaml from PIL import Image @@ -15,76 +13,52 @@ class InferenceCMDInvoker(BaseMirControllerInvoker): + def _need_work_dir(self) -> bool: + return True + @classmethod - def gen_inference_config(cls, req_inference_config: str, work_dir: str) -> str: + def gen_inference_config(cls, req_inference_config: str, task_context: dict, work_dir: str) -> str: inference_config = yaml.safe_load(req_inference_config) inference_config_file = os.path.join(work_dir, "inference_config.yaml") with open(inference_config_file, "w") as f: - yaml.dump({'executor_config': inference_config}, f) + yaml.dump({'executor_config': inference_config, 'task_context': task_context}, f) return inference_config_file @classmethod - def check_picture(cls, one_picture: str) -> bool: - img = Image.open(one_picture) - img_type = img.format.lower() - if img_type in ["png", "jpeg", "jpg"]: - return True - else: - logging.warning(f"image error: {one_picture}") - return False + def prepare_inference_assets(cls, asset_dir: str, dst_dir: str) -> str: + dst_assets = os.path.join(dst_dir, "assets") + os.makedirs(dst_assets, exist_ok=True) - @classmethod - def prepare_inference_picture(cls, source_path: str, work_dir: str) -> str: - inference_picture_directory = os.path.join(work_dir, "inference_picture") - os.makedirs(inference_picture_directory, exist_ok=True) - - for root, _, files in os.walk(source_path): - for one_pic in files: - media_file = os.path.join(root, one_pic) - if cls.check_picture(media_file): - shutil.copy(media_file, inference_picture_directory) - - media_files = [os.path.join(inference_picture_directory, f) for f in os.listdir(inference_picture_directory)] - index_file = os.path.join(work_dir, "inference_pic_index.txt") + media_files = [] + for root, _, files in os.walk(asset_dir): + for asset_filename in files: + asset_src_file = os.path.join(root, asset_filename) + + if Image.open(asset_src_file).format.lower() in ["png", "jpeg", "jpg"]: + shutil.copy(asset_src_file, dst_assets) + media_files.append(os.path.join(dst_assets, asset_filename)) + + index_file = os.path.join(dst_dir, "index.txt") with open(index_file, "w") as f: f.write("\n".join(media_files)) return index_file - @classmethod - def get_inference_result(cls, work_dir: str) -> Dict: - infer_result_file = os.path.join(work_dir, "out", "infer-result.json") - with open(infer_result_file) as f: - infer_result = json.load(f) - - return infer_result - - @classmethod - def generate_inference_response(cls, inference_result: Dict) -> backend_pb2.GeneralResp: - resp = utils.make_general_response(CTLResponseCode.CTR_OK, "") - result =
dict(imageAnnotations=inference_result["detection"]) - resp_inference = backend_pb2.RespCMDInference() - json_format.ParseDict(result, resp_inference, ignore_unknown_fields=False) - resp.detection.CopyFrom(resp_inference) - - return resp - def pre_invoke(self) -> backend_pb2.GeneralResp: - return checker.check_request( - request=self._request, + return checker.check_invoker( + invoker=self, prerequisites=[checker.Prerequisites.CHECK_USER_ID, checker.Prerequisites.CHECK_REPO_ID], - mir_root=self._repo_root, ) def invoke(self) -> backend_pb2.GeneralResp: - expected_type = backend_pb2.RequestType.CMD_INFERENCE - if self._request.req_type != expected_type: - return utils.make_general_response(CTLResponseCode.MIS_MATCHED_INVOKER_TYPE, - f"expected: {expected_type} vs actual: {self._request.req_type}") + if not self._user_labels: + return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, "invalid _user_labels") - index_file = self.prepare_inference_picture(self._request.asset_dir, self._work_dir) - config_file = self.gen_inference_config(self._request.docker_image_config, self._work_dir) + index_file = self.prepare_inference_assets(asset_dir=self._request.asset_dir, dst_dir=self._work_dir) + config_file = self.gen_inference_config(req_inference_config=self._request.docker_image_config, + task_context={'server_runtime': self._assets_config['server_runtime']}, + work_dir=self._work_dir) self.inference_cmd( repo_root=self._repo_root, @@ -92,19 +66,44 @@ def invoke(self) -> backend_pb2.GeneralResp: config_file=config_file, model_location=self._assets_config["modelskvlocation"], model_hash=self._request.model_hash, + model_stage=self._request.model_stage, index_file=index_file, executor=self._request.singleton_op, ) - inference_result = self.get_inference_result(self._work_dir) - return self.generate_inference_response(inference_result) + infer_result_file = os.path.join(self._work_dir, "out", "infer-result.json") + if not os.path.isfile(infer_result_file): + return utils.make_general_response(CTLResponseCode.DOCKER_IMAGE_ERROR, "inference result not found.") + with open(infer_result_file) as f: + infer_result = json.load(f) + + resp = utils.make_general_response(CTLResponseCode.CTR_OK, "") + detections = infer_result.get("detection") + if not isinstance(detections, dict): + return resp + + # class_id must be re-assigned here, because the raw result comes from an external model.
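# Illustrative sketch of the payload the loop below normalizes; the asset
# name and class are hypothetical, not part of this diff:
#   {"detection": {"some_asset.jpg": {"boxes": [{"class_name": "cat", ...}]}}}
# Executors may emit "annotations" instead of "boxes"; both keys are accepted
# and unified, then each class_name is mapped back to a user-level class_id.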
+ for _, annos_dict in detections.items(): + if "annotations" in annos_dict: + if "boxes" not in annos_dict: + annos_dict["boxes"] = annos_dict["annotations"] + del annos_dict["annotations"] + + annos = annos_dict.get("boxes", []) + for annotation in annos: + annotation["class_id"] = self._user_labels.id_for_names(names=annotation["class_name"], + raise_if_unknown=False)[0][0] + + json_format.ParseDict(dict(image_annotations=detections), resp.detection, ignore_unknown_fields=False) + + return resp @classmethod def inference_cmd(cls, repo_root: str, work_dir: str, model_location: str, config_file: str, model_hash: str, - index_file: str, executor: str) -> backend_pb2.GeneralResp: + model_stage: str, index_file: str, executor: str) -> backend_pb2.GeneralResp: infer_cmd = [ utils.mir_executable(), 'infer', '--root', repo_root, '-w', work_dir, '--model-location', model_location, - '--index-file', index_file, '--model-hash', model_hash, '--task-config-file', config_file, "--executor", - executor + '--index-file', index_file, '--model-hash', f"{model_hash}@{model_stage}", '--task-config-file', + config_file, "--executor", executor ] return utils.run_command(infer_cmd) diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_init.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_init.py index dbc3b0f821..76212973cd 100644 --- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_init.py +++ b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_init.py @@ -9,29 +9,26 @@ class InitInvoker(BaseMirControllerInvoker): + def _need_work_dir(self) -> bool: + return False + def pre_invoke(self) -> backend_pb2.GeneralResp: - return checker.check_request( - request=self._request, + return checker.check_invoker( + invoker=self, prerequisites=[ checker.Prerequisites.CHECK_USER_ID, checker.Prerequisites.CHECK_REPO_ID, checker.Prerequisites.CHECK_REPO_ROOT_NOT_EXIST, ], - mir_root=self._repo_root, ) def invoke(self) -> backend_pb2.GeneralResp: - expected_type = [backend_pb2.RequestType.CMD_INIT, backend_pb2.RequestType.REPO_CREATE] - if self._request.req_type not in expected_type: - return utils.make_general_response(CTLResponseCode.MIS_MATCHED_INVOKER_TYPE, - f"expected: {expected_type} vs actual: {self._request.req_type}") - repo_path = pathlib.Path(self._repo_root) repo_path.mkdir(parents=True, exist_ok=True) link_dst_dir = os.path.join(self._repo_root, '.mir') os.makedirs(link_dst_dir, exist_ok=True) - link_dst_file = os.path.join(link_dst_dir, labels.default_labels_file_name()) + link_dst_file = os.path.join(link_dst_dir, labels.ids_file_name()) os.link(self._label_storage_file, link_dst_file) command = [utils.mir_executable(), 'init', '--root', self._repo_root] diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_label_add.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_label_add.py index 691de21c5d..a49ee98145 100644 --- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_label_add.py +++ b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_label_add.py @@ -6,21 +6,24 @@ class LabelAddInvoker(BaseMirControllerInvoker): + def _need_work_dir(self) -> bool: + return False + def pre_invoke(self) -> backend_pb2.GeneralResp: - return checker.check_request( - request=self._request, + return checker.check_invoker( + invoker=self, prerequisites=[checker.Prerequisites.CHECK_USER_ID], ) def invoke(self) -> backend_pb2.GeneralResp: - expected_type = 
backend_pb2.RequestType.CMD_LABEL_ADD - if self._request.req_type != expected_type: - return utils.make_general_response(CTLResponseCode.MIS_MATCHED_INVOKER_TYPE, - f"expected: {expected_type} vs actual: {self._request.req_type}") - response = utils.make_general_response(CTLResponseCode.CTR_OK, "") - conflict_labels = labels.merge_labels(label_storage_file=self._label_storage_file, - new_labels=labels.parse_labels_from_proto(self._request.label_collection), - check_only=self._request.check_only) - response.label_collection.CopyFrom(conflict_labels.to_proto()) + user_labels = labels.UserLabels(storage_file=self._label_storage_file) + conflict_labels = user_labels.upsert_labels( + new_labels=labels.parse_labels_from_proto(self._request.label_collection), + check_only=self._request.check_only + ) + response.label_collection.CopyFrom(labels.userlabels_to_proto(conflict_labels)) return response + + def _parse_response(self, response: backend_pb2.GeneralResp) -> str: + return f"LabelAdd conflict: {len(response.label_collection.labels)}, {response.label_collection.ymir_version}" diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_label_get.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_label_get.py index 6404ed6d94..1217d3163b 100644 --- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_label_get.py +++ b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_label_get.py @@ -1,3 +1,4 @@ +from common_utils import labels from controller.invoker.invoker_cmd_base import BaseMirControllerInvoker from controller.utils import utils, checker from id_definition.error_codes import CTLResponseCode @@ -5,18 +6,27 @@ class LabelGetInvoker(BaseMirControllerInvoker): + def _need_work_dir(self) -> bool: + return False + def pre_invoke(self) -> backend_pb2.GeneralResp: - return checker.check_request( - request=self._request, + return checker.check_invoker( + invoker=self, prerequisites=[checker.Prerequisites.CHECK_USER_ID], ) def invoke(self) -> backend_pb2.GeneralResp: + if not self._user_labels: + return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, "invalid _user_labels") + expected_type = backend_pb2.RequestType.CMD_LABEL_GET if self._request.req_type != expected_type: return utils.make_general_response(CTLResponseCode.MIS_MATCHED_INVOKER_TYPE, f"expected: {expected_type} vs actual: {self._request.req_type}") response = utils.make_general_response(CTLResponseCode.CTR_OK, "") - response.label_collection.CopyFrom(self._user_labels.to_proto()) + response.label_collection.CopyFrom(labels.userlabels_to_proto(self._user_labels)) return response + + def _parse_response(self, response: backend_pb2.GeneralResp) -> str: + return f"LabelGet: {len(response.label_collection.labels)}, version: {response.label_collection.ymir_version}" diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_log.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_log.py deleted file mode 100644 index ff846b349e..0000000000 --- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_log.py +++ /dev/null @@ -1,46 +0,0 @@ -import logging -import re - -from controller.invoker.invoker_cmd_base import BaseMirControllerInvoker -from controller.utils import checker, utils -from id_definition.error_codes import CTLResponseCode -from proto import backend_pb2 - - -class LogInvoker(BaseMirControllerInvoker): - def pre_invoke(self) -> backend_pb2.GeneralResp: - return checker.check_request(request=self._request, - 
prerequisites=[ - checker.Prerequisites.CHECK_USER_ID, - checker.Prerequisites.CHECK_REPO_ID, - checker.Prerequisites.CHECK_REPO_ROOT_EXIST, - ], - mir_root=self._repo_root) - - def invoke(self) -> backend_pb2.GeneralResp: - expected_type = backend_pb2.RequestType.CMD_LOG - if self._request.req_type != expected_type: - return utils.make_general_response(CTLResponseCode.MIS_MATCHED_INVOKER_TYPE, - f"expected: {expected_type} vs actual: {self._request.req_type}") - - command = [utils.mir_executable(), 'log', '--root', self._repo_root] - response = utils.run_command(command) - - if response.code == 0 and response.message: - message_lines = response.message.splitlines() - logging.info(f"message_lines: {message_lines}") - log_components = [] # List[str] - for line in message_lines: - if re.fullmatch(r"^commit +[0-9a-z]{40}$", line): - # finds a new part - commit_id = line.split(" ")[-1] - log_component = "commit {}".format(commit_id) - log_components.append(log_component) - else: - # belongs to previous part - log_component = log_components[-1] - log_component = "{}\n{}".format(log_component, line) - log_components[-1] = log_component - response.ext_strs.extend(log_components) - - return response diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_merge.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_merge.py index b696a34e99..486ad15e1a 100644 --- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_merge.py +++ b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_merge.py @@ -5,26 +5,23 @@ class MergeInvoker(BaseMirControllerInvoker): + def _need_work_dir(self) -> bool: + return True + def pre_invoke(self) -> backend_pb2.GeneralResp: if not self._request.in_dataset_ids and not self._request.ex_dataset_ids: return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, 'one of include/exclude branches is required.') - return checker.check_request(request=self._request, + return checker.check_invoker(invoker=self, prerequisites=[ checker.Prerequisites.CHECK_USER_ID, checker.Prerequisites.CHECK_REPO_ID, checker.Prerequisites.CHECK_REPO_ROOT_EXIST, checker.Prerequisites.CHECK_DST_DATASET_ID, - ], - mir_root=self._repo_root) + ]) def invoke(self) -> backend_pb2.GeneralResp: - expected_type = backend_pb2.RequestType.CMD_MERGE - if self._request.req_type != expected_type: - return utils.make_general_response(CTLResponseCode.MIS_MATCHED_INVOKER_TYPE, - f"expected: {expected_type} vs actual: {self._request.req_type}") - command = [ utils.mir_executable(), 'merge', '--root', self._repo_root, '--dst-rev', revs.join_tvt_branch_tid(branch_id=self._request.dst_dataset_id, tid=self._task_id), '-s', diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_pull_image.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_pull_image.py index f85aa69b49..28bca8eaf2 100644 --- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_pull_image.py +++ b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_pull_image.py @@ -13,11 +13,11 @@ class ImageHandler(BaseMirControllerInvoker): + def _need_work_dir(self) -> bool: + return False + def pre_invoke(self) -> backend_pb2.GeneralResp: - return checker.check_request( - request=self._request, - prerequisites=[checker.Prerequisites.CHECK_USER_ID], - ) + return checker.check_invoker(invoker=self, prerequisites=[checker.Prerequisites.CHECK_USER_ID]) @staticmethod def convert_image_config(raw_image_config: str) -> Optional[str]: @@ 
-34,11 +34,6 @@ def convert_image_config(raw_image_config: str) -> Optional[str]: return json.dumps(image_config) def invoke(self) -> backend_pb2.GeneralResp: - expected_type = backend_pb2.RequestType.CMD_PULL_IMAGE - if self._request.req_type != expected_type: - return utils.make_general_response(CTLResponseCode.MIS_MATCHED_INVOKER_TYPE, - f"expected: {expected_type} vs actual: {self._request.req_type}") - check_image_command = ['docker', 'image', 'inspect', self._request.singleton_op, '--format', 'ignore_me'] check_response = utils.run_command(check_image_command) if check_response.code != CTLResponseCode.CTR_OK: @@ -61,6 +56,16 @@ def invoke(self) -> backend_pb2.GeneralResp: if image_config: response.docker_image_config[image_type] = image_config + # livecode + config_command = [ + 'docker', 'run', '--rm', self._request.singleton_op, + 'cat', common_task_config.IMAGE_LIVECODE_CONFIG_PATH + ] + config_response = utils.run_command(config_command) + livecode_config = self.convert_image_config(config_response.message) + if livecode_config: + response.enable_livecode = True + if len(response.docker_image_config) == 0: return utils.make_general_response( CTLResponseCode.DOCKER_IMAGE_ERROR, diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_repo_check.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_repo_check.py index 60b4f4d20d..294d3345b7 100644 --- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_repo_check.py +++ b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_repo_check.py @@ -6,23 +6,20 @@ class RepoCheckInvoker(BaseMirControllerInvoker): + def _need_work_dir(self) -> bool: + return False + def pre_invoke(self) -> backend_pb2.GeneralResp: - return checker.check_request( - request=self._request, + return checker.check_invoker( + invoker=self, prerequisites=[ checker.Prerequisites.CHECK_USER_ID, checker.Prerequisites.CHECK_REPO_ID, checker.Prerequisites.CHECK_REPO_ROOT_EXIST, ], - mir_root=self._repo_root, ) def invoke(self) -> backend_pb2.GeneralResp: - expected_type = backend_pb2.RequestType.CMD_REPO_CHECK - if self._request.req_type != expected_type: - return utils.make_general_response(CTLResponseCode.MIS_MATCHED_INVOKER_TYPE, - f"expected: {expected_type} vs actual: {self._request.req_type}") - response = backend_pb2.GeneralResp() response.code = CTLResponseCode.CTR_OK command = [utils.mir_executable(), 'status', '--root', self._repo_root] diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_repo_clear.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_repo_clear.py index b2172ec2c7..0b2af50539 100644 --- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_repo_clear.py +++ b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_repo_clear.py @@ -1,5 +1,5 @@ from controller.invoker.invoker_cmd_base import BaseMirControllerInvoker -from controller.utils import checker, invoker_call, utils +from controller.utils import checker, invoker_call from id_definition.error_codes import CTLResponseCode from controller.invoker.invoker_cmd_branch_commit import BranchCommitInvoker from controller.invoker.invoker_cmd_repo_check import RepoCheckInvoker @@ -7,23 +7,20 @@ class RepoClearInvoker(BaseMirControllerInvoker): + def _need_work_dir(self) -> bool: + return False + def pre_invoke(self) -> backend_pb2.GeneralResp: - return checker.check_request( - request=self._request, + return checker.check_invoker( + invoker=self, prerequisites=[ 
checker.Prerequisites.CHECK_USER_ID, checker.Prerequisites.CHECK_REPO_ID, checker.Prerequisites.CHECK_REPO_ROOT_EXIST, ], - mir_root=self._repo_root, ) def invoke(self) -> backend_pb2.GeneralResp: - expected_type = backend_pb2.RequestType.CMD_REPO_CLEAR - if self._request.req_type != expected_type: - return utils.make_general_response(CTLResponseCode.MIS_MATCHED_INVOKER_TYPE, - f"expected: {expected_type} vs actual: {self._request.req_type}") - request = self._request check_ret = invoker_call.make_invoker_cmd_call( invoker=RepoCheckInvoker, diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_sampling.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_sampling.py index 4442cae21f..6769b058ec 100644 --- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_sampling.py +++ b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_sampling.py @@ -1,27 +1,23 @@ from controller.invoker.invoker_cmd_base import BaseMirControllerInvoker from controller.utils import checker, revs, utils -from id_definition.error_codes import CTLResponseCode from proto import backend_pb2 class SamplingInvoker(BaseMirControllerInvoker): + def _need_work_dir(self) -> bool: + return True + def pre_invoke(self) -> backend_pb2.GeneralResp: - return checker.check_request(request=self._request, + return checker.check_invoker(invoker=self, prerequisites=[ checker.Prerequisites.CHECK_USER_ID, checker.Prerequisites.CHECK_REPO_ID, checker.Prerequisites.CHECK_REPO_ROOT_EXIST, checker.Prerequisites.CHECK_DST_DATASET_ID, checker.Prerequisites.CHECK_TASK_ID, - ], - mir_root=self._repo_root) + ]) def invoke(self) -> backend_pb2.GeneralResp: - expected_type = backend_pb2.RequestType.CMD_SAMPLING - if self._request.req_type != expected_type: - return utils.make_general_response(CTLResponseCode.MIS_MATCHED_INVOKER_TYPE, - f"expected: {expected_type} vs actual: {self._request.req_type}") - command = [ utils.mir_executable(), 'sampling', diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_sandbox_version.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_sandbox_version.py new file mode 100644 index 0000000000..f06883163c --- /dev/null +++ b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_sandbox_version.py @@ -0,0 +1,20 @@ +from controller.invoker.invoker_cmd_base import BaseMirControllerInvoker +from controller.utils import utils +from common_utils.sandbox_util import detect_sandbox_src_versions +from id_definition.error_codes import CTLResponseCode +from proto import backend_pb2 + + +class SandboxVersionInvoker(BaseMirControllerInvoker): + def _need_work_dir(self) -> bool: + return False + + def pre_invoke(self) -> backend_pb2.GeneralResp: + return utils.make_general_response(CTLResponseCode.CTR_OK, "") + + def invoke(self) -> backend_pb2.GeneralResp: + response = backend_pb2.GeneralResp() + response.code = CTLResponseCode.CTR_OK + response.sandbox_versions[:] = detect_sandbox_src_versions(self._sandbox_root) + + return response diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_terminate.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_terminate.py index 06fd424ae3..70d4fb7501 100644 --- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_terminate.py +++ b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_terminate.py @@ -7,13 +7,17 @@ from controller.utils import checker, utils from controller.utils.redis import rds from 
id_definition.error_codes import CTLResponseCode +from mir.protos import mir_command_pb2 as mir_cmd_pb from proto import backend_pb2 class CMDTerminateInvoker(BaseMirControllerInvoker): + def _need_work_dir(self) -> bool: + return False + def pre_invoke(self) -> backend_pb2.GeneralResp: - return checker.check_request( - request=self._request, + return checker.check_invoker( + invoker=self, prerequisites=[checker.Prerequisites.CHECK_USER_ID], ) @@ -24,15 +28,10 @@ def get_project_id_by_task_id(self, task_id: str) -> int: return content["project_id"] # type: ignore def invoke(self) -> backend_pb2.GeneralResp: - expected_type = backend_pb2.RequestType.CMD_TERMINATE - if self._request.req_type != expected_type: - return utils.make_general_response(CTLResponseCode.MIS_MATCHED_INVOKER_TYPE, - f"expected: {expected_type} vs actual: {self._request.req_type}") - if self._request.terminated_task_type in [ - backend_pb2.TaskType.TaskTypeTraining, - backend_pb2.TaskType.TaskTypeMining, - backend_pb2.TaskType.TaskTypeDatasetInfer, + mir_cmd_pb.TaskType.TaskTypeTraining, + mir_cmd_pb.TaskType.TaskTypeMining, + mir_cmd_pb.TaskType.TaskTypeDatasetInfer, ]: container_command = ['docker', 'rm', '-f', self._request.executant_name] container_response = utils.run_command(container_command) @@ -40,7 +39,7 @@ def invoke(self) -> backend_pb2.GeneralResp: logging.warning(container_response.message) sentry_sdk.capture_message(container_response.message) return container_response - elif self._request.terminated_task_type == backend_pb2.TaskType.TaskTypeLabel: + elif self._request.terminated_task_type == mir_cmd_pb.TaskType.TaskTypeLabel: project_id = self.get_project_id_by_task_id(self._request.executant_name) label_instance = utils.create_label_instance() label_instance.delete_unlabeled_task(project_id) diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_user_create.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_user_create.py index 4213ef8c57..ec19cdb993 100644 --- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_user_create.py +++ b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_user_create.py @@ -1,4 +1,4 @@ -import pathlib +import os from common_utils import labels from controller.invoker.invoker_cmd_base import BaseMirControllerInvoker @@ -8,24 +8,21 @@ class UserCreateInvoker(BaseMirControllerInvoker): + def _need_work_dir(self) -> bool: + return False + def pre_invoke(self) -> backend_pb2.GeneralResp: - return checker.check_request( - request=self._request, + return checker.check_invoker( + invoker=self, prerequisites=[ checker.Prerequisites.CHECK_USER_ID, ], - mir_root='', ) def invoke(self) -> backend_pb2.GeneralResp: - expected_type = backend_pb2.RequestType.USER_CREATE - if self._request.req_type != expected_type: - return utils.make_general_response(CTLResponseCode.MIS_MATCHED_INVOKER_TYPE, - f"expected: {expected_type} vs actual: {self._request.req_type}") - # create user root - pathlib.Path(self._user_root).mkdir(parents=True, exist_ok=True) + os.makedirs(self._user_root, exist_ok=True) # create label file - labels.create_empty(label_storage_file=self._label_storage_file) + labels.load_or_create_userlabels(label_storage_file=self._label_storage_file, create_ok=True) return utils.make_general_response(code=CTLResponseCode.CTR_OK, message='') diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_base.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_base.py index 
b212672e2b..3964bc84f2 100644 --- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_base.py +++ b/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_base.py @@ -1,7 +1,8 @@ +from distutils.util import strtobool import logging import os import threading -from typing import Dict, List +from typing import Any, Callable, Dict, List, Optional, Tuple import yaml @@ -13,19 +14,26 @@ from proto import backend_pb2 +SubTaskType = Callable[ + [backend_pb2.GeneralReq, UserLabels, str, Dict[str, str], str, str, str, str, Optional[str], List[str]], + backend_pb2.GeneralResp] + + class TaskBaseInvoker(BaseMirControllerInvoker): + def _need_work_dir(self) -> bool: + return True + def pre_invoke(self) -> backend_pb2.GeneralResp: # still in sync mode. - checker_ret = checker.check_request(request=self._request, + checker_ret = checker.check_invoker(invoker=self, prerequisites=[ checker.Prerequisites.CHECK_USER_ID, checker.Prerequisites.CHECK_REPO_ID, checker.Prerequisites.CHECK_REPO_ROOT_EXIST, - ], - mir_root=self._repo_root) + ]) if checker_ret.code != CTLResponseCode.CTR_OK: return checker_ret - return self.task_pre_invoke(request=self._request, sandbox_root=self._sandbox_root) + return self.task_pre_invoke(request=self._request) @classmethod def subtask_work_dir(cls, master_work_dir: str, subtask_id: str) -> str: @@ -45,38 +53,33 @@ def subtask_monitor_file(cls, master_work_dir: str, subtask_id: str) -> str: return sub_monitor_file @classmethod - def create_subtask_workdir_monitor( + def _register_subtask_monitor( cls, task_id: str, - user_id: str, master_work_dir: str, - subtask_weights: List[float], + sub_task_id_weights: Dict[str, float], register_monitor: bool, ) -> None: - if not (subtask_weights and task_id and user_id and master_work_dir): + if not (sub_task_id_weights and task_id and master_work_dir): raise errors.MirCtrError(CTLResponseCode.ARG_VALIDATION_FAILED, - "create_subtask_workdir_monitor args error, abort.") + "_register_subtask_monitor args error, abort.") delta = 0.001 - if abs(sum(subtask_weights) - 1) >= delta: + if abs(sum(sub_task_id_weights.values()) - 1) >= delta: raise errors.MirCtrError(CTLResponseCode.ARG_VALIDATION_FAILED, "invalid weights, abort.") sub_monitor_files_weights = {} - for idx in range(len(subtask_weights)): - subtask_id = utils.sub_task_id(task_id, idx) - subtask_monitor_file = cls.subtask_monitor_file(master_work_dir=master_work_dir, subtask_id=subtask_id) + # format: (task_func, weight, sub_task_id) + for sub_task_id in sub_task_id_weights: + subtask_monitor_file = cls.subtask_monitor_file(master_work_dir=master_work_dir, subtask_id=sub_task_id) PercentLogHandler.write_percent_log(log_file=subtask_monitor_file, - tid=utils.sub_task_id(task_id, idx), + tid=sub_task_id, percent=0.0, state=LogState.RUNNING) - sub_monitor_files_weights[subtask_monitor_file] = subtask_weights[idx] + sub_monitor_files_weights[subtask_monitor_file] = sub_task_id_weights[sub_task_id] logging.info(f"task {task_id} logging weights:\n{sub_monitor_files_weights}\n") if register_monitor: - tasks_util.register_monitor_log( - task_id=task_id, - user_id=user_id, - log_path_weights=sub_monitor_files_weights, - ) + tasks_util.register_monitor_log(task_id=task_id, log_path_weights=sub_monitor_files_weights) return @staticmethod @@ -86,51 +89,76 @@ def gen_executor_config_path(work_dir: str) -> str: return os.path.join(work_dir, "task_config.yaml") @staticmethod - def gen_executor_config_lock_gpus(req_executor_config: str, class_names: List, task_parameters: str, 
- output_config_file: str) -> bool: + def gen_executor_config_lock_gpus(req_executor_config: str, + class_names: List, + task_parameters: str, + output_config_file: str, + assets_config: Dict = {}, + preprocess: Optional[str] = None) -> bool: executor_config = yaml.safe_load(req_executor_config) - task_context = {} + preprocess_config = yaml.safe_load(preprocess) if preprocess else None + task_context: Dict[str, Any] = {} if class_names: executor_config["class_names"] = class_names - # when gpu_count > 0, use gpu model - gpu_count = executor_config["gpu_count"] - if gpu_count > 0: + if task_parameters: + task_context["task_parameters"] = task_parameters + + if preprocess_config: + task_context["preprocess"] = preprocess_config + + task_context['server_runtime'] = assets_config['server_runtime'] + + gpu_count = executor_config.get("gpu_count", 0) + executor_config["gpu_id"] = ",".join([str(i) for i in range(gpu_count)]) + + # Openpai enabled + if strtobool(str(executor_config.get("openpai_enable", "False"))): + openpai_host = assets_config.get("openpai_host", None) + openpai_token = assets_config.get("openpai_token", None) + openpai_storage = assets_config.get("openpai_storage", None) + openpai_user = assets_config.get("openpai_user", "") + openpai_cluster = assets_config.get("openpai_cluster") + openpai_gputype = assets_config.get("openpai_gputype") + logging.info(f"OpenPAI host: {openpai_host}, token: {openpai_token}, " + f"storage: {openpai_storage}, user: {openpai_user}, " + f"cluster: {openpai_cluster}, gpu_type: {openpai_gputype}") + + if not (openpai_host and openpai_token and openpai_storage and openpai_user): + raise errors.MirCtrError( + CTLResponseCode.INVOKER_INVALID_ARGS, + "openpai enabled with invalid host, token, storage or user", + ) + task_context["openpai_enable"] = True + task_context["openpai_host"] = openpai_host + task_context["openpai_token"] = openpai_token + task_context["openpai_storage"] = openpai_storage + task_context["openpai_user"] = openpai_user + task_context["openpai_cluster"] = openpai_cluster + task_context["openpai_gputype"] = openpai_gputype + + task_context["available_gpu_id"] = executor_config["gpu_id"] + else: + # lock local gpus.
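# Presumably find_gpu_ids_by_config returns None when it cannot lock the
# requested number of GPUs, and an empty string when gpu_count is 0; the
# `gpu_ids is None` check below (instead of the old `not gpu_ids`) therefore
# lets CPU-only configs (gpu_count == 0) proceed rather than fail here.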
gpu_ids = gpu_utils.GPUInfo().find_gpu_ids_by_config(gpu_count, lock_gpu=True) - if not gpu_ids: + if gpu_ids is None: return False - task_context["available_gpu_id"] = gpu_ids - executor_config['gpu_id'] = ','.join([str(i) for i in range(gpu_count)]) - else: - task_context["available_gpu_id"] = '' - executor_config['gpu_id'] = '' - - if task_parameters: - task_context['task_parameters'] = task_parameters - - config = {'executor_config': executor_config, 'task_context': task_context} with open(output_config_file, "w") as f: - yaml.dump(config, f) + yaml.safe_dump(dict( + executor_config=executor_config, + task_context=task_context, + ), f) return True def invoke(self) -> backend_pb2.GeneralResp: - expected_type = backend_pb2.RequestType.TASK_CREATE - if self._request.req_type != expected_type: - return utils.make_general_response(CTLResponseCode.MIS_MATCHED_INVOKER_TYPE, - f"expected: {expected_type} vs actual: {self._request.req_type}") - self.create_subtask_workdir_monitor(task_id=self._task_id, - user_id=self._user_id, - master_work_dir=self._work_dir, - subtask_weights=self.subtask_weights(), - register_monitor=(not self._request.req_create_task.no_task_monitor)) - if self._async_mode: thread = threading.Thread(target=self.task_invoke, args=( + self._task_id, self._sandbox_root, self._repo_root, self._assets_config, @@ -142,7 +170,8 @@ def invoke(self) -> backend_pb2.GeneralResp: thread.start() return utils.make_general_response(CTLResponseCode.CTR_OK, "") else: - return self.task_invoke(sandbox_root=self._sandbox_root, + return self.task_invoke(task_id=self._task_id, + sandbox_root=self._sandbox_root, repo_root=self._repo_root, assets_config=self._assets_config, working_dir=self._work_dir, @@ -150,60 +179,61 @@ def invoke(self) -> backend_pb2.GeneralResp: request=self._request) @classmethod - def task_invoke(cls, sandbox_root: str, repo_root: str, assets_config: Dict[str, str], working_dir: str, + def task_invoke(cls, task_id: str, sandbox_root: str, repo_root: str, assets_config: Dict[str, + str], working_dir: str, user_labels: UserLabels, request: backend_pb2.GeneralReq) -> backend_pb2.GeneralResp: - subtask_weights = cls.subtask_weights() - previous_subtask_id = None - # revsersed, to makesure the last subtask idx is 0. - for subtask_idx in reversed(range(len(subtask_weights))): - logging.info(f"processing subtask {subtask_idx}") - - subtask_id = utils.sub_task_id(request.task_id, subtask_idx) + sub_tasks = cls.register_subtasks(request) + if not sub_tasks: + return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, 'empty ops') + + # append subtask_id, in reversed order, to make sure the last subtask idx is 0.
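# Hypothetical walk-through: with two sub tasks [merge, filter], enumerate
# yields subtask_idx 0 and 1, giving id indices (2 - 1 - 0) = 1 for merge and
# (2 - 1 - 1) = 0 for filter, so the last-executed subtask always carries
# idx 0 -- the same contract the old reversed(range(...)) loop provided.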
+ # format: (task_func, weight, sub_task_id) + sub_tasks_join = [(sub_task[0], sub_task[1], utils.sub_task_id(request.task_id, + len(sub_tasks) - 1 - subtask_idx)) + for subtask_idx, sub_task in enumerate(sub_tasks)] + + sub_task_id_weights: Dict[str, float] = {} + for sub_task in sub_tasks_join: + sub_task_id_weights[sub_task[2]] = sub_task[1] + cls._register_subtask_monitor(task_id=task_id, + master_work_dir=working_dir, + sub_task_id_weights=sub_task_id_weights, + register_monitor=(not request.req_create_task.no_task_monitor)) + + in_dataset_ids: List[str] = request.in_dataset_ids + his_task_id: Optional[str] = None + if in_dataset_ids: + his_task_id = in_dataset_ids[0] + for sub_task in sub_tasks_join: + subtask_id = sub_task[2] + logging.info(f"processing subtask {subtask_id}") subtask_work_dir = cls.subtask_work_dir(master_work_dir=working_dir, subtask_id=subtask_id) - subtask_func_name = f"subtask_invoke_{subtask_idx}" - subtask_func = getattr(cls, subtask_func_name) - ret = subtask_func( - sandbox_root=sandbox_root, - repo_root=repo_root, - assets_config=assets_config, - request=request, - subtask_id=subtask_id, - subtask_workdir=subtask_work_dir, - previous_subtask_id=previous_subtask_id, - user_labels=user_labels, + ret = sub_task[0]( + request, + user_labels, + sandbox_root, + assets_config, + repo_root, + task_id, + subtask_id, + subtask_work_dir, + his_task_id, + in_dataset_ids, ) if ret.code != CTLResponseCode.CTR_OK: - logging.info(f"subtask failed: {subtask_func_name}\nret: {ret}") + logging.info(f"subtask failed: {subtask_id}\nret: {ret}") return ret - previous_subtask_id = subtask_id + his_task_id = subtask_id + in_dataset_ids = [task_id] return ret - def task_pre_invoke(self, sandbox_root: str, request: backend_pb2.GeneralReq) -> backend_pb2.GeneralResp: - raise NotImplementedError - - @classmethod - def subtask_weights(cls) -> List: - # Subtasks are called in reversed order of index so the index 0 submask is last called, - # as a result, the weight list should also be organized in reversed index order. - raise NotImplementedError - - @classmethod - def subtask_invoke_2(cls, sandbox_root: str, repo_root: str, assets_config: Dict[str, str], - request: backend_pb2.GeneralReq, subtask_id: str, subtask_workdir: str, - previous_subtask_id: str, user_labels: UserLabels) -> backend_pb2.GeneralResp: - raise NotImplementedError - - @classmethod - def subtask_invoke_1(cls, sandbox_root: str, repo_root: str, assets_config: Dict[str, str], - request: backend_pb2.GeneralReq, subtask_id: str, subtask_workdir: str, - previous_subtask_id: str, user_labels: UserLabels) -> backend_pb2.GeneralResp: + def task_pre_invoke(self, request: backend_pb2.GeneralReq) -> backend_pb2.GeneralResp: raise NotImplementedError @classmethod - def subtask_invoke_0(cls, sandbox_root: str, repo_root: str, assets_config: Dict[str, str], - request: backend_pb2.GeneralReq, subtask_id: str, subtask_workdir: str, - previous_subtask_id: str, user_labels: UserLabels) -> backend_pb2.GeneralResp: + def register_subtasks(cls, request: backend_pb2.GeneralReq) -> List[Tuple[SubTaskType, float]]: + # register sub_tasks in executing orders. 
raise NotImplementedError diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_copy.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_copy.py index c44564b14a..bde6ca4263 100644 --- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_copy.py +++ b/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_copy.py @@ -1,41 +1,44 @@ -import logging import os -from typing import Dict, List +from typing import Dict, List, Optional, Tuple from common_utils.labels import UserLabels -from controller.invoker.invoker_task_base import TaskBaseInvoker +from controller.invoker.invoker_task_base import SubTaskType, TaskBaseInvoker from controller.utils import utils from id_definition.error_codes import CTLResponseCode from proto import backend_pb2 class TaskCopyInvoker(TaskBaseInvoker): - def task_pre_invoke(self, sandbox_root: str, request: backend_pb2.GeneralReq) -> backend_pb2.GeneralResp: + def task_pre_invoke(self, request: backend_pb2.GeneralReq) -> backend_pb2.GeneralResp: + if len(request.in_dataset_ids) != 1: + return utils.make_general_response(code=CTLResponseCode.ARG_VALIDATION_FAILED, + message=f"Invalid in_dataset_ids {request.in_dataset_ids}") + copy_request = request.req_create_task.copy - logging.info(f"copy_request: {copy_request}") if not (copy_request.src_user_id and copy_request.src_repo_id): return utils.make_general_response(code=CTLResponseCode.ARG_VALIDATION_FAILED, message="Invalid src user and/or repo id") - src_root = os.path.join(sandbox_root, copy_request.src_user_id, copy_request.src_repo_id) + src_root = os.path.join(self._sandbox_root, copy_request.src_user_id, copy_request.src_repo_id) if not os.path.isdir(src_root): return utils.make_general_response(code=CTLResponseCode.ARG_VALIDATION_FAILED, message=f"Invalid src root: {src_root}") return utils.make_general_response(code=CTLResponseCode.CTR_OK, message="") @classmethod - def subtask_weights(cls) -> List[float]: - return [1.0] + def register_subtasks(cls, request: backend_pb2.GeneralReq) -> List[Tuple[SubTaskType, float]]: + return [(cls.subtask_invoke_copy, 1.0)] @classmethod - def subtask_invoke_0(cls, sandbox_root: str, repo_root: str, assets_config: Dict[str, str], - request: backend_pb2.GeneralReq, subtask_id: str, subtask_workdir: str, - previous_subtask_id: str, user_labels: UserLabels) -> backend_pb2.GeneralResp: + def subtask_invoke_copy(cls, request: backend_pb2.GeneralReq, user_labels: UserLabels, sandbox_root: str, + assets_config: Dict[str, str], repo_root: str, master_task_id: str, subtask_id: str, + subtask_workdir: str, his_task_id: Optional[str], + in_dataset_ids: List[str]) -> backend_pb2.GeneralResp: copy_request = request.req_create_task.copy src_root = os.path.join(sandbox_root, copy_request.src_user_id, copy_request.src_repo_id) copy_response = cls.copying_cmd(repo_root=repo_root, task_id=subtask_id, src_root=src_root, - src_dataset_id=copy_request.src_dataset_id, + src_dataset_id=in_dataset_ids[0], work_dir=subtask_workdir, name_strategy_ignore=copy_request.name_strategy_ignore, drop_annotations=copy_request.drop_annotations) @@ -46,9 +49,8 @@ def subtask_invoke_0(cls, sandbox_root: str, repo_root: str, assets_config: Dict def copying_cmd(repo_root: str, task_id: str, src_root: str, src_dataset_id: str, work_dir: str, name_strategy_ignore: bool, drop_annotations: bool) -> backend_pb2.GeneralResp: copying_cmd_str = [ - utils.mir_executable(), 'copy', '--root', repo_root, - '--src-root', src_root, '--dst-rev', 
f"{task_id}@{task_id}", '--src-revs', - f"{src_dataset_id}@{src_dataset_id}", '-w', work_dir + utils.mir_executable(), 'copy', '--root', repo_root, '--src-root', src_root, '--dst-rev', + f"{task_id}@{task_id}", '--src-revs', f"{src_dataset_id}@{src_dataset_id}", '-w', work_dir ] if name_strategy_ignore: diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_exporting.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_exporting.py index ea30be22b8..8a8e5aa834 100644 --- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_exporting.py +++ b/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_exporting.py @@ -1,49 +1,56 @@ -import logging import os -from typing import Dict, List +from typing import Dict, List, Optional, Tuple from common_utils.labels import UserLabels -from controller.invoker.invoker_task_base import TaskBaseInvoker +from controller.invoker.invoker_task_base import SubTaskType, TaskBaseInvoker from controller.utils import utils from id_definition.error_codes import CTLResponseCode +from mir.protos import mir_command_pb2 as mir_cmd_pb from proto import backend_pb2 class TaskExportingInvoker(TaskBaseInvoker): - def task_pre_invoke(self, sandbox_root: str, request: backend_pb2.GeneralReq) -> backend_pb2.GeneralResp: + def task_pre_invoke(self, request: backend_pb2.GeneralReq) -> backend_pb2.GeneralResp: + if len(request.in_dataset_ids) != 1: + return utils.make_general_response(code=CTLResponseCode.ARG_VALIDATION_FAILED, + message=f"Invalid in_dataset_ids {request.in_dataset_ids}") + exporting_request = request.req_create_task.exporting - logging.info(f"exporting_requests: {exporting_request}") asset_dir = exporting_request.asset_dir if not asset_dir: return utils.make_general_response(code=CTLResponseCode.ARG_VALIDATION_FAILED, message="empty asset_dir") os.makedirs(asset_dir, exist_ok=True) - annotation_dir = exporting_request.annotation_dir - if exporting_request.format != backend_pb2.LabelFormat.NO_ANNOTATION: - if not annotation_dir: - return utils.make_general_response(code=CTLResponseCode.ARG_VALIDATION_FAILED, - message="empty annotation_dir") - os.makedirs(annotation_dir, exist_ok=True) + if exporting_request.format != mir_cmd_pb.AnnoFormat.AF_NO_ANNOTATION: + pred_dir = exporting_request.pred_dir + if not pred_dir: + return utils.make_general_response(code=CTLResponseCode.ARG_VALIDATION_FAILED, message="empty pred_dir") + os.makedirs(pred_dir, exist_ok=True) + + gt_dir = exporting_request.gt_dir + if not gt_dir: + return utils.make_general_response(code=CTLResponseCode.ARG_VALIDATION_FAILED, message="empty gt_dir") + os.makedirs(pred_dir, exist_ok=True) return utils.make_general_response(code=CTLResponseCode.CTR_OK, message="") @classmethod - def subtask_weights(cls) -> List[float]: - return [1.0] + def register_subtasks(cls, request: backend_pb2.GeneralReq) -> List[Tuple[SubTaskType, float]]: + return [(cls.subtask_invoke_export, 1.0)] @classmethod - def subtask_invoke_0(cls, sandbox_root: str, repo_root: str, assets_config: Dict[str, str], - request: backend_pb2.GeneralReq, subtask_id: str, subtask_workdir: str, - previous_subtask_id: str, user_labels: UserLabels) -> backend_pb2.GeneralResp: + def subtask_invoke_export(cls, request: backend_pb2.GeneralReq, user_labels: UserLabels, sandbox_root: str, + assets_config: Dict[str, str], repo_root: str, master_task_id: str, subtask_id: str, + subtask_workdir: str, his_task_id: Optional[str], + in_dataset_ids: List[str]) -> backend_pb2.GeneralResp: 
exporting_request = request.req_create_task.exporting - asset_dir = exporting_request.asset_dir - annotation_dir = exporting_request.annotation_dir media_location = assets_config['assetskvlocation'] exporting_response = cls.exporting_cmd(repo_root=repo_root, - dataset_id=exporting_request.dataset_id, + in_dataset_id=in_dataset_ids[0], annotation_format=utils.annotation_format_str(exporting_request.format), - asset_dir=asset_dir, - annotation_dir=annotation_dir, + asset_dir=exporting_request.asset_dir, + pred_dir=exporting_request.pred_dir, + gt_dir=exporting_request.gt_dir, media_location=media_location, work_dir=subtask_workdir) @@ -51,20 +58,26 @@ def subtask_invoke_0(cls, sandbox_root: str, repo_root: str, assets_config: Dict @staticmethod def exporting_cmd(repo_root: str, - dataset_id: str, + in_dataset_id: str, annotation_format: str, asset_dir: str, - annotation_dir: str, + pred_dir: Optional[str], media_location: str, - work_dir: str, - keywords: List[str] = None) -> backend_pb2.GeneralResp: + work_dir: Optional[str] = None, + keywords: List[str] = None, + gt_dir: Optional[str] = None) -> backend_pb2.GeneralResp: exporting_cmd = [ utils.mir_executable(), 'export', '--root', repo_root, '--media-location', media_location, '--asset-dir', - asset_dir, '--annotation-dir', annotation_dir, '--src-revs', f"{dataset_id}@{dataset_id}", '--format', - annotation_format, '-w', work_dir + asset_dir, '--src-revs', f"{in_dataset_id}@{in_dataset_id}", '--anno-format', annotation_format ] if keywords: - exporting_cmd.append('--cis') + exporting_cmd.append('--class_names') exporting_cmd.append(';'.join(keywords)) + if work_dir: + exporting_cmd += ["-w", work_dir] + if pred_dir: + exporting_cmd += ["--pred-dir", pred_dir] + if gt_dir: + exporting_cmd += ["--gt-dir", gt_dir] return utils.run_command(exporting_cmd) diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_factory.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_factory.py index 2c0cd4ffde..ff20ec399b 100644 --- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_factory.py +++ b/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_factory.py @@ -3,29 +3,28 @@ from controller.invoker.invoker_cmd_base import BaseMirControllerInvoker from controller.invoker.invoker_task_copy import TaskCopyInvoker from controller.invoker.invoker_task_exporting import TaskExportingInvoker -from controller.invoker.invoker_task_filter import TaskFilterInvoker from controller.invoker.invoker_task_fusion import TaskFusionInvoker -from controller.invoker.invoker_task_importing import TaskImportingInvoker +from controller.invoker.invoker_task_import_dataset import TaskImportDatasetInvoker +from controller.invoker.invoker_task_import_model import TaskImportModelInvoker from controller.invoker.invoker_task_labeling import TaskLabelingInvoker from controller.invoker.invoker_task_mining import TaskMiningInvoker -from controller.invoker.invoker_task_model_importing import TaskModelImportingInvoker from controller.invoker.invoker_task_training import TaskTrainingInvoker +from mir.protos import mir_command_pb2 as mir_cmd_pb from proto import backend_pb2 class CreateTaskInvokerFactory(BaseMirControllerInvoker): _create_task_invokers_map = { - backend_pb2.TaskTypeCopyData: TaskCopyInvoker, - backend_pb2.TaskTypeExportData: TaskExportingInvoker, - backend_pb2.TaskTypeFilter: TaskFilterInvoker, - backend_pb2.TaskTypeImportData: TaskImportingInvoker, - backend_pb2.TaskTypeMining: TaskMiningInvoker, - 
backend_pb2.TaskTypeTraining: TaskTrainingInvoker, - backend_pb2.TaskTypeLabel: TaskLabelingInvoker, - backend_pb2.TaskTypeFusion: TaskFusionInvoker, - backend_pb2.TaskTypeImportModel: TaskModelImportingInvoker, - backend_pb2.TaskTypeCopyModel: TaskCopyInvoker, - backend_pb2.TaskTypeDatasetInfer: TaskMiningInvoker, + mir_cmd_pb.TaskType.TaskTypeCopyData: TaskCopyInvoker, + mir_cmd_pb.TaskType.TaskTypeExportData: TaskExportingInvoker, + mir_cmd_pb.TaskType.TaskTypeImportData: TaskImportDatasetInvoker, + mir_cmd_pb.TaskType.TaskTypeMining: TaskMiningInvoker, + mir_cmd_pb.TaskType.TaskTypeTraining: TaskTrainingInvoker, + mir_cmd_pb.TaskType.TaskTypeLabel: TaskLabelingInvoker, + mir_cmd_pb.TaskType.TaskTypeFusion: TaskFusionInvoker, + mir_cmd_pb.TaskType.TaskTypeImportModel: TaskImportModelInvoker, + mir_cmd_pb.TaskType.TaskTypeCopyModel: TaskCopyInvoker, + mir_cmd_pb.TaskType.TaskTypeDatasetInfer: TaskMiningInvoker, } def __new__(cls, request: backend_pb2.GeneralReq, *args, **kwargs) -> Any: # type: ignore diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_filter.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_filter.py deleted file mode 100644 index c1a017c202..0000000000 --- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_filter.py +++ /dev/null @@ -1,71 +0,0 @@ -import logging -from typing import Dict, List -from common_utils.labels import UserLabels - -from controller.invoker.invoker_cmd_filter import FilterBranchInvoker -from controller.invoker.invoker_cmd_merge import MergeInvoker -from controller.invoker.invoker_task_base import TaskBaseInvoker -from controller.utils import invoker_call, utils -from id_definition.error_codes import CTLResponseCode -from proto import backend_pb2 - - -class TaskFilterInvoker(TaskBaseInvoker): - def task_pre_invoke(self, sandbox_root: str, request: backend_pb2.GeneralReq) -> backend_pb2.GeneralResp: - filter_request = request.req_create_task.filter - logging.info(f"filter_request: {filter_request}") - if not filter_request.in_dataset_ids: - return utils.make_general_response( - code=CTLResponseCode.ARG_VALIDATION_FAILED, - message="invalid_data_ids", - ) - - return utils.make_general_response(code=CTLResponseCode.CTR_OK, message="") - - @classmethod - def subtask_weights(cls) -> List[float]: - return [0.5, 0.5] - - @classmethod - def subtask_invoke_1(cls, sandbox_root: str, repo_root: str, assets_config: Dict[str, str], - request: backend_pb2.GeneralReq, subtask_id: str, subtask_workdir: str, - previous_subtask_id: str, user_labels: UserLabels) -> backend_pb2.GeneralResp: - filter_request = request.req_create_task.filter - in_dataset_ids = list(filter_request.in_dataset_ids) - merge_response = invoker_call.make_invoker_cmd_call( - invoker=MergeInvoker, - sandbox_root=sandbox_root, - req_type=backend_pb2.CMD_MERGE, - user_id=request.user_id, - repo_id=request.repo_id, - task_id=subtask_id, - his_task_id=in_dataset_ids[0], - dst_dataset_id=request.task_id, - in_dataset_ids=in_dataset_ids, - merge_strategy=request.merge_strategy, - work_dir=subtask_workdir, - ) - return merge_response - - @classmethod - def subtask_invoke_0(cls, sandbox_root: str, repo_root: str, assets_config: Dict[str, str], - request: backend_pb2.GeneralReq, subtask_id: str, subtask_workdir: str, - previous_subtask_id: str, user_labels: UserLabels) -> backend_pb2.GeneralResp: - filter_request = request.req_create_task.filter - - filter_response = invoker_call.make_invoker_cmd_call( - invoker=FilterBranchInvoker, - 
sandbox_root=sandbox_root, - req_type=backend_pb2.CMD_FILTER, - user_id=request.user_id, - repo_id=request.repo_id, - task_id=subtask_id, - his_task_id=previous_subtask_id, - dst_dataset_id=request.task_id, - in_dataset_ids=[request.task_id], - in_class_ids=filter_request.in_class_ids, - ex_class_ids=filter_request.ex_class_ids, - work_dir=subtask_workdir, - ) - - return filter_response diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_fusion.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_fusion.py index 2cd80c178d..06b226b131 100644 --- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_fusion.py +++ b/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_fusion.py @@ -1,33 +1,41 @@ -from typing import Dict, List +from typing import Dict, List, Optional, Tuple from common_utils.labels import UserLabels from controller.invoker.invoker_cmd_filter import FilterBranchInvoker from controller.invoker.invoker_cmd_merge import MergeInvoker from controller.invoker.invoker_cmd_sampling import SamplingInvoker -from controller.invoker.invoker_task_base import TaskBaseInvoker +from controller.invoker.invoker_task_base import SubTaskType, TaskBaseInvoker from controller.utils import invoker_call, utils from id_definition.error_codes import CTLResponseCode from proto import backend_pb2 class TaskFusionInvoker(TaskBaseInvoker): - def task_pre_invoke(self, sandbox_root: str, request: backend_pb2.GeneralReq) -> backend_pb2.GeneralResp: - if not request.req_create_task.fusion.in_dataset_ids: + def task_pre_invoke(self, request: backend_pb2.GeneralReq) -> backend_pb2.GeneralResp: + if not request.in_dataset_ids: return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, 'empty in_dataset_ids') return utils.make_general_response(CTLResponseCode.CTR_OK, "") @classmethod - def subtask_weights(cls) -> List: - return [0.4, 0.3, 0.3] + def register_subtasks(cls, request: backend_pb2.GeneralReq) -> List[Tuple[SubTaskType, float]]: + subtasks_queue: List[SubTaskType] = [] + if len(request.in_dataset_ids) > 1 or request.ex_dataset_ids: + subtasks_queue.append(cls.subtask_invoke_merge) + if request.in_class_ids or request.ex_class_ids: + subtasks_queue.append(cls.subtask_invoke_filter) + if request.sampling_count or 0 < request.sampling_rate < (1.0 - 1e-9): + subtasks_queue.append(cls.subtask_invoke_sample) + if not subtasks_queue: # empty ops, just copy. 
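# (A single merge over the lone in_dataset_id behaves as a plain copy.)
# Each selected op then receives an equal share of the progress weight, so
# the returned weights always sum to 1.0 as _register_subtask_monitor expects.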
+ subtasks_queue.append(cls.subtask_invoke_merge) + return [(x, 1.0 / len(subtasks_queue)) for x in subtasks_queue] @classmethod - def subtask_invoke_2(cls, sandbox_root: str, repo_root: str, assets_config: Dict[str, str], - request: backend_pb2.GeneralReq, subtask_id: str, subtask_workdir: str, - previous_subtask_id: str, user_labels: UserLabels) -> backend_pb2.GeneralResp: + def subtask_invoke_merge(cls, request: backend_pb2.GeneralReq, user_labels: UserLabels, sandbox_root: str, + assets_config: Dict[str, str], repo_root: str, master_task_id: str, subtask_id: str, + subtask_workdir: str, his_task_id: Optional[str], + in_dataset_ids: List[str]) -> backend_pb2.GeneralResp: """ merge """ - fusion_req = request.req_create_task.fusion - in_dataset_ids = list(fusion_req.in_dataset_ids) merge_response = invoker_call.make_invoker_cmd_call( invoker=MergeInvoker, sandbox_root=sandbox_root, @@ -35,21 +43,25 @@ def subtask_invoke_2(cls, sandbox_root: str, repo_root: str, assets_config: Dict user_id=request.user_id, repo_id=request.repo_id, task_id=subtask_id, - his_task_id=in_dataset_ids[0], - dst_dataset_id=request.task_id, + his_task_id=his_task_id, + dst_dataset_id=master_task_id, in_dataset_ids=in_dataset_ids, - ex_dataset_ids=fusion_req.ex_dataset_ids, - merge_strategy=fusion_req.merge_strategy, + ex_dataset_ids=request.ex_dataset_ids, + merge_strategy=request.merge_strategy, work_dir=subtask_workdir, ) return merge_response @classmethod - def subtask_invoke_1(cls, sandbox_root: str, repo_root: str, assets_config: Dict[str, str], - request: backend_pb2.GeneralReq, subtask_id: str, subtask_workdir: str, - previous_subtask_id: str, user_labels: UserLabels) -> backend_pb2.GeneralResp: + def subtask_invoke_filter(cls, request: backend_pb2.GeneralReq, user_labels: UserLabels, sandbox_root: str, + assets_config: Dict[str, str], repo_root: str, master_task_id: str, subtask_id: str, + subtask_workdir: str, his_task_id: Optional[str], + in_dataset_ids: List[str]) -> backend_pb2.GeneralResp: + if len(in_dataset_ids) != 1: + return utils.make_general_response(code=CTLResponseCode.ARG_VALIDATION_FAILED, + message=f"Invalid single in_dataset_ids {in_dataset_ids}") + """ filter """ - fusion_req = request.req_create_task.fusion filter_response = invoker_call.make_invoker_cmd_call( invoker=FilterBranchInvoker, sandbox_root=sandbox_root, @@ -57,33 +69,37 @@ def subtask_invoke_1(cls, sandbox_root: str, repo_root: str, assets_config: Dict user_id=request.user_id, repo_id=request.repo_id, task_id=subtask_id, - his_task_id=previous_subtask_id, - dst_dataset_id=request.task_id, - in_dataset_ids=[request.task_id], - in_class_ids=fusion_req.in_class_ids, - ex_class_ids=fusion_req.ex_class_ids, + his_task_id=his_task_id, + dst_dataset_id=master_task_id, + in_dataset_ids=in_dataset_ids, + in_class_ids=request.in_class_ids, + ex_class_ids=request.ex_class_ids, work_dir=subtask_workdir, ) return filter_response @classmethod - def subtask_invoke_0(cls, sandbox_root: str, repo_root: str, assets_config: Dict[str, str], - request: backend_pb2.GeneralReq, subtask_id: str, subtask_workdir: str, - previous_subtask_id: str, user_labels: UserLabels) -> backend_pb2.GeneralResp: + def subtask_invoke_sample(cls, request: backend_pb2.GeneralReq, user_labels: UserLabels, sandbox_root: str, + assets_config: Dict[str, str], repo_root: str, master_task_id: str, subtask_id: str, + subtask_workdir: str, his_task_id: Optional[str], + in_dataset_ids: List[str]) -> backend_pb2.GeneralResp: + if len(in_dataset_ids) != 1: + return
utils.make_general_response(code=CTLResponseCode.ARG_VALIDATION_FAILED, + message="Invalid single in_dataset_ids {in_dataset_ids}") + """ sampling """ - fusion_req = request.req_create_task.fusion sampling_response = invoker_call.make_invoker_cmd_call( invoker=SamplingInvoker, sandbox_root=sandbox_root, req_type=backend_pb2.CMD_SAMPLING, user_id=request.user_id, repo_id=request.repo_id, - task_id=request.task_id, - his_task_id=previous_subtask_id, - dst_dataset_id=request.task_id, - in_dataset_ids=[request.task_id], - sampling_count=fusion_req.count, - sampling_rate=fusion_req.rate, + task_id=subtask_id, + his_task_id=his_task_id, + dst_dataset_id=master_task_id, + in_dataset_ids=in_dataset_ids, + sampling_count=request.sampling_count, + sampling_rate=request.sampling_rate, work_dir=subtask_workdir, ) return sampling_response diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_import_dataset.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_import_dataset.py new file mode 100644 index 0000000000..968cf76431 --- /dev/null +++ b/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_import_dataset.py @@ -0,0 +1,98 @@ +import logging +import os +import shutil +from typing import Dict, List, Optional, Tuple +from common_utils.labels import UserLabels + +from controller.invoker.invoker_task_base import SubTaskType, TaskBaseInvoker +from controller.utils import utils +from id_definition.error_codes import CTLResponseCode +from proto import backend_pb2, backend_pb2_utils + + +class TaskImportDatasetInvoker(TaskBaseInvoker): + def task_pre_invoke(self, request: backend_pb2.GeneralReq) -> backend_pb2.GeneralResp: + import_dataset_request = request.req_create_task.import_dataset + (media_dir, pred_dir, gt_dir) = (import_dataset_request.asset_dir, import_dataset_request.pred_dir, + import_dataset_request.gt_dir) + if pred_dir: + if not os.access(pred_dir, os.R_OK): + return utils.make_general_response(code=CTLResponseCode.ARG_VALIDATION_FAILED, + message=f"invalid permissions of pred_dir: {pred_dir}") + if gt_dir: + if not os.access(gt_dir, os.R_OK): + return utils.make_general_response(code=CTLResponseCode.ARG_VALIDATION_FAILED, + message=f"invalid permissions of groundtruth_dir: {gt_dir}") + + if not os.access(media_dir, os.R_OK): + return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, + message=f"invalid permissions of media_dir:{media_dir}") + return utils.make_general_response(code=CTLResponseCode.CTR_OK, message="") + + @classmethod + def register_subtasks(cls, request: backend_pb2.GeneralReq) -> List[Tuple[SubTaskType, float]]: + return [(cls.subtask_invoke_import, 1.0)] + + @classmethod + def subtask_invoke_import(cls, request: backend_pb2.GeneralReq, user_labels: UserLabels, sandbox_root: str, + assets_config: Dict[str, str], repo_root: str, master_task_id: str, subtask_id: str, + subtask_workdir: str, his_task_id: Optional[str], + in_dataset_ids: List[str]) -> backend_pb2.GeneralResp: + import_dataset_request = request.req_create_task.import_dataset + + # Prepare media index-file + media_dir = import_dataset_request.asset_dir + media_files = [ + os.path.join(media_dir, f) for f in os.listdir(media_dir) if os.path.isfile(os.path.join(media_dir, f)) + ] + index_file = os.path.join(subtask_workdir, 'index.txt') + with open(index_file, 'w') as f: + f.write('\n'.join(media_files)) + + media_location = assets_config['assetskvlocation'] + import_dataset_response = cls.importing_cmd( + repo_root=repo_root, + 
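+            # subtask_id doubles as the destination revision of the underlying mir import call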
+            task_id=subtask_id,
+            index_file=index_file,
+            pred_dir=import_dataset_request.pred_dir,
+            gt_dir=import_dataset_request.gt_dir,
+            media_location=media_location,
+            work_dir=subtask_workdir,
+            unknown_types_strategy=import_dataset_request.unknown_types_strategy)
+
+        if import_dataset_request.clean_dirs:
+            logging.info("trying to clean all data dirs.")
+            try:
+                shutil.rmtree(media_dir)
+            except Exception:
+                pass
+            try:
+                shutil.rmtree(import_dataset_request.pred_dir)
+            except Exception:
+                pass
+            try:
+                shutil.rmtree(import_dataset_request.gt_dir)
+            except Exception:
+                pass
+
+        return import_dataset_response
+
+    @staticmethod
+    def importing_cmd(repo_root: str, task_id: str, index_file: str, pred_dir: str, gt_dir: str, media_location: str,
+                      work_dir: str,
+                      unknown_types_strategy: backend_pb2.UnknownTypesStrategy) -> backend_pb2.GeneralResp:
+        importing_cmd = [
+            utils.mir_executable(), 'import', '--root', repo_root, '--dst-rev',
+            f"{task_id}@{task_id}", '--src-revs', 'master', '--index-file', index_file, '--gen-dir', media_location,
+            '-w', work_dir, "--anno-type", "det-box"
+        ]
+        if pred_dir:
+            importing_cmd.extend(['--pred-dir', pred_dir])
+        if gt_dir:
+            importing_cmd.extend(['--gt-dir', gt_dir])
+        importing_cmd.extend([
+            '--unknown-types-strategy',
+            backend_pb2_utils.unknown_types_strategy_str_from_enum(unknown_types_strategy).value
+        ])
+
+        return utils.run_command(importing_cmd)
diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_import_model.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_import_model.py
new file mode 100644
index 0000000000..a83a1742db
--- /dev/null
+++ b/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_import_model.py
@@ -0,0 +1,38 @@
+import os
+from typing import Dict, List, Optional, Tuple
+from common_utils.labels import UserLabels
+
+from controller.invoker.invoker_task_base import SubTaskType, TaskBaseInvoker
+from controller.utils import utils
+from id_definition.error_codes import CTLResponseCode
+from proto import backend_pb2
+
+
+class TaskImportModelInvoker(TaskBaseInvoker):
+    def task_pre_invoke(self, request: backend_pb2.GeneralReq) -> backend_pb2.GeneralResp:
+        import_model_request = request.req_create_task.import_model
+        model_package_path = import_model_request.model_package_path
+        if not os.path.isfile(model_package_path):
+            return utils.make_general_response(code=CTLResponseCode.ARG_VALIDATION_FAILED,
+                                               message=f"file does not exist: {model_package_path}")
+
+        return utils.make_general_response(code=CTLResponseCode.CTR_OK, message="")
+
+    @classmethod
+    def register_subtasks(cls, request: backend_pb2.GeneralReq) -> List[Tuple[SubTaskType, float]]:
+        return [(cls.subtask_invoke_import, 1.0)]
+
+    @classmethod
+    def subtask_invoke_import(cls, request: backend_pb2.GeneralReq, user_labels: UserLabels, sandbox_root: str,
+                              assets_config: Dict[str, str], repo_root: str, master_task_id: str, subtask_id: str,
+                              subtask_workdir: str, his_task_id: Optional[str],
+                              in_dataset_ids: List[str]) -> backend_pb2.GeneralResp:
+        import_model_request = request.req_create_task.import_model
+        model_package_path = import_model_request.model_package_path
+
+        cmd = [
+            utils.mir_executable(), 'models', '--root', repo_root, '--package-path', model_package_path, '-w',
+            subtask_workdir, '--dst-rev', f"{subtask_id}@{subtask_id}", '--model-location',
+            assets_config["modelsuploadlocation"]
+        ]
+        return utils.run_command(cmd)
diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_importing.py
b/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_importing.py deleted file mode 100644 index 86ec8ffbd5..0000000000 --- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_importing.py +++ /dev/null @@ -1,71 +0,0 @@ -import logging -import os -from typing import Dict, List -from common_utils.labels import UserLabels - -from controller.invoker.invoker_task_base import TaskBaseInvoker -from controller.utils import utils -from id_definition.error_codes import CTLResponseCode -from proto import backend_pb2 - - -class TaskImportingInvoker(TaskBaseInvoker): - def task_pre_invoke(self, sandbox_root: str, request: backend_pb2.GeneralReq) -> backend_pb2.GeneralResp: - importing_request = request.req_create_task.importing - logging.info(f"importing_request: {importing_request}") - media_dir, anno_dir = importing_request.asset_dir, importing_request.annotation_dir - if anno_dir: - if not os.access(anno_dir, os.R_OK): - return utils.make_general_response(code=CTLResponseCode.ARG_VALIDATION_FAILED, - message=f"invalid permissions of annotation_dir: {anno_dir}") - - if not os.access(media_dir, os.R_OK): - return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, - message=f"invalid permissions of media_dir:{media_dir}") - return utils.make_general_response(code=CTLResponseCode.CTR_OK, message="") - - @classmethod - def subtask_weights(cls) -> List[float]: - return [1.0] - - @classmethod - def subtask_invoke_0(cls, sandbox_root: str, repo_root: str, assets_config: Dict[str, str], - request: backend_pb2.GeneralReq, subtask_id: str, subtask_workdir: str, - previous_subtask_id: str, user_labels: UserLabels) -> backend_pb2.GeneralResp: - importing_request = request.req_create_task.importing - - # Prepare media index-file - media_dir, anno_dir = importing_request.asset_dir, importing_request.annotation_dir - media_files = [ - os.path.join(media_dir, f) for f in os.listdir(media_dir) if os.path.isfile(os.path.join(media_dir, f)) - ] - index_file = os.path.join(subtask_workdir, 'index.txt') - with open(index_file, 'w') as f: - f.write('\n'.join(media_files)) - - media_location = assets_config['assetskvlocation'] - importing_response = cls.importing_cmd(repo_root=repo_root, - task_id=subtask_id, - index_file=index_file, - annotation_dir=anno_dir, - media_location=media_location, - work_dir=subtask_workdir, - name_strategy_ignore=importing_request.name_strategy_ignore) - - return importing_response - - @staticmethod - def importing_cmd(repo_root: str, task_id: str, index_file: str, annotation_dir: str, media_location: str, - work_dir: str, name_strategy_ignore: bool) -> backend_pb2.GeneralResp: - importing_cmd = [ - utils.mir_executable(), 'import', '--root', repo_root, - '--dataset-name', task_id, '--dst-rev', f"{task_id}@{task_id}", - '--src-revs', 'master', '--index-file', index_file, '--gen-dir', media_location, '-w', work_dir - ] - if annotation_dir: - importing_cmd.append('--annotation-dir') - importing_cmd.append(annotation_dir) - if name_strategy_ignore: - importing_cmd.append("--ignore-unknown-types") - - return utils.run_command(importing_cmd) diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_labeling.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_labeling.py index 1a46fffc5c..2ec18e34f2 100644 --- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_labeling.py +++ b/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_labeling.py @@ -1,8 +1,8 @@ import logging -from typing 
import Dict, List +from typing import Dict, List, Optional, Tuple from common_utils import labels -from controller.invoker.invoker_task_base import TaskBaseInvoker +from controller.invoker.invoker_task_base import SubTaskType, TaskBaseInvoker from controller.label_model import label_runner from controller.utils import utils from id_definition.error_codes import CTLResponseCode @@ -10,20 +10,30 @@ class TaskLabelingInvoker(TaskBaseInvoker): - def task_pre_invoke(self, sandbox_root: str, request: backend_pb2.GeneralReq) -> backend_pb2.GeneralResp: + def task_pre_invoke(self, request: backend_pb2.GeneralReq) -> backend_pb2.GeneralResp: + if len(request.in_dataset_ids) != 1: + return utils.make_general_response(code=CTLResponseCode.ARG_VALIDATION_FAILED, + message=f"Invalid in_dataset_ids {request.in_dataset_ids}") + try: + utils.create_label_instance() + except ValueError: + return utils.make_general_response(code=CTLResponseCode.ARG_VALIDATION_FAILED, + message="ill-configured label_tool") + return utils.make_general_response(CTLResponseCode.CTR_OK, "") @classmethod - def subtask_weights(cls) -> List[float]: - return [1.0] + def register_subtasks(cls, request: backend_pb2.GeneralReq) -> List[Tuple[SubTaskType, float]]: + return [(cls.subtask_invoke_label, 1.0)] @classmethod - def subtask_invoke_0(cls, sandbox_root: str, repo_root: str, assets_config: Dict[str, str], - request: backend_pb2.GeneralReq, subtask_id: str, subtask_workdir: str, - previous_subtask_id: str, user_labels: labels.UserLabels) -> backend_pb2.GeneralResp: + def subtask_invoke_label(cls, request: backend_pb2.GeneralReq, user_labels: labels.UserLabels, sandbox_root: str, + assets_config: Dict[str, str], repo_root: str, master_task_id: str, subtask_id: str, + subtask_workdir: str, his_task_id: Optional[str], + in_dataset_ids: List[str]) -> backend_pb2.GeneralResp: labeling_request = request.req_create_task.labeling logging.info(f"labeling_request: {labeling_request}") - keywords = user_labels.get_main_names(class_ids=list(labeling_request.in_class_ids)) + keywords = user_labels.main_name_for_ids(class_ids=list(request.in_class_ids)) labeler_accounts = list(labeling_request.labeler_accounts) media_location = assets_config["assetskvlocation"] @@ -33,11 +43,11 @@ def subtask_invoke_0(cls, sandbox_root: str, repo_root: str, assets_config: Dict media_location=media_location, task_id=subtask_id, project_name=labeling_request.project_name, - dataset_id=labeling_request.dataset_id, + dataset_id=in_dataset_ids[0], keywords=keywords, collaborators=labeler_accounts, expert_instruction=labeling_request.expert_instruction_url, - export_annotation=labeling_request.export_annotation, + annotation_type=labeling_request.annotation_type, ) return utils.make_general_response(CTLResponseCode.CTR_OK, "") diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_mining.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_mining.py index c7454cd143..ad6e94ed26 100644 --- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_mining.py +++ b/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_mining.py @@ -1,28 +1,27 @@ -import logging import os -from typing import Dict, List +from typing import Dict, List, Optional, Tuple from common_utils.labels import UserLabels from controller.invoker.invoker_cmd_merge import MergeInvoker -from controller.invoker.invoker_task_base import TaskBaseInvoker +from controller.invoker.invoker_task_base import SubTaskType, TaskBaseInvoker from 
controller.utils import utils, invoker_call
+from controller.utils.errors import MirCtrError
 from id_definition.error_codes import CTLResponseCode
 from proto import backend_pb2
 
 
 class TaskMiningInvoker(TaskBaseInvoker):
-    def task_pre_invoke(self, sandbox_root: str, request: backend_pb2.GeneralReq) -> backend_pb2.GeneralResp:
+    def task_pre_invoke(self, request: backend_pb2.GeneralReq) -> backend_pb2.GeneralResp:
+        if not request.in_dataset_ids:
+            return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, "invalid_data_ids")
+
         mining_request = request.req_create_task.mining
-        logging.info(f"mining_request: {mining_request}")
         if mining_request.top_k < 0:
             return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED,
-                                               "invalid topk: {}".format(mining_request.top_k))
-        if not request.model_hash:
+                                               f"invalid topk: {mining_request.top_k}")
+        if not request.model_hash or not request.model_stage:
             return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, "invalid model_hash")
-        if not mining_request.in_dataset_ids:
-            return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, "invalid_data_ids")
-
         # store executor config in task_0 work_dir
         subtask_work_dir_0 = self.subtask_work_dir(self._work_dir, utils.sub_task_id(self._task_id, 0))
         output_config_file = self.gen_executor_config_path(subtask_work_dir_0)
@@ -31,6 +30,7 @@ def task_pre_invoke(self, sandbox_root: str, request: backend_pb2.GeneralReq) ->
             task_parameters=request.task_parameters,
             class_names=[],
             output_config_file=output_config_file,
+            assets_config=self._assets_config,
         )
         if not gpu_lock_ret:
             return utils.make_general_response(CTLResponseCode.LOCK_GPU_ERROR, "Not enough GPU available")
@@ -38,14 +38,19 @@ def task_pre_invoke(self, sandbox_root: str, request: backend_pb2.GeneralReq) ->
         return utils.make_general_response(CTLResponseCode.CTR_OK, "")
 
     @classmethod
-    def subtask_weights(cls) -> List[float]:
-        return [1.0, 0.0]
+    def register_subtasks(cls, request: backend_pb2.GeneralReq) -> List[Tuple[SubTaskType, float]]:
+        subtasks_queue: List[Tuple[SubTaskType, float]] = []
+        if len(request.in_dataset_ids) > 1 or request.ex_dataset_ids:
+            subtasks_queue.append((cls.subtask_invoke_merge, 0))
+        subtasks_queue.append((cls.subtask_invoke_mining, 1.0))
+
+        return subtasks_queue
 
     @classmethod
-    def subtask_invoke_1(cls, sandbox_root: str, repo_root: str, assets_config: Dict[str, str],
-                         request: backend_pb2.GeneralReq, subtask_id: str, subtask_workdir: str,
-                         previous_subtask_id: str, user_labels: UserLabels) -> backend_pb2.GeneralResp:
-        mining_request = request.req_create_task.mining
+    def subtask_invoke_merge(cls, request: backend_pb2.GeneralReq, user_labels: UserLabels, sandbox_root: str,
+                             assets_config: Dict[str, str], repo_root: str, master_task_id: str, subtask_id: str,
+                             subtask_workdir: str, his_task_id: Optional[str],
+                             in_dataset_ids: List[str]) -> backend_pb2.GeneralResp:
         merge_response = invoker_call.make_invoker_cmd_call(
             invoker=MergeInvoker,
             sandbox_root=sandbox_root,
@@ -53,19 +58,27 @@ def subtask_invoke_1(cls, sandbox_root: str, repo_root: str, assets_config: Dict
             user_id=request.user_id,
             repo_id=request.repo_id,
             task_id=subtask_id,
-            his_task_id=mining_request.in_dataset_ids[0],
-            dst_dataset_id=request.task_id,
-            in_dataset_ids=mining_request.in_dataset_ids,
-            ex_dataset_ids=mining_request.ex_dataset_ids,
+            his_task_id=his_task_id,
+            dst_dataset_id=master_task_id,
+            in_dataset_ids=in_dataset_ids,
+            ex_dataset_ids=request.ex_dataset_ids,
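+            # merge strategy and excluded datasets are taken directly from the master request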
merge_strategy=request.merge_strategy, work_dir=subtask_workdir, ) return merge_response @classmethod - def subtask_invoke_0(cls, sandbox_root: str, repo_root: str, assets_config: Dict[str, str], - request: backend_pb2.GeneralReq, subtask_id: str, subtask_workdir: str, - previous_subtask_id: str, user_labels: UserLabels) -> backend_pb2.GeneralResp: + def subtask_invoke_mining(cls, request: backend_pb2.GeneralReq, user_labels: UserLabels, sandbox_root: str, + assets_config: Dict[str, str], repo_root: str, master_task_id: str, subtask_id: str, + subtask_workdir: str, his_task_id: Optional[str], + in_dataset_ids: List[str]) -> backend_pb2.GeneralResp: + if not his_task_id: + raise MirCtrError(CTLResponseCode.INVOKER_GENERAL_ERROR, "empty previous_subtask_id in subtask_mining") + + if len(in_dataset_ids) != 1: + return utils.make_general_response(code=CTLResponseCode.ARG_VALIDATION_FAILED, + message=f"Invalid single in_dataset_ids {in_dataset_ids}") + mining_request = request.req_create_task.mining executant_name = request.task_id models_location = assets_config["modelskvlocation"] @@ -73,7 +86,7 @@ def subtask_invoke_0(cls, sandbox_root: str, repo_root: str, assets_config: Dict mining_image = request.singleton_op config_file = cls.gen_executor_config_path(subtask_workdir) - asset_cache_dir = os.path.join(sandbox_root, request.user_id, "mining_assset_cache") + asset_cache_dir = os.path.join(sandbox_root, request.user_id, "asset_cache") mining_response = cls.mining_cmd(repo_root=repo_root, config_file=config_file, task_id=subtask_id, @@ -83,8 +96,9 @@ def subtask_invoke_0(cls, sandbox_root: str, repo_root: str, assets_config: Dict media_location=media_location, top_k=mining_request.top_k, model_hash=request.model_hash, - in_dataset_id=request.task_id, - his_task_id=previous_subtask_id, + model_stage=request.model_stage, + in_dataset_id=in_dataset_ids[0], + his_task_id=his_task_id, executor=mining_image, executant_name=executant_name, generate_annotations=mining_request.generate_annotations) @@ -102,6 +116,7 @@ def mining_cmd( media_location: str, top_k: int, model_hash: str, + model_stage: str, in_dataset_id: str, his_task_id: str, asset_cache_dir: str, @@ -111,14 +126,15 @@ def mining_cmd( ) -> backend_pb2.GeneralResp: mining_cmd = [ utils.mir_executable(), 'mining', '--root', repo_root, '--dst-rev', f"{task_id}@{task_id}", '-w', work_dir, - '--model-location', model_location, '--media-location', media_location, '--model-hash', model_hash, - '--src-revs', f"{in_dataset_id}@{his_task_id}", '--asset-cache-dir', asset_cache_dir, '--task-config-file', - config_file, '--executor', executor, '--executant-name', executant_name + '--model-location', model_location, '--media-location', media_location, '--model-hash', + f"{model_hash}@{model_stage}", '--src-revs', f"{in_dataset_id}@{his_task_id}", '--asset-cache-dir', + asset_cache_dir, '--task-config-file', config_file, '--executor', executor, '--executant-name', + executant_name ] if top_k > 0: mining_cmd.append('--topk') mining_cmd.append(str(top_k)) if generate_annotations: - mining_cmd.append('--add-annotations') + mining_cmd.append('--add-prediction') return utils.run_command(mining_cmd) diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_model_importing.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_model_importing.py deleted file mode 100644 index bbc83b05f9..0000000000 --- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_model_importing.py +++ /dev/null @@ -1,49 +0,0 @@ -import 
logging
-import os
-from typing import Dict, List
-from common_utils.labels import UserLabels
-
-from controller.invoker.invoker_task_base import TaskBaseInvoker
-from controller.utils import utils
-from id_definition.error_codes import CTLResponseCode
-from proto import backend_pb2
-
-
-class TaskModelImportingInvoker(TaskBaseInvoker):
-    def task_pre_invoke(self, sandbox_root: str, request: backend_pb2.GeneralReq) -> backend_pb2.GeneralResp:
-        model_importing_request = request.req_create_task.model_importing
-        logging.info(f"model_importing_request: {model_importing_request}")
-        model_package_path = model_importing_request.model_package_path
-        if not os.path.isfile(model_package_path):
-            return utils.make_general_response(code=CTLResponseCode.ARG_VALIDATION_FAILED,
-                                               message=f"file not exists: {model_package_path}")
-
-        return utils.make_general_response(code=CTLResponseCode.CTR_OK, message="")
-
-    @classmethod
-    def subtask_weights(cls) -> List[float]:
-        return [1.0]
-
-    @classmethod
-    def subtask_invoke_0(cls, sandbox_root: str, repo_root: str, assets_config: Dict[str, str],
-                         request: backend_pb2.GeneralReq, subtask_id: str, subtask_workdir: str,
-                         previous_subtask_id: str, user_labels: UserLabels) -> backend_pb2.GeneralResp:
-        model_importing_request = request.req_create_task.model_importing
-        model_package_path = model_importing_request.model_package_path
-
-        model_importing_response = cls.model_importing_cmd(repo_root=repo_root,
-                                                           model_package_path=model_package_path,
-                                                           task_id=subtask_id,
-                                                           work_dir=subtask_workdir,
-                                                           model_location=assets_config["modelsuploadlocation"])
-
-        return model_importing_response
-
-    @classmethod
-    def model_importing_cmd(cls, repo_root: str, model_package_path: str, task_id: str, work_dir: str,
-                            model_location: str) -> backend_pb2.GeneralResp:
-        cmd = [
-            utils.mir_executable(), 'models', '--root', repo_root, '--package-path', model_package_path,
-            '-w', work_dir, '--dst-rev', f"{task_id}@{task_id}", '--model-location', model_location
-        ]
-        return utils.run_command(cmd)
diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_training.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_training.py
index 58efc9f306..1e7a0270a6 100644
--- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_training.py
+++ b/ymir/backend/src/ymir_controller/controller/invoker/invoker_task_training.py
@@ -1,29 +1,35 @@
 import os
-from typing import Dict, List
+from typing import Dict, List, Optional, Tuple
 from common_utils.labels import UserLabels
 
 from controller.invoker.invoker_cmd_merge import MergeInvoker
-from controller.invoker.invoker_task_base import TaskBaseInvoker
+from controller.invoker.invoker_task_base import SubTaskType, TaskBaseInvoker
 from controller.utils import invoker_call, revs, utils
+from controller.utils.errors import MirCtrError
 from id_definition.error_codes import CTLResponseCode
 from proto import backend_pb2
 
 
 class TaskTrainingInvoker(TaskBaseInvoker):
-    def task_pre_invoke(self, sandbox_root: str, request: backend_pb2.GeneralReq) -> backend_pb2.GeneralResp:
+    def task_pre_invoke(self, request: backend_pb2.GeneralReq) -> backend_pb2.GeneralResp:
         train_request = request.req_create_task.training
         if not train_request.in_dataset_types:
             return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, "invalid dataset_types")
 
+        if not self._user_labels:
+            return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, "invalid user labels.")
+
         # store executor config in task_0 work_dir
         subtask_work_dir_0 = self.subtask_work_dir(self._work_dir, utils.sub_task_id(self._task_id, 0))
         output_config_file = self.gen_executor_config_path(subtask_work_dir_0)
-        class_names = self._user_labels.get_main_names(class_ids=list(train_request.in_class_ids))
+        class_names = self._user_labels.main_name_for_ids(class_ids=list(request.in_class_ids))
         gpu_lock_ret = self.gen_executor_config_lock_gpus(
             req_executor_config=request.docker_image_config,
             class_names=class_names,
             task_parameters=request.task_parameters,
             output_config_file=output_config_file,
+            assets_config=self._assets_config,
+            preprocess=train_request.preprocess_config,
         )
         if not gpu_lock_ret:
             return utils.make_general_response(CTLResponseCode.LOCK_GPU_ERROR, "Not enough GPU available")
@@ -31,17 +37,20 @@ def task_pre_invoke(self, sandbox_root: str, request: backend_pb2.GeneralReq) ->
         return utils.make_general_response(CTLResponseCode.CTR_OK, "")
 
     @classmethod
-    def subtask_weights(cls) -> List[float]:
-        return [1.0, 0.0]
+    def register_subtasks(cls, request: backend_pb2.GeneralReq) -> List[Tuple[SubTaskType, float]]:
+        return [(cls.subtask_invoke_merge, 0), (cls.subtask_invoke_training, 1.0)]
 
     @classmethod
-    def subtask_invoke_1(cls, sandbox_root: str, repo_root: str, assets_config: Dict[str, str],
-                         request: backend_pb2.GeneralReq, subtask_id: str, subtask_workdir: str,
-                         previous_subtask_id: str, user_labels: UserLabels) -> backend_pb2.GeneralResp:
+    def subtask_invoke_merge(cls, request: backend_pb2.GeneralReq, user_labels: UserLabels, sandbox_root: str,
+                             assets_config: Dict[str, str], repo_root: str, master_task_id: str, subtask_id: str,
+                             subtask_workdir: str, his_task_id: Optional[str],
+                             in_dataset_ids: List[str]) -> backend_pb2.GeneralResp:
         train_request = request.req_create_task.training
+        # order merged datasets by training - validation
+        ordered_dataset_types = sorted(train_request.in_dataset_types, key=lambda v: v.dataset_type)
         in_dataset_ids = [
             revs.join_tvt_dataset_id(dataset_type.dataset_type, dataset_type.dataset_id)
-            for dataset_type in train_request.in_dataset_types
+            for dataset_type in ordered_dataset_types
         ]
 
         merge_response = invoker_call.make_invoker_cmd_call(
@@ -51,8 +60,8 @@ def subtask_invoke_1(cls, sandbox_root: str, repo_root: str, assets_config: Dict
             user_id=request.user_id,
             repo_id=request.repo_id,
             task_id=subtask_id,
-            his_task_id=train_request.in_dataset_types[0].dataset_id,
-            dst_dataset_id=request.task_id,
+            his_task_id=ordered_dataset_types[0].dataset_id,
+            dst_dataset_id=master_task_id,
             in_dataset_ids=in_dataset_ids,
             merge_strategy=request.merge_strategy,
             work_dir=subtask_workdir,
@@ -61,20 +70,25 @@ def subtask_invoke_1(cls, sandbox_root: str, repo_root: str, assets_config: Dict
         return merge_response
 
     @classmethod
-    def subtask_invoke_0(cls, sandbox_root: str, repo_root: str, assets_config: Dict[str, str],
-                         request: backend_pb2.GeneralReq, subtask_id: str, subtask_workdir: str,
-                         previous_subtask_id: str, user_labels: UserLabels) -> backend_pb2.GeneralResp:
+    def subtask_invoke_training(cls, request: backend_pb2.GeneralReq, user_labels: UserLabels, sandbox_root: str,
+                                assets_config: Dict[str, str], repo_root: str, master_task_id: str, subtask_id: str,
+                                subtask_workdir: str, his_task_id: Optional[str],
+                                in_dataset_ids: List[str]) -> backend_pb2.GeneralResp:
+        if not his_task_id:
+            raise MirCtrError(CTLResponseCode.INVOKER_GENERAL_ERROR, "empty previous_subtask_id in subtask_training")
+
         models_upload_location = assets_config["modelsuploadlocation"]
         media_location =
assets_config["assetskvlocation"] training_image = request.singleton_op - tensorboard_root = assets_config['tensorboard_root'] + tensorboard_root = assets_config["tensorboard_root"] tensorboard_dir = os.path.join(tensorboard_root, request.user_id, request.task_id) os.makedirs(tensorboard_dir, exist_ok=True) + asset_cache_dir = os.path.join(sandbox_root, request.user_id, "asset_cache") + os.makedirs(asset_cache_dir, exist_ok=True) + config_file = cls.gen_executor_config_path(subtask_workdir) - asset_cache_dir = os.path.join(sandbox_root, request.user_id, "training_assset_cache") - executant_name = request.task_id train_response = cls.training_cmd( repo_root=repo_root, config_file=config_file, @@ -82,13 +96,14 @@ def subtask_invoke_0(cls, sandbox_root: str, repo_root: str, assets_config: Dict media_location=media_location, task_id=subtask_id, work_dir=subtask_workdir, - in_dataset_id=request.task_id, - his_task_id=previous_subtask_id, + in_dataset_id=in_dataset_ids[0], + his_task_id=his_task_id, asset_cache_dir=asset_cache_dir, training_image=training_image, - executant_name=executant_name, + executant_name=request.task_id, tensorboard=tensorboard_dir, model_hash=request.model_hash, + model_stage=request.model_stage, ) return train_response @@ -108,16 +123,17 @@ def training_cmd( executant_name: str, tensorboard: str, model_hash: str, + model_stage: str, ) -> backend_pb2.GeneralResp: training_cmd = [ utils.mir_executable(), 'train', '--root', repo_root, '--dst-rev', f"{task_id}@{task_id}", '--model-location', models_upload_location, '--media-location', media_location, '-w', work_dir, '--src-revs', f"{in_dataset_id}@{his_task_id}", '--task-config-file', config_file, '--executor', - training_image, '--executant-name', executant_name, '--tensorboard-dir', tensorboard, - '--asset-cache-dir', asset_cache_dir + training_image, '--executant-name', executant_name, '--tensorboard-dir', tensorboard, '--asset-cache-dir', + asset_cache_dir ] - if model_hash: + if model_hash and model_stage: training_cmd.append('--model-hash') - training_cmd.append(model_hash) + training_cmd.append(f"{model_hash}@{model_stage}") return utils.run_command(training_cmd) diff --git a/ymir/backend/src/ymir_controller/controller/label_model/base.py b/ymir/backend/src/ymir_controller/controller/label_model/base.py index 827f40ff0c..f27179d37f 100644 --- a/ymir/backend/src/ymir_controller/controller/label_model/base.py +++ b/ymir/backend/src/ymir_controller/controller/label_model/base.py @@ -12,6 +12,10 @@ from id_definition.error_codes import CTLResponseCode +class NotReadyError(Exception): + pass + + def catch_label_task_error(f: Callable) -> Callable: @wraps(f) def wrapper(*args: tuple, **kwargs: Any) -> object: @@ -103,8 +107,17 @@ def run( # now we have to loop label task for get status # maybe add API for labeling tool to report self status later https://labelstud.io/guide/webhooks.html @staticmethod - def store_label_task_mapping(project_id: int, task_id: str, monitor_file_path: str, des_annotation_path: str, - repo_root: str, media_location: str, import_work_dir: str, storage_id: int) -> None: + def store_label_task_mapping( + project_id: int, + task_id: str, + monitor_file_path: str, + des_annotation_path: str, + repo_root: str, + media_location: str, + import_work_dir: str, + storage_id: int, + input_asset_dir: str + ) -> None: # store into redis for loop get status label_task_content = dict(project_id=project_id, task_id=task_id, @@ -113,6 +126,7 @@ def store_label_task_mapping(project_id: int, task_id: str, 
monitor_file_path: s repo_root=repo_root, media_location=media_location, import_work_dir=import_work_dir, - storage_id=storage_id) + storage_id=storage_id, + input_asset_dir=input_asset_dir) rds.hset(name=label_task_config.MONITOR_MAPPING_KEY, mapping={task_id: json.dumps(label_task_content)}) diff --git a/ymir/backend/src/ymir_controller/controller/label_model/label_free.py b/ymir/backend/src/ymir_controller/controller/label_model/label_free.py index b00f09e4f7..c7d66b44bc 100644 --- a/ymir/backend/src/ymir_controller/controller/label_model/label_free.py +++ b/ymir/backend/src/ymir_controller/controller/label_model/label_free.py @@ -8,7 +8,9 @@ from typing import Dict, List from xml.etree import ElementTree -from controller.label_model.base import LabelBase, catch_label_task_error +import requests + +from controller.label_model.base import LabelBase, catch_label_task_error, NotReadyError from controller.label_model.request_handler import RequestHandler @@ -114,6 +116,7 @@ def safe_div(a: int, b: int) -> float: return a / b content = self.get_project_info(project_id) + logging.info("label task percent info: %s", content) percent = safe_div(content["num_tasks_with_annotations"], content["task_number"]) return percent @@ -143,12 +146,44 @@ def unzip_annotation_files(cls, content: BytesIO, des_path: str) -> None: cls._move_voc_files(des_path) def convert_annotation_to_voc(self, project_id: int, des_path: str) -> None: - url_path = f"/api/projects/{project_id}/export?exportType=VOC" - resp = self._requests.get(url_path=url_path) - self.unzip_annotation_files(BytesIO(resp), des_path) - + export_task_id = self.get_export_task(project_id) + export_url = self.get_export_url(project_id, export_task_id) + resp = requests.get(export_url) + self.unzip_annotation_files(BytesIO(resp.content), des_path) logging.info(f"success convert_annotation_to_ymir: {des_path}") + def get_export_task(self, project_id: int) -> str: + url_path = "/api/v1/export" + params = {"project_id": project_id, "page_size": 1} + resp = self._requests.get(url_path=url_path, params=params) + export_tasks = json.loads(resp)["data"]["export_tasks"] + if export_tasks: + return export_tasks[0]["task_id"] + else: + self.create_export_task(project_id) + raise NotReadyError() + + def create_export_task(self, project_id: int) -> None: + url_path = "/api/v1/export" + payload = {"project_id": project_id, "export_type": 1, "export_image": False} + resp = self._requests.post(url_path=url_path, json_data=payload) + try: + export_task_id = json.loads(resp)["data"]["task_id"] + except Exception: + logging.exception("failed to create export task for label project %s", project_id) + else: + logging.info("created export task %s for label project %s", export_task_id, project_id) + + def get_export_url(self, project_id: int, export_task_id: str) -> str: + url_path = f"/api/v1/export/{export_task_id}" + resp = self._requests.get(url_path=url_path) + try: + export_url = json.loads(resp)["data"]["store_path"] + except Exception: + logging.info("label task %s not finished", export_task_id) + raise NotReadyError() + return export_url + @catch_label_task_error def run( self, @@ -179,4 +214,5 @@ def run( media_location, import_work_dir, exported_storage_id, + input_asset_dir, ) diff --git a/ymir/backend/src/ymir_controller/controller/label_model/label_runner.py b/ymir/backend/src/ymir_controller/controller/label_model/label_runner.py index c12f286b3e..2bbc8b1c12 100644 --- a/ymir/backend/src/ymir_controller/controller/label_model/label_runner.py +++ 
b/ymir/backend/src/ymir_controller/controller/label_model/label_runner.py @@ -1,9 +1,10 @@ import logging import os -from typing import Tuple, List +from typing import Tuple, List, Optional from controller.invoker.invoker_task_exporting import TaskExportingInvoker from controller.utils import utils +from mir.protos import mir_command_pb2 as mir_cmd_pb from proto import backend_pb2 @@ -27,15 +28,23 @@ def prepare_label_dir(working_dir: str, task_id: str) -> Tuple[str, str, str, st def trigger_ymir_export(repo_root: str, dataset_id: str, input_asset_dir: str, media_location: str, - export_work_dir: str, keywords: List[str]) -> None: + export_work_dir: str, keywords: List[str], annotation_type: Optional[int]) -> None: # trigger ymir export, so that we can get pictures from ymir - format_str = utils.annotation_format_str(backend_pb2.LabelFormat.LABEL_STUDIO_JSON) + format_str = utils.annotation_format_str(mir_cmd_pb.AnnoFormat.AF_DET_LS_JSON) + + gt_dir: Optional[str] = None + pred_dir: Optional[str] = None + if annotation_type == backend_pb2.AnnotationType.GT: + gt_dir = input_asset_dir + elif annotation_type == backend_pb2.AnnotationType.PRED: + pred_dir = input_asset_dir TaskExportingInvoker.exporting_cmd(repo_root=repo_root, - dataset_id=dataset_id, + in_dataset_id=dataset_id, annotation_format=format_str, asset_dir=input_asset_dir, - annotation_dir=input_asset_dir, + pred_dir=pred_dir, + gt_dir=gt_dir, media_location=media_location, work_dir=export_work_dir, keywords=keywords) @@ -51,7 +60,7 @@ def start_label_task( keywords: List, collaborators: List, expert_instruction: str, - export_annotation: bool, + annotation_type: Optional[int], ) -> None: logging.info("start label task!!!") label_instance = utils.create_label_instance() @@ -62,7 +71,8 @@ def start_label_task( input_asset_dir=input_asset_dir, media_location=media_location, export_work_dir=export_work_dir, - keywords=keywords) + keywords=keywords, + annotation_type=annotation_type) label_instance.run(task_id=task_id, project_name=project_name, keywords=keywords, @@ -74,5 +84,5 @@ def start_label_task( repo_root=repo_root, media_location=media_location, import_work_dir=import_work_dir, - use_pre_annotation=export_annotation) + use_pre_annotation=bool(annotation_type)) logging.info("finish label task!!!") diff --git a/ymir/backend/src/ymir_controller/controller/label_model/label_studio.py b/ymir/backend/src/ymir_controller/controller/label_model/label_studio.py index f84890a788..aec9435dc3 100644 --- a/ymir/backend/src/ymir_controller/controller/label_model/label_studio.py +++ b/ymir/backend/src/ymir_controller/controller/label_model/label_studio.py @@ -253,4 +253,5 @@ def run( media_location, import_work_dir, exported_storage_id, + input_asset_dir, ) diff --git a/ymir/backend/src/ymir_controller/controller/label_model/request_handler.py b/ymir/backend/src/ymir_controller/controller/label_model/request_handler.py index 3090784947..a271b0b09e 100644 --- a/ymir/backend/src/ymir_controller/controller/label_model/request_handler.py +++ b/ymir/backend/src/ymir_controller/controller/label_model/request_handler.py @@ -13,15 +13,16 @@ def __init__( ): self.url = url self.headers = headers + self.timeout = label_task_config.LABEL_TOOL_TIMEOUT def get(self, url_path: str, params: Dict = {}) -> bytes: - resp = requests.get(url=f"{self.url}{url_path}", headers=self.headers, params=params, timeout=600) + resp = requests.get(url=f"{self.url}{url_path}", headers=self.headers, params=params, timeout=self.timeout) resp.raise_for_status() return 
resp.content def post(self, url_path: str, params: Dict = {}, json_data: Dict = {}) -> bytes: resp = requests.post( - url=f"{self.url}{url_path}", headers=self.headers, params=params, json=json_data, timeout=600 + url=f"{self.url}{url_path}", headers=self.headers, params=params, json=json_data, timeout=self.timeout ) resp.raise_for_status() return resp.content diff --git a/ymir/backend/src/ymir_controller/controller/label_project_monitor.py b/ymir/backend/src/ymir_controller/controller/label_project_monitor.py index 22c904e056..e198901ac0 100644 --- a/ymir/backend/src/ymir_controller/controller/label_project_monitor.py +++ b/ymir/backend/src/ymir_controller/controller/label_project_monitor.py @@ -1,6 +1,7 @@ import json import logging import os +from pathlib import Path import sys from requests.exceptions import ConnectionError, HTTPError, Timeout @@ -9,18 +10,24 @@ from common_utils.percent_log_util import LogState, PercentLogHandler from controller.config import label_task as label_task_config -from controller.invoker.invoker_task_importing import TaskImportingInvoker +from controller.invoker.invoker_task_import_dataset import TaskImportDatasetInvoker from controller.utils import utils from controller.utils.redis import rds +from controller.label_model.base import NotReadyError +from proto import backend_pb2 -def trigger_mir_import( - repo_root: str, task_id: str, index_file: str, des_annotation_path: str, media_location: str, import_work_dir: str -) -> None: +def trigger_mir_import(repo_root: str, task_id: str, index_file: str, des_annotation_path: str, media_location: str, + import_work_dir: str) -> None: # trigger mir import - TaskImportingInvoker.importing_cmd( - repo_root, task_id, index_file, des_annotation_path, media_location, import_work_dir, name_strategy_ignore=False - ) + TaskImportDatasetInvoker.importing_cmd(repo_root=repo_root, + task_id=task_id, + index_file=index_file, + pred_dir='', + gt_dir=des_annotation_path, + media_location=media_location, + work_dir=import_work_dir, + unknown_types_strategy=backend_pb2.UnknownTypesStrategy.UTS_STOP) def remove_json_file(des_annotation_path: str) -> None: @@ -28,35 +35,22 @@ def remove_json_file(des_annotation_path: str) -> None: logging.error(f"des_annotation_path not exist: {des_annotation_path}") return - for one_file in os.listdir(des_annotation_path): - if one_file.endswith(".json"): - os.remove(os.path.join(des_annotation_path, one_file)) - - -def _gen_index_file(des_annotation_path: str) -> str: - media_files = [] - - if label_task_config.LABEL_STUDIO == label_task_config.LABEL_TOOL: - for one_file in os.listdir(des_annotation_path): - if one_file.endswith(".json"): - with open(os.path.join(des_annotation_path, one_file)) as f: - json_content = json.load(f) - pic_path = json_content["task"]["data"]["image"].replace("data/local-files/?d=", "") - media_files.append(pic_path) - elif label_task_config.LABEL_FREE == label_task_config.LABEL_TOOL: - des_annotation_media_path = os.path.join(des_annotation_path, "images") - if os.path.isdir(des_annotation_media_path): - for one_file in os.listdir(des_annotation_media_path): - if os.path.splitext(one_file)[1].lower() in [".jpeg", ".jpg", ".png"]: - media_files.append(os.path.join(des_annotation_media_path, one_file)) - else: - raise ValueError("LABEL_TOOL Error") + for annotation_file in os.listdir(des_annotation_path): + if annotation_file.endswith(".json"): + os.remove(os.path.join(des_annotation_path, annotation_file)) - index_file = os.path.join(des_annotation_path, "index.txt") - 
with open(index_file, "w") as f: - f.write("\n".join(media_files)) - return index_file +def generate_label_index_file(input_file: Path, annotation_dir: Path) -> Path: + """ + filter assets paths against related annotation files + """ + labeled_assets_hashes = [i.stem for i in annotation_dir.iterdir() if i.suffix == ".xml"] + output_file = input_file.with_name("label_index.tsv") + with open(input_file) as in_, open(output_file, "w") as out_: + for asset_path in in_: + if Path(asset_path.strip()).stem in labeled_assets_hashes: + out_.write(asset_path) + return output_file def lable_task_monitor() -> None: @@ -73,21 +67,24 @@ def lable_task_monitor() -> None: remove_json_file(project_info["des_annotation_path"]) try: label_instance.sync_export_storage(project_info["storage_id"]) - label_instance.convert_annotation_to_voc( - project_info["project_id"], project_info["des_annotation_path"] - ) + label_instance.convert_annotation_to_voc(project_info["project_id"], + project_info["des_annotation_path"]) + except NotReadyError: + logging.info("label result not ready, try agiain later") + continue except (ConnectionError, HTTPError, Timeout) as e: sentry_sdk.capture_exception(e) logging.error(f"get label task {task_id} error: {e}, set task_id:{task_id} error") state = LogState.ERROR - index_file = _gen_index_file(project_info["des_annotation_path"]) + export_index_file = Path(project_info["input_asset_dir"]) / "index.tsv" + label_index_file = generate_label_index_file(export_index_file, Path(project_info["des_annotation_path"])) trigger_mir_import( - project_info["repo_root"], - task_id, - index_file, - project_info["des_annotation_path"], - project_info["media_location"], - project_info["import_work_dir"], + repo_root=project_info["repo_root"], + task_id=task_id, + index_file=str(label_index_file), + des_annotation_path=project_info["des_annotation_path"], + media_location=project_info["media_location"], + import_work_dir=project_info["import_work_dir"], ) rds.hdel(label_task_config.MONITOR_MAPPING_KEY, task_id) diff --git a/ymir/backend/src/ymir_controller/controller/server.py b/ymir/backend/src/ymir_controller/controller/server.py index 005ac76c34..3be359a8cf 100644 --- a/ymir/backend/src/ymir_controller/controller/server.py +++ b/ymir/backend/src/ymir_controller/controller/server.py @@ -8,10 +8,12 @@ from typing import Any, Dict import grpc +from grpc_health.v1 import health +from grpc_health.v1 import health_pb2_grpc from requests.exceptions import ConnectionError, HTTPError, Timeout -import sentry_sdk import yaml +from common_utils.sandbox_util import check_sandbox from controller.utils import errors, metrics, utils, invoker_mapping from id_definition.error_codes import CTLResponseCode from proto import backend_pb2, backend_pb2_grpc @@ -58,9 +60,9 @@ def data_manage_request(self, request: backend_pb2.GeneralReq, context: Any) -> logging.exception(f"task {task_id} general error: {e}") return utils.make_general_response(CTLResponseCode.INVOKER_UNKNOWN_ERROR, str(e)) - logging.info(f"task {task_id} result: {invoker_result}") if isinstance(invoker_result, backend_pb2.GeneralResp): return invoker_result + return utils.make_general_response(CTLResponseCode.UNKOWN_RESPONSE_FORMAT, "unknown result type: {}".format(type(invoker_result))) @@ -85,6 +87,7 @@ def path_constructor(loader: Any, node: Any) -> str: env_value = os.environ.get(env_var) if not env_value: logging.info(f"env empty for key: {env_var}") + return "" return env_value + value[match.end():] @@ -96,18 +99,6 @@ def 
parse_config_file(config_file: str) -> Any: return yaml.safe_load(f) -def _set_debug_info(debug_mode: bool = False) -> None: - if debug_mode: - logging.basicConfig(stream=sys.stdout, - format='%(levelname)-8s: [%(asctime)s] %(filename)s:%(lineno)s:%(funcName)s(): %(message)s', - datefmt='%Y%m%d-%H:%M:%S', - level=logging.DEBUG) - logging.debug("in debug mode") - else: - logging.basicConfig(stream=sys.stdout, format='%(message)s', level=logging.INFO) - sentry_sdk.init(os.environ.get("CONTROLLER_SENTRY_DSN", None)) - - def _init_metrics(metrics_config: Dict) -> None: try: metrics_permission_pass = bool(strtobool(metrics_config['allow_feedback'])) @@ -122,12 +113,18 @@ def _init_metrics(metrics_config: Dict) -> None: def main(main_args: Any) -> int: - _set_debug_info(main_args.debug) + for handler in logging.root.handlers[:]: + logging.root.removeHandler(handler) + logging.basicConfig(stream=sys.stdout, + format='%(levelname)-4s: [%(asctime)s] %(filename)s:%(lineno)-03s:\t%(message)s', + datefmt='%Y%m%d-%H:%M:%S', + level=logging.INFO) server_config = parse_config_file(main_args.config_file) sandbox_root = server_config['SANDBOX']['sandboxroot'] os.makedirs(sandbox_root, exist_ok=True) + check_sandbox(sandbox_root) _init_metrics(server_config['METRICS']) # start grpc server @@ -135,6 +132,9 @@ def main(main_args: Any) -> int: mc_service_impl = MirControllerService(sandbox_root=sandbox_root, assets_config=server_config['ASSETS']) server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) backend_pb2_grpc.add_mir_controller_serviceServicer_to_server(mc_service_impl, server) + + health_pb2_grpc.add_HealthServicer_to_server(health.HealthServicer(), server) + server.add_insecure_port("[::]:{}".format(port)) server.start() diff --git a/ymir/backend/src/ymir_controller/controller/server_local_config.yaml b/ymir/backend/src/ymir_controller/controller/server_local_config.yaml index da2df3cf30..289bca142a 100644 --- a/ymir/backend/src/ymir_controller/controller/server_local_config.yaml +++ b/ymir/backend/src/ymir_controller/controller/server_local_config.yaml @@ -10,12 +10,19 @@ ASSETS: modelsuploadlocation: /tmp/ymir-controller-sandbox-root/ymir-models/ modelsuploadprotocol: file tensorboard_root: /tmp/ymir-controller-sandbox-root/ymir-tensorboard-logs + openpai_host: 127.0.0.1 + openpai_token: fake_token + openpai_storage: fake_storage + openpai_user: fake_user + openpai_cluster: default + openpai_gputype: gpu-machine + server_runtime: runc SANDBOX: sandboxowner: ymir-root sandboxownergroup: ymir-root sandboxroot: /tmp/ymir-controller-sandbox-root/sandbox METRICS: - allow_feedback: True + allow_feedback: False anonymous_uuid: localtest server_host: localhost server_port: "9125" diff --git a/ymir/backend/src/ymir_controller/controller/server_prd_config.yaml b/ymir/backend/src/ymir_controller/controller/server_prd_config.yaml index 33abc2d95f..2a1aa8661c 100644 --- a/ymir/backend/src/ymir_controller/controller/server_prd_config.yaml +++ b/ymir/backend/src/ymir_controller/controller/server_prd_config.yaml @@ -10,6 +10,13 @@ ASSETS: modelsuploadlocation: ${MODELS_PATH} modelsuploadprotocol: file tensorboard_root: ${TENSORBOARD_ROOT} + openpai_host: ${OPENPAI_HOST} + openpai_token: ${OPENPAI_TOKEN} + openpai_storage: ${OPENPAI_STORAGE} + openpai_user: ${OPENPAI_USER} + openpai_cluster: ${OPENPAI_CLUSTER} + openpai_gputype: ${OPENPAI_GPUTYPE} + server_runtime: ${SERVER_RUNTIME} SANDBOX: sandboxowner: ymir-root sandboxownergroup: ymir-root diff --git 
a/ymir/backend/src/ymir_controller/controller/utils/checker.py b/ymir/backend/src/ymir_controller/controller/utils/checker.py index b7fc7bbb77..39b8baaefb 100644 --- a/ymir/backend/src/ymir_controller/controller/utils/checker.py +++ b/ymir/backend/src/ymir_controller/controller/utils/checker.py @@ -3,6 +3,7 @@ import sys from enum import auto, IntEnum, unique from typing import List +from controller.invoker.invoker_cmd_base import BaseMirControllerInvoker from controller.utils import utils from id_definition import task_id as task_id_proto @@ -31,75 +32,110 @@ class Prerequisites(IntEnum): # check controller request -def check_request(request: backend_pb2.GeneralReq, - prerequisites: List[Prerequisites] = [], - mir_root: str = None) -> backend_pb2.GeneralResp: +def check_invoker(invoker: BaseMirControllerInvoker, + prerequisites: List[Prerequisites] = []) -> backend_pb2.GeneralResp: for item in prerequisites: checker_name = "_{}".format(item.name.lower()) checker_func = getattr(sys.modules[__name__], checker_name) - ret = checker_func(request=request, mir_root=mir_root) + ret = checker_func(invoker=invoker) if ret.code != CTLResponseCode.CTR_OK: logging.info("check failed: {}".format(item.name.lower())) return ret return utils.make_general_response(CTLResponseCode.CTR_OK, "") -def _check_nothing(request: backend_pb2.GeneralReq) -> backend_pb2.GeneralResp: +def _check_nothing(invoker: BaseMirControllerInvoker) -> backend_pb2.GeneralResp: return utils.make_general_response(CTLResponseCode.CTR_OK, "") -def _check_user_id(request: backend_pb2.GeneralReq, mir_root: str) -> backend_pb2.GeneralResp: - user_id = request.user_id +def _check_user_id(invoker: BaseMirControllerInvoker) -> backend_pb2.GeneralResp: + user_id = invoker._user_id if not (user_id and utils.check_valid_input_string(user_id) and len(user_id) == task_id_proto.IDProto.ID_LEN_USER_ID): return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, - "invalid user {}, abort".format(request.user_id)) + "invalid user {}, abort".format(user_id)) return utils.make_general_response(CTLResponseCode.CTR_OK, "") -def _check_repo_id(request: backend_pb2.GeneralReq, mir_root: str) -> backend_pb2.GeneralResp: - repo_id = request.repo_id +def _check_repo_id(invoker: BaseMirControllerInvoker) -> backend_pb2.GeneralResp: + repo_id = invoker._repo_id if not (repo_id and utils.check_valid_input_string(repo_id) and len(repo_id) == task_id_proto.IDProto.ID_LEN_REPO_ID): return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, - "invalid repo {}, abort".format(request.repo_id)) + "invalid repo {}, abort".format(repo_id)) return utils.make_general_response(CTLResponseCode.CTR_OK, "") -def _check_task_id(request: backend_pb2.GeneralReq, mir_root: str) -> backend_pb2.GeneralResp: - task_id = request.task_id +def _check_task_id(invoker: BaseMirControllerInvoker) -> backend_pb2.GeneralResp: + task_id = invoker._task_id if not (task_id and utils.check_valid_input_string(task_id) and len(task_id) == task_id_proto.IDProto.ID_LENGTH): return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, "invalid task {}, abort".format(task_id)) return utils.make_general_response(CTLResponseCode.CTR_OK, "") -def _check_singleton_op(request: backend_pb2.GeneralReq, mir_root: str) -> backend_pb2.GeneralResp: - task_id = request.singleton_op +def _check_repo_root_exist(invoker: BaseMirControllerInvoker) -> backend_pb2.GeneralResp: + mir_root = invoker._repo_root + if not os.path.isdir(mir_root): + return 
utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, + "mir_root not exist: {}, abort".format(mir_root)) + + return utils.make_general_response(CTLResponseCode.CTR_OK, "") + + +def _check_repo_root_not_exist(invoker: BaseMirControllerInvoker) -> backend_pb2.GeneralResp: + mir_root = invoker._repo_root + if os.path.isdir(mir_root): + return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, + "mir_root exist: {}, abort".format(mir_root)) + + return utils.make_general_response(CTLResponseCode.CTR_OK, "") + + +def _check_user_root_exist(invoker: BaseMirControllerInvoker) -> backend_pb2.GeneralResp: + user_root = invoker._user_root + if not os.path.isdir(user_root): + return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, + "user_root not exist: {}, abort".format(user_root)) + + return utils.make_general_response(CTLResponseCode.CTR_OK, "") + + +def _check_user_root_not_exist(invoker: BaseMirControllerInvoker) -> backend_pb2.GeneralResp: + user_root = invoker._user_root + if os.path.isdir(user_root): + return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, + "user_root exist: {}, abort".format(user_root)) + + return utils.make_general_response(CTLResponseCode.CTR_OK, "") + + +def _check_singleton_op(invoker: BaseMirControllerInvoker) -> backend_pb2.GeneralResp: + task_id = invoker._request.singleton_op if not (task_id and utils.check_valid_input_string(task_id) and len(task_id) == task_id_proto.IDProto.ID_LENGTH): return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, "invalid singleton_op {}, abort".format(task_id)) return utils.make_general_response(CTLResponseCode.CTR_OK, "") -def _check_dst_dataset_id(request: backend_pb2.GeneralReq, mir_root: str) -> backend_pb2.GeneralResp: - task_id = request.dst_dataset_id +def _check_dst_dataset_id(invoker: BaseMirControllerInvoker) -> backend_pb2.GeneralResp: + task_id = invoker._request.dst_dataset_id if not (task_id and utils.check_valid_input_string(task_id) and len(task_id) == task_id_proto.IDProto.ID_LENGTH): return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, "invalid dst task {}, abort".format(task_id)) return utils.make_general_response(CTLResponseCode.CTR_OK, "") -def _check_his_task_id(request: backend_pb2.GeneralReq, mir_root: str) -> backend_pb2.GeneralResp: - task_id = request.his_task_id +def _check_his_task_id(invoker: BaseMirControllerInvoker) -> backend_pb2.GeneralResp: + task_id = invoker._request.his_task_id if not (task_id and utils.check_valid_input_string(task_id) and len(task_id) == task_id_proto.IDProto.ID_LENGTH): return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, "invalid dst task {}, abort".format(task_id)) return utils.make_general_response(CTLResponseCode.CTR_OK, "") -def _check_guest_branches(request: backend_pb2.GeneralReq, mir_root: str) -> backend_pb2.GeneralResp: - guest_branches = request.guest_branches +def _check_guest_branches(invoker: BaseMirControllerInvoker) -> backend_pb2.GeneralResp: + guest_branches = invoker._request.guest_branches if not guest_branches: return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, "invalid guest branches {}, abort".format(guest_branches)) @@ -110,8 +146,8 @@ def _check_guest_branches(request: backend_pb2.GeneralReq, mir_root: str) -> bac return utils.make_general_response(CTLResponseCode.CTR_OK, "") -def _check_taskinfo_ids(request: backend_pb2.GeneralReq, mir_root: str) -> backend_pb2.GeneralResp: - task_info_ids = 
request.req_get_task_info.task_ids +def _check_taskinfo_ids(invoker: BaseMirControllerInvoker) -> backend_pb2.GeneralResp: + task_info_ids = invoker._request.req_get_task_info.task_ids if len(task_info_ids) == 0: return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, 'no task ids in request') for single_task_id in task_info_ids: @@ -121,8 +157,8 @@ def _check_taskinfo_ids(request: backend_pb2.GeneralReq, mir_root: str) -> backe return utils.make_general_response(CTLResponseCode.CTR_OK, "") -def _check_commit_message(request: backend_pb2.GeneralReq, mir_root: str) -> backend_pb2.GeneralResp: - commit_message = request.commit_message +def _check_commit_message(invoker: BaseMirControllerInvoker) -> backend_pb2.GeneralResp: + commit_message = invoker._request.commit_message if not commit_message: return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, "invalid commit_message: {}, abort".format(commit_message)) @@ -130,42 +166,8 @@ def _check_commit_message(request: backend_pb2.GeneralReq, mir_root: str) -> bac return utils.make_general_response(CTLResponseCode.CTR_OK, "") -def _check_repo_root_exist(request: backend_pb2.GeneralReq, mir_root: str) -> backend_pb2.GeneralResp: - if not os.path.isdir(mir_root): - return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, - "mir_root not exist: {}, abort".format(mir_root)) - - return utils.make_general_response(CTLResponseCode.CTR_OK, "") - - -def _check_repo_root_not_exist(request: backend_pb2.GeneralReq, mir_root: str) -> backend_pb2.GeneralResp: - if os.path.isdir(mir_root): - return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, - "mir_root exist: {}, abort".format(mir_root)) - - return utils.make_general_response(CTLResponseCode.CTR_OK, "") - - -def _check_user_root_exist(request: backend_pb2.GeneralReq, mir_root: str) -> backend_pb2.GeneralResp: - user_root = os.path.basename(mir_root) - if not os.path.isdir(user_root): - return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, - "user_root not exist: {}, abort".format(user_root)) - - return utils.make_general_response(CTLResponseCode.CTR_OK, "") - - -def _check_user_root_not_exist(request: backend_pb2.GeneralReq, mir_root: str) -> backend_pb2.GeneralResp: - user_root = os.path.basename(mir_root) - if os.path.isdir(user_root): - return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, - "user_root exist: {}, abort".format(user_root)) - - return utils.make_general_response(CTLResponseCode.CTR_OK, "") - - -def _check_in_dataset_ids(request: backend_pb2.GeneralReq, mir_root: str) -> backend_pb2.GeneralResp: - in_dataset_ids = request.in_dataset_ids +def _check_in_dataset_ids(invoker: BaseMirControllerInvoker) -> backend_pb2.GeneralResp: + in_dataset_ids = invoker._request.in_dataset_ids if not in_dataset_ids: return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, "invalid in_dataset ids: {}".format(in_dataset_ids)) @@ -173,8 +175,8 @@ def _check_in_dataset_ids(request: backend_pb2.GeneralReq, mir_root: str) -> bac return utils.make_general_response(CTLResponseCode.CTR_OK, "") -def _check_single_in_dataset_id(request: backend_pb2.GeneralReq, mir_root: str) -> backend_pb2.GeneralResp: - in_dataset_ids = request.in_dataset_ids +def _check_single_in_dataset_id(invoker: BaseMirControllerInvoker) -> backend_pb2.GeneralResp: + in_dataset_ids = invoker._request.in_dataset_ids if not in_dataset_ids or len(in_dataset_ids) > 1: return 
utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, "invalid single in_dataset ids: {}".format(in_dataset_ids)) diff --git a/ymir/backend/src/ymir_controller/controller/utils/gpu_utils.py b/ymir/backend/src/ymir_controller/controller/utils/gpu_utils.py index 787b8a314f..ed7e3c70d6 100644 --- a/ymir/backend/src/ymir_controller/controller/utils/gpu_utils.py +++ b/ymir/backend/src/ymir_controller/controller/utils/gpu_utils.py @@ -1,6 +1,6 @@ import logging import time -from typing import List, Dict, Set +from typing import List, Dict, Optional, Set import sentry_sdk from pynvml import ( @@ -69,11 +69,14 @@ def get_available_gpus(cls) -> List: return list(ava_gpus) + # Return None if failed to fulfill demanded gpu. @classmethod - def find_gpu_ids_by_config(cls, gpu_count: int, lock_gpu: bool = False) -> str: + def find_gpu_ids_by_config(cls, gpu_count: int, lock_gpu: bool = False) -> Optional[str]: + if gpu_count <= 0: + return "" free_gpus = cls.get_available_gpus() if len(free_gpus) < gpu_count: - return "" + return None gpus = free_gpus[0:gpu_count] if lock_gpu: cls.add_locked_gpus(gpus) diff --git a/ymir/backend/src/ymir_controller/controller/utils/invoker_call.py b/ymir/backend/src/ymir_controller/controller/utils/invoker_call.py index b483b58bb2..f1d6267590 100644 --- a/ymir/backend/src/ymir_controller/controller/utils/invoker_call.py +++ b/ymir/backend/src/ymir_controller/controller/utils/invoker_call.py @@ -1,6 +1,7 @@ from typing import Any, List from proto import backend_pb2 +from mir.protos import mir_command_pb2 as mir_cmd_pb def make_cmd_request(user_id: str = None, @@ -18,6 +19,7 @@ def make_cmd_request(user_id: str = None, asset_dir: str = None, model_config: str = None, model_hash: str = None, + model_stage: str = None, force: bool = None, commit_message: str = None, executant_name: str = None, @@ -28,7 +30,7 @@ def make_cmd_request(user_id: str = None, sampling_count: int = None, sampling_rate: float = None, task_parameters: str = None, - evaluate_config: backend_pb2.EvaluateConfig = None) -> backend_pb2.GeneralReq: + evaluate_config: mir_cmd_pb.EvaluateConfig = None) -> backend_pb2.GeneralReq: request = backend_pb2.GeneralReq() if user_id is not None: request.user_id = user_id @@ -64,6 +66,8 @@ def make_cmd_request(user_id: str = None, request.model_config = model_config if model_hash is not None: request.model_hash = model_hash + if model_stage is not None: + request.model_stage = model_stage if req_create_task is not None: request.req_create_task.CopyFrom(req_create_task) if executant_name is not None: @@ -107,12 +111,13 @@ def make_invoker_cmd_call(invoker: Any, async_mode: bool = False, merge_strategy: int = None, model_hash: str = None, + model_stage: str = None, docker_image_config: str = None, terminated_task_type: str = None, sampling_count: int = None, sampling_rate: float = None, work_dir: str = '', - evaluate_config: backend_pb2.EvaluateConfig = None) -> backend_pb2.GeneralReq: + evaluate_config: mir_cmd_pb.EvaluateConfig = None) -> backend_pb2.GeneralReq: request = make_cmd_request(req_type=req_type, user_id=user_id, repo_id=repo_id, @@ -131,6 +136,7 @@ def make_invoker_cmd_call(invoker: Any, executant_name=executant_name, merge_strategy=merge_strategy, model_hash=model_hash, + model_stage=model_stage, docker_image_config=docker_image_config, terminated_task_type=terminated_task_type, sampling_count=sampling_count, diff --git a/ymir/backend/src/ymir_controller/controller/utils/invoker_mapping.py 
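The gpu_utils hunk above changes find_gpu_ids_by_config to return Optional[str] with three distinct outcomes: an empty string when no GPU was requested, None when the demand cannot be met, and the selected ids otherwise. Callers must therefore test "is None" rather than truthiness. A sketch of the contract, assuming ids come back comma-separated (the join format is not shown in this diff):

from typing import List, Optional


def find_gpu_ids(free_gpus: List[str], gpu_count: int) -> Optional[str]:
    # Mirrors the new contract: "" = nothing requested, None = cannot fulfill.
    if gpu_count <= 0:
        return ""
    if len(free_gpus) < gpu_count:
        return None
    return ",".join(free_gpus[:gpu_count])


gpu_ids = find_gpu_ids(["0", "1"], 3)
if gpu_ids is None:  # truthiness would conflate None with ""
    print("not enough free GPUs, reject the task")
else:
    print(f"run with gpu_ids={gpu_ids!r}")  # '' means CPU-only is acceptable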
b/ymir/backend/src/ymir_controller/controller/utils/invoker_mapping.py index 13e067146d..bda426ee67 100644 --- a/ymir/backend/src/ymir_controller/controller/utils/invoker_mapping.py +++ b/ymir/backend/src/ymir_controller/controller/utils/invoker_mapping.py @@ -1,9 +1,5 @@ from controller.invoker import ( - invoker_cmd_branch_checkout, invoker_cmd_branch_commit, - invoker_cmd_branch_create, - invoker_cmd_branch_delete, - invoker_cmd_branch_list, invoker_cmd_evaluate, invoker_cmd_filter, invoker_cmd_gpu_info, @@ -11,11 +7,11 @@ invoker_cmd_init, invoker_cmd_label_add, invoker_cmd_label_get, - invoker_cmd_log, invoker_cmd_merge, invoker_cmd_pull_image, invoker_cmd_repo_check, invoker_cmd_repo_clear, + invoker_cmd_sandbox_version, invoker_cmd_sampling, invoker_cmd_terminate, invoker_cmd_user_create, @@ -25,10 +21,6 @@ from proto import backend_pb2 RequestTypeToInvoker = { - backend_pb2.CMD_BRANCH_CHECKOUT: invoker_cmd_branch_checkout.BranchCheckoutInvoker, - backend_pb2.CMD_BRANCH_CREATE: invoker_cmd_branch_create.BranchCreateInvoker, - backend_pb2.CMD_BRANCH_DEL: invoker_cmd_branch_delete.BranchDeleteInvoker, - backend_pb2.CMD_BRANCH_LIST: invoker_cmd_branch_list.BranchListInvoker, backend_pb2.CMD_COMMIT: invoker_cmd_branch_commit.BranchCommitInvoker, backend_pb2.CMD_EVALUATE: invoker_cmd_evaluate.EvaluateInvoker, backend_pb2.CMD_FILTER: invoker_cmd_filter.FilterBranchInvoker, @@ -37,14 +29,14 @@ backend_pb2.CMD_INIT: invoker_cmd_init.InitInvoker, backend_pb2.CMD_LABEL_ADD: invoker_cmd_label_add.LabelAddInvoker, backend_pb2.CMD_LABEL_GET: invoker_cmd_label_get.LabelGetInvoker, - backend_pb2.CMD_LOG: invoker_cmd_log.LogInvoker, backend_pb2.CMD_MERGE: invoker_cmd_merge.MergeInvoker, backend_pb2.CMD_PULL_IMAGE: invoker_cmd_pull_image.ImageHandler, backend_pb2.CMD_TERMINATE: invoker_cmd_terminate.CMDTerminateInvoker, backend_pb2.CMD_REPO_CHECK: invoker_cmd_repo_check.RepoCheckInvoker, backend_pb2.CMD_REPO_CLEAR: invoker_cmd_repo_clear.RepoClearInvoker, + backend_pb2.CMD_SAMPLING: invoker_cmd_sampling.SamplingInvoker, + backend_pb2.CMD_VERSIONS_GET: invoker_cmd_sandbox_version.SandboxVersionInvoker, backend_pb2.REPO_CREATE: invoker_cmd_init.InitInvoker, backend_pb2.TASK_CREATE: invoker_task_factory.CreateTaskInvokerFactory, backend_pb2.USER_CREATE: invoker_cmd_user_create.UserCreateInvoker, - backend_pb2.CMD_SAMPLING: invoker_cmd_sampling.SamplingInvoker, } diff --git a/ymir/backend/src/ymir_controller/controller/utils/revs.py b/ymir/backend/src/ymir_controller/controller/utils/revs.py index 9732886ecd..b537a32883 100644 --- a/ymir/backend/src/ymir_controller/controller/utils/revs.py +++ b/ymir/backend/src/ymir_controller/controller/utils/revs.py @@ -1,6 +1,7 @@ from typing import List from proto import backend_pb2 +from mir.protos import mir_command_pb2 as mir_cmd_pb def join_tvt_branch_tid(branch_id: str, tvt_type: str = None, tid: str = None) -> str: @@ -16,9 +17,9 @@ def join_tvt_branch_tid(branch_id: str, tvt_type: str = None, tid: str = None) - def build_tvt_dataset_id(tvt_dataset_id: str) -> backend_pb2.TaskReqTraining.TrainingDatasetType: _prefix_to_tvt = { - 'tr': backend_pb2.TvtTypeTraining, - 'va': backend_pb2.TvtTypeValidation, - 'te': backend_pb2.TvtTypeTest, + 'tr': mir_cmd_pb.TvtTypeTraining, + 'va': mir_cmd_pb.TvtTypeValidation, + 'te': mir_cmd_pb.TvtTypeTest, } dataset_type = backend_pb2.TaskReqTraining.TrainingDatasetType() split_data = tvt_dataset_id.split(':') @@ -33,12 +34,12 @@ def build_tvt_dataset_id(tvt_dataset_id: str) -> backend_pb2.TaskReqTraining.Tra return 
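The invoker_mapping hunk above prunes the retired branch/log commands and registers CMD_SAMPLING and CMD_VERSIONS_GET in the RequestTypeToInvoker table. The dispatch idea, sketched with stand-in classes and an assumed enum value (the real keys are backend_pb2 request types):

from typing import Dict, Type


class BaseInvoker:
    def server_invoke(self) -> str:
        raise NotImplementedError


class SamplingInvoker(BaseInvoker):
    def server_invoke(self) -> str:
        return "sampling done"


CMD_SAMPLING = 20  # stand-in value; the real constant lives in backend_pb2

REQUEST_TYPE_TO_INVOKER: Dict[int, Type[BaseInvoker]] = {
    CMD_SAMPLING: SamplingInvoker,
}


def dispatch(req_type: int) -> str:
    invoker_cls = REQUEST_TYPE_TO_INVOKER.get(req_type)
    if invoker_cls is None:
        # Retired commands (CMD_BRANCH_*, CMD_LOG) now fall through here.
        raise ValueError(f"unsupported request type: {req_type}")
    return invoker_cls().server_invoke()


print(dispatch(CMD_SAMPLING))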
dataset_type -def join_tvt_dataset_id(tvt_type: backend_pb2.TvtType, dataset_id: str) -> str: +def join_tvt_dataset_id(tvt_type: mir_cmd_pb.TvtType, dataset_id: str) -> str: _tvt_to_prefix = { - backend_pb2.TvtTypeUnknown: '', - backend_pb2.TvtTypeTraining: 'tr:', - backend_pb2.TvtTypeValidation: 'va:', - backend_pb2.TvtTypeTest: 'te:', + mir_cmd_pb.TvtTypeUnknown: '', + mir_cmd_pb.TvtTypeTraining: 'tr:', + mir_cmd_pb.TvtTypeValidation: 'va:', + mir_cmd_pb.TvtTypeTest: 'te:', } return ''.join([_tvt_to_prefix[tvt_type], dataset_id]) diff --git a/ymir/backend/src/ymir_controller/controller/utils/tasks_util.py b/ymir/backend/src/ymir_controller/controller/utils/tasks_util.py index 6a738ef161..700889c5d2 100644 --- a/ymir/backend/src/ymir_controller/controller/utils/tasks_util.py +++ b/ymir/backend/src/ymir_controller/controller/utils/tasks_util.py @@ -8,12 +8,11 @@ def register_monitor_log(task_id: str, - user_id: str, log_path_weights: Dict[str, float], description: str = None) -> None: resp = requests.post( url=f"{common_task_config.MONITOR_URL}/api/v1/tasks", - json=dict(task_id=task_id, user_id=user_id, log_path_weights=log_path_weights, description=description), + json=dict(task_id=task_id, log_path_weights=log_path_weights, description=description), timeout=5, ) diff --git a/ymir/backend/src/ymir_controller/controller/utils/utils.py b/ymir/backend/src/ymir_controller/controller/utils/utils.py index 20c5fe70cf..be03e080b0 100644 --- a/ymir/backend/src/ymir_controller/controller/utils/utils.py +++ b/ymir/backend/src/ymir_controller/controller/utils/utils.py @@ -1,8 +1,9 @@ +from functools import wraps import logging +from pathlib import Path import re import subprocess import time -from functools import wraps from typing import Callable, Dict, List from controller.config import label_task as label_task_config @@ -11,6 +12,7 @@ from controller.label_model.label_studio import LabelStudio from id_definition import task_id as task_id_proto from id_definition.error_codes import CTLResponseCode +from mir.protos import mir_command_pb2 as mir_cmd_pb from proto import backend_pb2 @@ -66,12 +68,12 @@ def sub_task_id(task_id: str, offset: int) -> str: return task_id[0] + str(offset) + task_id[2:] -def annotation_format_str(format: backend_pb2.LabelFormat) -> str: +def annotation_format_str(format: mir_cmd_pb.AnnoFormat) -> str: format_enum_dict = { - backend_pb2.LabelFormat.NO_ANNOTATION: 'none', - backend_pb2.LabelFormat.PASCAL_VOC: 'voc', - backend_pb2.LabelFormat.IF_ARK: 'ark', - backend_pb2.LabelFormat.LABEL_STUDIO_JSON: 'ls_json', + mir_cmd_pb.AnnoFormat.AF_NO_ANNOTATION: 'none', + mir_cmd_pb.AnnoFormat.AF_DET_PASCAL_VOC: 'det-voc', + mir_cmd_pb.AnnoFormat.AF_DET_ARK_JSON: 'det-ark', + mir_cmd_pb.AnnoFormat.AF_DET_LS_JSON: 'det-ls-json', } return format_enum_dict[format] @@ -97,3 +99,8 @@ def create_label_instance() -> LabelBase: raise ValueError("Error! 
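revs.py above keeps a prefix table ('tr:', 'va:', 'te:') to move between a TvtType enum and the 'prefix:dataset_id' string form, and utils.py applies the same table-lookup style to annotation formats. A round-trip sketch with string stand-ins for the mir_cmd_pb.TvtType values:

from typing import Optional, Tuple

# Prefix table mirroring revs.py; keys are stand-ins for mir_cmd_pb.TvtType.
TVT_TO_PREFIX = {"unknown": "", "training": "tr:", "validation": "va:", "test": "te:"}
PREFIX_TO_TVT = {"tr": "training", "va": "validation", "te": "test"}


def join_tvt_dataset_id(tvt_type: str, dataset_id: str) -> str:
    return TVT_TO_PREFIX[tvt_type] + dataset_id


def split_tvt_dataset_id(tvt_dataset_id: str) -> Tuple[Optional[str], str]:
    parts = tvt_dataset_id.split(":")
    if len(parts) == 2:
        return PREFIX_TO_TVT[parts[0]], parts[1]
    return None, parts[0]


assert split_tvt_dataset_id(join_tvt_dataset_id("training", "t000abc")) == ("training", "t000abc")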
Please setting your label tools") return label_instance + + +def ensure_dirs_exist(paths: List[str]) -> None: + for path in paths: + Path(path).mkdir(parents=True, exist_ok=True) diff --git a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_branch_checkout.py b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_branch_checkout.py deleted file mode 100644 index ab0f0c4ffd..0000000000 --- a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_branch_checkout.py +++ /dev/null @@ -1,85 +0,0 @@ -import logging -import os -import shutil -import unittest -from unittest import mock - -from google.protobuf.json_format import MessageToDict, ParseDict - -import tests.utils as test_utils -from controller.utils.invoker_call import make_invoker_cmd_call -from controller.utils.invoker_mapping import RequestTypeToInvoker -from proto import backend_pb2 - -RET_ID = 'commit t000aaaabbbbbbzzzzzzzzzzzzzzz3\nabc' - - -class TestInvokerCheckoutCommit(unittest.TestCase): - def __init__(self, methodName: str) -> None: - # dir structure: - # test_involer_CLSNAME_sandbox_root - # ├── media_storage_root - # └── test_user - # └── ymir-dvc-test - super().__init__(methodName=methodName) - self._user_name = "user" - self._mir_repo_name = "repoid" - self._storage_name = "media_storage_root" - self._task_id = 't000aaaabbbbbbzzzzzzzzzzzzzzz5' - self._singleton_op = 't000aaaabbbbbbzzzzzzzzzzzzzzz4' - - self._sandbox_root = test_utils.dir_test_root(self.id().split(".")[-3:]) - self._user_root = os.path.join(self._sandbox_root, self._user_name) - self._mir_repo_root = os.path.join(self._user_root, self._mir_repo_name) - self._storage_root = os.path.join(self._sandbox_root, self._storage_name) - - def setUp(self): - test_utils.check_commands() - self._prepare_dirs() - self._prepare_mir_repo() - logging.info("preparing done.") - - def tearDown(self): - if os.path.isdir(self._sandbox_root): - shutil.rmtree(self._sandbox_root) - pass - - # custom: env prepare - def _prepare_dirs(self): - if os.path.isdir(self._sandbox_root): - logging.info("sandbox root exists, remove it first") - shutil.rmtree(self._sandbox_root) - os.makedirs(self._sandbox_root) - os.mkdir(self._user_root) - os.mkdir(self._mir_repo_root) - os.mkdir(self._storage_root) - - def _prepare_mir_repo(self): - # init repo - test_utils.mir_repo_init(self._mir_repo_root) - # prepare branch a - - def _mock_run_func(*args, **kwargs): - ret = type('', (), {})() - ret.returncode = 0 - ret.stdout = RET_ID - return ret - - @mock.patch("subprocess.run", side_effect=_mock_run_func) - def test_invoker_00(self, mock_run): - response = make_invoker_cmd_call(invoker=RequestTypeToInvoker[backend_pb2.CMD_BRANCH_CHECKOUT], - sandbox_root=self._sandbox_root, - req_type=backend_pb2.CMD_BRANCH_CHECKOUT, - user_id=self._user_name, - repo_id=self._mir_repo_name, - task_id=self._task_id, - singleton_op=self._singleton_op) - print(MessageToDict(response)) - - expected_cmd = "mir checkout --root {0} {1}".format(self._mir_repo_root, self._singleton_op) - mock_run.assert_called_once_with(expected_cmd.split(' '), capture_output=True, text=True) - - expected_ret = backend_pb2.GeneralResp() - expected_dict = {'message': RET_ID} - ParseDict(expected_dict, expected_ret) - self.assertEqual(response, expected_ret) diff --git a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_branch_create.py b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_branch_create.py deleted file mode 100644 index b70ad126de..0000000000 --- 
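The ensure_dirs_exist helper added to utils.py above is a thin wrapper over Path.mkdir; because parents=True and exist_ok=True are set, it is safe to call repeatedly and with nested paths. A usage sketch (the paths are illustrative, not the controller's real layout):

from pathlib import Path
from typing import List


def ensure_dirs_exist(paths: List[str]) -> None:
    for path in paths:
        Path(path).mkdir(parents=True, exist_ok=True)


work_dir = "/tmp/ymir-demo/work_dir"
ensure_dirs_exist([f"{work_dir}/in", f"{work_dir}/out"])
ensure_dirs_exist([f"{work_dir}/in"])  # idempotent: reruns are safe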
a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_branch_create.py +++ /dev/null @@ -1,85 +0,0 @@ -import logging -import os -import shutil -import unittest -from unittest import mock - -from google.protobuf.json_format import MessageToDict, ParseDict - -import tests.utils as test_utils -from controller.utils.invoker_call import make_invoker_cmd_call -from controller.utils.invoker_mapping import RequestTypeToInvoker -from proto import backend_pb2 - -RET_ID = 'commit t000aaaabbbbbbzzzzzzzzzzzzzzz3\nabc' - - -class TestInvokerBranchCreate(unittest.TestCase): - def __init__(self, methodName: str) -> None: - # dir structure: - # test_involer_CLSNAME_sandbox_root - # ├── media_storage_root - # └── test_user - # └── ymir-dvc-test - super().__init__(methodName=methodName) - self._user_name = "user" - self._mir_repo_name = "repoid" - self._storage_name = "media_storage_root" - self._task_id = 't000aaaabbbbbbzzzzzzzzzzzzzzz5' - self._singleton_op = 't000aaaabbbbbbzzzzzzzzzzzzzzz4' - - self._sandbox_root = test_utils.dir_test_root(self.id().split(".")[-3:]) - self._user_root = os.path.join(self._sandbox_root, self._user_name) - self._mir_repo_root = os.path.join(self._user_root, self._mir_repo_name) - self._storage_root = os.path.join(self._sandbox_root, self._storage_name) - - def setUp(self): - test_utils.check_commands() - self._prepare_dirs() - self._prepare_mir_repo() - logging.info("preparing done.") - - def tearDown(self): - # if os.path.isdir(self._sandbox_root): - # shutil.rmtree(self._sandbox_root) - pass - - # custom: env prepare - def _prepare_dirs(self): - if os.path.isdir(self._sandbox_root): - logging.info("sandbox root exists, remove it first") - shutil.rmtree(self._sandbox_root) - os.makedirs(self._sandbox_root) - os.mkdir(self._user_root) - os.mkdir(self._mir_repo_root) - os.mkdir(self._storage_root) - - def _prepare_mir_repo(self): - # init repo - test_utils.mir_repo_init(self._mir_repo_root) - # prepare branch a - - def _mock_run_func(*args, **kwargs): - ret = type('', (), {})() - ret.returncode = 0 - ret.stdout = RET_ID - return ret - - @mock.patch("subprocess.run", side_effect=_mock_run_func) - def test_invoker_00(self, mock_run): - response = make_invoker_cmd_call(invoker=RequestTypeToInvoker[backend_pb2.CMD_BRANCH_CREATE], - sandbox_root=self._sandbox_root, - req_type=backend_pb2.CMD_BRANCH_CREATE, - user_id=self._user_name, - repo_id=self._mir_repo_name, - task_id=self._task_id, - singleton_op=self._singleton_op) - print(MessageToDict(response)) - - expected_cmd = "mir checkout --root {0} -b {1}".format(self._mir_repo_root, self._singleton_op) - mock_run.assert_called_once_with(expected_cmd.split(' '), capture_output=True, text=True) - - expected_ret = backend_pb2.GeneralResp() - expected_dict = {'message': RET_ID} - ParseDict(expected_dict, expected_ret) - self.assertEqual(response, expected_ret) diff --git a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_branch_delete.py b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_branch_delete.py deleted file mode 100644 index 05a76df252..0000000000 --- a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_branch_delete.py +++ /dev/null @@ -1,86 +0,0 @@ -import logging -import os -import shutil -import unittest -from unittest import mock - -from google.protobuf.json_format import MessageToDict, ParseDict - -import tests.utils as test_utils -from controller.utils.invoker_call import make_invoker_cmd_call -from controller.utils.invoker_mapping import RequestTypeToInvoker -from proto 
import backend_pb2 - -RET_ID = 'commit t000aaaabbbbbbzzzzzzzzzzzzzzz3\nabc' - - -class TestInvokerBranchDelete(unittest.TestCase): - def __init__(self, methodName: str) -> None: - # dir structure: - # test_involer_CLSNAME_sandbox_root - # ├── media_storage_root - # └── test_user - # └── ymir-dvc-test - super().__init__(methodName=methodName) - self._user_name = "user" - self._mir_repo_name = "repoid" - self._storage_name = "media_storage_root" - self._task_id = 't000aaaabbbbbbzzzzzzzzzzzzzzz5' - self._singleton_op = 't000aaaabbbbbbzzzzzzzzzzzzzzz4' - - self._sandbox_root = test_utils.dir_test_root(self.id().split(".")[-3:]) - self._user_root = os.path.join(self._sandbox_root, self._user_name) - self._mir_repo_root = os.path.join(self._user_root, self._mir_repo_name) - self._storage_root = os.path.join(self._sandbox_root, self._storage_name) - - def setUp(self): - test_utils.check_commands() - self._prepare_dirs() - self._prepare_mir_repo() - logging.info("preparing done.") - - def tearDown(self): - if os.path.isdir(self._sandbox_root): - shutil.rmtree(self._sandbox_root) - pass - - # custom: env prepare - def _prepare_dirs(self): - if os.path.isdir(self._sandbox_root): - logging.info("sandbox root exists, remove it first") - shutil.rmtree(self._sandbox_root) - os.makedirs(self._sandbox_root) - os.mkdir(self._user_root) - os.mkdir(self._mir_repo_root) - os.mkdir(self._storage_root) - - def _prepare_mir_repo(self): - # init repo - test_utils.mir_repo_init(self._mir_repo_root) - # prepare branch a - - def _mock_run_func(*args, **kwargs): - ret = type('', (), {})() - ret.returncode = 0 - ret.stdout = RET_ID - return ret - - @mock.patch("subprocess.run", side_effect=_mock_run_func) - def test_invoker_00(self, mock_run): - response = make_invoker_cmd_call(invoker=RequestTypeToInvoker[backend_pb2.CMD_BRANCH_DEL], - sandbox_root=self._sandbox_root, - req_type=backend_pb2.CMD_BRANCH_DEL, - user_id=self._user_name, - repo_id=self._mir_repo_name, - task_id=self._task_id, - singleton_op=self._singleton_op, - force=False) - print(MessageToDict(response)) - - expected_cmd = "mir branch --root {0} -d {1}".format(self._mir_repo_root, self._singleton_op) - mock_run.assert_called_once_with(expected_cmd.split(' '), capture_output=True, text=True) - - expected_ret = backend_pb2.GeneralResp() - expected_dict = {'message': RET_ID} - ParseDict(expected_dict, expected_ret) - self.assertEqual(response, expected_ret) diff --git a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_branch_list.py b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_branch_list.py deleted file mode 100644 index f61accb922..0000000000 --- a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_branch_list.py +++ /dev/null @@ -1,84 +0,0 @@ -import logging -import os -import shutil -import unittest -from unittest import mock - -from google.protobuf.json_format import MessageToDict, ParseDict - -import tests.utils as test_utils -from controller.utils.invoker_call import make_invoker_cmd_call -from controller.utils.invoker_mapping import RequestTypeToInvoker -from proto import backend_pb2 - -RET_ID = 'commit t000aaaabbbbbbzzzzzzzzzzzzzzz3\nabc' - - -class TestInvokerBranchList(unittest.TestCase): - def __init__(self, methodName: str) -> None: - # dir structure: - # test_involer_CLSNAME_sandbox_root - # ├── media_storage_root - # └── test_user - # └── ymir-dvc-test - super().__init__(methodName=methodName) - self._user_name = "user" - self._mir_repo_name = "repoid" - self._storage_name = "media_storage_root" - 
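All of the deleted branch-command tests in this region stub subprocess.run the same way: a side_effect returning a bare object that carries only the returncode and stdout attributes the invoker reads from CompletedProcess. The pattern in isolation:

import subprocess
from unittest import mock


def _mock_run_func(*args, **kwargs):
    # Same trick as the deleted tests above: an anonymous object with just
    # the two attributes the caller inspects.
    ret = type("", (), {})()
    ret.returncode = 0
    ret.stdout = "commit t000aaaabbbbbbzzzzzzzzzzzzzzz3\nabc"
    return ret


with mock.patch("subprocess.run", side_effect=_mock_run_func) as mock_run:
    result = subprocess.run(["mir", "branch", "--root", "/tmp/repo"],
                            capture_output=True, text=True)
    assert result.stdout.startswith("commit ")
    mock_run.assert_called_once()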
self._task_id = 't000aaaabbbbbbzzzzzzzzzzzzzzz5' - self._singleton_op = 't000aaaabbbbbbzzzzzzzzzzzzzzz4' - - self._sandbox_root = test_utils.dir_test_root(self.id().split(".")[-3:]) - self._user_root = os.path.join(self._sandbox_root, self._user_name) - self._mir_repo_root = os.path.join(self._user_root, self._mir_repo_name) - self._storage_root = os.path.join(self._sandbox_root, self._storage_name) - - def setUp(self): - test_utils.check_commands() - self._prepare_dirs() - self._prepare_mir_repo() - logging.info("preparing done.") - - def tearDown(self): - if os.path.isdir(self._sandbox_root): - shutil.rmtree(self._sandbox_root) - pass - - # custom: env prepare - def _prepare_dirs(self): - if os.path.isdir(self._sandbox_root): - logging.info("sandbox root exists, remove it first") - shutil.rmtree(self._sandbox_root) - os.makedirs(self._sandbox_root) - os.mkdir(self._user_root) - os.mkdir(self._mir_repo_root) - os.mkdir(self._storage_root) - - def _prepare_mir_repo(self): - # init repo - test_utils.mir_repo_init(self._mir_repo_root) - # prepare branch a - - def _mock_run_func(*args, **kwargs): - ret = type('', (), {})() - ret.returncode = 0 - ret.stdout = RET_ID - return ret - - @mock.patch("subprocess.run", side_effect=_mock_run_func) - def test_invoker_00(self, mock_run): - response = make_invoker_cmd_call(invoker=RequestTypeToInvoker[backend_pb2.CMD_BRANCH_LIST], - sandbox_root=self._sandbox_root, - req_type=backend_pb2.CMD_BRANCH_LIST, - user_id=self._user_name, - repo_id=self._mir_repo_name, - task_id=self._task_id) - print(MessageToDict(response)) - - expected_cmd = "mir branch --root {0}".format(self._mir_repo_root) - mock_run.assert_called_once_with(expected_cmd.split(' '), capture_output=True, text=True) - - expected_ret = backend_pb2.GeneralResp() - expected_dict = {'extStrs': RET_ID.split('\n')} - ParseDict(expected_dict, expected_ret) - self.assertEqual(response, expected_ret) diff --git a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_evaluate.py b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_evaluate.py index 9ee4e0f6b6..7ff3f5ea34 100644 --- a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_evaluate.py +++ b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_evaluate.py @@ -6,6 +6,7 @@ from controller.utils.invoker_call import make_invoker_cmd_call from controller.utils.invoker_mapping import RequestTypeToInvoker from proto import backend_pb2 +from mir.protos import mir_command_pb2 as mir_cmd_pb import tests.utils as test_utils @@ -61,15 +62,12 @@ def _prepare_mir_repo(self): # protected: mocked def _mock_run_func(*args, **kwargs): - ret = type('', (), {})() - ret.returncode = 0 - ret.stdout = 'done' - return ret + return mir_cmd_pb.Evaluation() # public: test cases - @mock.patch("subprocess.run", side_effect=_mock_run_func) + @mock.patch("controller.invoker.invoker_cmd_evaluate.det_evaluate_datasets", side_effect=_mock_run_func) def test_evaluate_00(self, mock_run): - evaluate_config = backend_pb2.EvaluateConfig() + evaluate_config = mir_cmd_pb.EvaluateConfig() evaluate_config.conf_thr = self._conf_thr evaluate_config.iou_thrs_interval = self._iou_thrs_interval @@ -83,13 +81,3 @@ def test_evaluate_00(self, mock_run): singleton_op=self._gt_dataset_id, evaluate_config=evaluate_config) self.assertEqual(response.code, 0) - self.assertEqual(response.message, 'done') - - work_dir = os.path.join(self._sandbox_root, "work_dir", backend_pb2.RequestType.Name(backend_pb2.CMD_EVALUATE), - self._task_id) - expected_cmd = f"mir 
evaluate --root {self._mir_repo_root} --dst-rev {self._task_id}@{self._task_id}" - expected_cmd += f" --src-revs {self._in_dataset_ids[0]}" - expected_cmd += f" --gt-rev {self._gt_dataset_id}@{self._gt_dataset_id}" - expected_cmd += f" -w {work_dir} --conf-thr {self._conf_thr:.2f}" - expected_cmd += f" --iou-thrs {self._iou_thrs_interval}" - mock_run.assert_called_once_with(expected_cmd.split(' '), capture_output=True, text=True) diff --git a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_filter.py b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_filter.py index 592b3047a1..b292f2be73 100644 --- a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_filter.py +++ b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_filter.py @@ -68,7 +68,7 @@ def _mock_run_func(*args, **kwargs): @mock.patch("subprocess.run", side_effect=_mock_run_func) def test_invoker_00(self, mock_run): - labels.UserLabels.get_main_names = mock.Mock(return_value=["car", "person"]) + labels.UserLabels.main_name_for_ids = mock.Mock(return_value=["car", "person"]) in_class_ids = [1, 2] ex_class_ids = [3] response = make_invoker_cmd_call(invoker=RequestTypeToInvoker[backend_pb2.CMD_FILTER], @@ -88,9 +88,9 @@ def test_invoker_00(self, mock_run): self._task_id) os.makedirs(working_dir, exist_ok=True) - expected_cmd = "mir filter --root {0} --dst-rev {1}@{1} --src-revs {2}@{2} -w {3} -p {4} -P {5}".format( + expect_cmd = "mir filter --root {0} --dst-rev {1}@{1} --src-revs {2}@{2} -w {3} --cis {4} --ex-cis {5}".format( self._mir_repo_root, self._task_id, self.in_dataset_ids[0], working_dir, 'car;person', 'car;person') - mock_run.assert_called_once_with(expected_cmd.split(' '), capture_output=True, text=True) + mock_run.assert_called_once_with(expect_cmd.split(' '), capture_output=True, text=True) expected_ret = backend_pb2.GeneralResp() expected_dict = {'message': RET_ID} diff --git a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_inference.py b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_inference.py index 557f280d24..df2dca0397 100644 --- a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_inference.py +++ b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_inference.py @@ -4,12 +4,12 @@ import shutil import unittest from unittest import mock +import yaml -import tests.utils as test_utils -from controller.invoker.invoker_cmd_inference import InferenceCMDInvoker from controller.utils.invoker_call import make_invoker_cmd_call from controller.utils.invoker_mapping import RequestTypeToInvoker from proto import backend_pb2 +import tests.utils as test_utils class TestInvokerCMDInference(unittest.TestCase): @@ -74,14 +74,13 @@ def test_invoker_00(self, mock_run): } inference_image = "test_infer_image" model_hash = "model_hash_id" - assets_config = { - "modelskvlocation": self._storage_root, - } + model_stage = "model_stage_name" + assets_config = {"modelskvlocation": self._storage_root, 'server_runtime': 'nvidia'} mock_json = { "detection": { "pic_hash": { - "annotations": [{ + "boxes": [{ "box": { "x": 300, "y": 35, @@ -90,35 +89,45 @@ def test_invoker_00(self, mock_run): }, "class_name": "no_helmet_head", "score": 0.991247296333313, - "class_id": 3, + "class_id": 0, }], } } } - with mock.patch.object(InferenceCMDInvoker, "get_inference_result", return_value=mock_json): - make_invoker_cmd_call( - invoker=RequestTypeToInvoker[backend_pb2.CMD_INFERENCE], - sandbox_root=self._sandbox_root, - assets_config=assets_config, - 
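The evaluate test above no longer mocks subprocess.run; it patches controller.invoker.invoker_cmd_evaluate.det_evaluate_datasets, i.e. the name in the module that uses the function, because a plain "from x import y" binds y into the importer's namespace. A self-contained sketch of that rule (the module names here are throwaway stand-ins, built in memory so the example runs anywhere):

import sys
import types
from unittest import mock

# A fake library module exporting the function to be patched.
lib = types.ModuleType("lib")
lib.det_evaluate_datasets = lambda: "real evaluation"
sys.modules["lib"] = lib

# A fake invoker module that imports the function by name.
invoker_mod = types.ModuleType("invoker_mod")
exec(
    "from lib import det_evaluate_datasets\n"
    "def evaluate():\n"
    "    return det_evaluate_datasets()\n",
    invoker_mod.__dict__,
)
sys.modules["invoker_mod"] = invoker_mod

# Patch the *importer's* reference; patching lib.det_evaluate_datasets alone
# would miss the name already bound inside invoker_mod.
with mock.patch("invoker_mod.det_evaluate_datasets", return_value="mocked"):
    assert invoker_mod.evaluate() == "mocked"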
req_type=backend_pb2.CMD_INFERENCE, - user_id=self._user_name, - repo_id=self._mir_repo_name, - task_id=self._task_id, - singleton_op=inference_image, - docker_image_config=json.dumps(training_config), - model_hash=model_hash, - ) - + # Store inference data. working_dir = os.path.join(self._sandbox_root, "work_dir", backend_pb2.RequestType.Name(backend_pb2.CMD_INFERENCE), self._task_id) + output_filename = os.path.join(working_dir, "out", "infer-result.json") + os.makedirs(os.path.join(working_dir, "out"), exist_ok=True) + with open(output_filename, 'w') as f: + f.write(json.dumps(mock_json)) + # store user labels. + with open(os.path.join(self._user_root, 'labels.yaml'), 'w') as f: + yaml.safe_dump({"labels": [{"id": 0, "name": "no_helmet_head"}]}, f) + + make_invoker_cmd_call( + invoker=RequestTypeToInvoker[backend_pb2.CMD_INFERENCE], + sandbox_root=self._sandbox_root, + assets_config=assets_config, + req_type=backend_pb2.CMD_INFERENCE, + user_id=self._user_name, + repo_id=self._mir_repo_name, + task_id=self._task_id, + singleton_op=inference_image, + docker_image_config=json.dumps(training_config), + model_hash=model_hash, + model_stage=model_stage, + work_dir=working_dir, + ) + os.makedirs(working_dir, exist_ok=True) config_file = os.path.join(working_dir, "inference_config.yaml") - index_file = os.path.join(working_dir, "inference_pic_index.txt") + index_file = os.path.join(working_dir, "index.txt") cmd = (f"mir infer --root {self._mir_repo_root} -w {working_dir} --model-location {self._storage_root} " - f"--index-file {index_file} --model-hash {model_hash} " + f"--index-file {index_file} --model-hash {model_hash}@{model_stage} " f"--task-config-file {config_file} --executor {inference_image}") mock_run.assert_has_calls(calls=[ diff --git a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_init.py b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_init.py index 297d27a6a0..66e0317e9f 100644 --- a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_init.py +++ b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_init.py @@ -37,8 +37,9 @@ def __init__(self, methodName: str) -> None: def setUp(self): test_utils.check_commands() self._prepare_dirs() - labels.create_empty(label_storage_file=os.path.join(self._user_root, 'labels.yaml')) - labels.UserLabels.get_main_names = mock.Mock(return_value=["person", "cat"]) + labels.load_or_create_userlabels(label_storage_file=os.path.join(self._user_root, 'labels.yaml'), + create_ok=True) + labels.UserLabels.main_name_for_ids = mock.Mock(return_value=["person", "cat"]) logging.info("preparing done.") def tearDown(self): diff --git a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_log.py b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_log.py deleted file mode 100644 index 2ae2ddff60..0000000000 --- a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_log.py +++ /dev/null @@ -1,84 +0,0 @@ -import logging -import os -import shutil -import unittest -from unittest import mock - -from google.protobuf.json_format import MessageToDict, ParseDict - -import tests.utils as test_utils -from controller.utils.invoker_call import make_invoker_cmd_call -from controller.utils.invoker_mapping import RequestTypeToInvoker -from proto import backend_pb2 - -RET_ID = 'commit c6ccc144ef6249519a21c9b29b6c074bff4e81f3\nabc' - - -class TestInvokerLog(unittest.TestCase): - def __init__(self, methodName: str) -> None: - # dir structure: - # test_involer_CLSNAME_sandbox_root - # ├── 
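The inference test above shows the new composite model reference: mir infer now receives --model-hash <hash>@<stage> instead of a bare hash. A small helper pair for that convention (a sketch; the project may format it elsewhere):

from typing import Tuple


def join_model_ref(model_hash: str, model_stage: str) -> str:
    # CLI convention visible above: mir infer --model-hash <hash>@<stage>
    return f"{model_hash}@{model_stage}" if model_stage else model_hash


def split_model_ref(ref: str) -> Tuple[str, str]:
    model_hash, _, model_stage = ref.partition("@")
    return model_hash, model_stage


assert split_model_ref(join_model_ref("model_hash_id", "model_stage_name")) \
    == ("model_hash_id", "model_stage_name")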
media_storage_root - # └── test_user - # └── ymir-dvc-test - super().__init__(methodName=methodName) - self._user_name = "user" - self._mir_repo_name = "repoid" - self._storage_name = "media_storage_root" - self._task_id = 't000aaaabbbbbbzzzzzzzzzzzzzzz5' - self._base_task_id = 't000aaaabbbbbbzzzzzzzzzzzzzzz4' - - self._sandbox_root = test_utils.dir_test_root(self.id().split(".")[-3:]) - self._user_root = os.path.join(self._sandbox_root, self._user_name) - self._mir_repo_root = os.path.join(self._user_root, self._mir_repo_name) - self._storage_root = os.path.join(self._sandbox_root, self._storage_name) - - def setUp(self): - test_utils.check_commands() - self._prepare_dirs() - self._prepare_mir_repo() - logging.info("preparing done.") - - def tearDown(self): - if os.path.isdir(self._sandbox_root): - shutil.rmtree(self._sandbox_root) - pass - - # custom: env prepare - def _prepare_dirs(self): - if os.path.isdir(self._sandbox_root): - logging.info("sandbox root exists, remove it first") - shutil.rmtree(self._sandbox_root) - os.makedirs(self._sandbox_root) - os.mkdir(self._user_root) - os.mkdir(self._mir_repo_root) - os.mkdir(self._storage_root) - - def _prepare_mir_repo(self): - # init repo - test_utils.mir_repo_init(self._mir_repo_root) - # prepare branch a - - def _mock_run_func(*args, **kwargs): - ret = type('', (), {})() - ret.returncode = 0 - ret.stdout = RET_ID - return ret - - @mock.patch("subprocess.run", side_effect=_mock_run_func) - def test_invoker_log_00(self, mock_run): - response = make_invoker_cmd_call(sandbox_root=self._sandbox_root, - req_type=backend_pb2.CMD_LOG, - invoker=RequestTypeToInvoker[backend_pb2.CMD_LOG], - user_id=self._user_name, - task_id=self._task_id, - repo_id=self._mir_repo_name) - print(MessageToDict(response)) - - expected_cmd = "mir log --root {0}".format(self._mir_repo_root) - mock_run.assert_called_once_with(expected_cmd.split(' '), capture_output=True, text=True) - - expected_ret = backend_pb2.GeneralResp() - expected_dict = {'message': RET_ID, 'extStrs': [RET_ID]} - ParseDict(expected_dict, expected_ret) - self.assertEqual(response, expected_ret) diff --git a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_pull_image.py b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_pull_image.py index 751437df5b..c57b366b5b 100644 --- a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_pull_image.py +++ b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_pull_image.py @@ -93,6 +93,11 @@ def test_invoker_00(self, mock_run): capture_output=True, text=True, ), + mock.call( + "docker run --rm docker_image_name cat /img-man/code-access.yaml".split(" "), + capture_output=True, + text=True + ) ] assert mock_run.call_args_list == args_list diff --git a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_sandbox_version.py b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_sandbox_version.py new file mode 100644 index 0000000000..6df4c7c6d3 --- /dev/null +++ b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_sandbox_version.py @@ -0,0 +1,109 @@ +import os +import shutil +import subprocess +import unittest + +from google.protobuf.json_format import MessageToDict +import yaml + +from controller.utils.invoker_call import make_invoker_cmd_call +from controller.utils.invoker_mapping import RequestTypeToInvoker +from id_definition.error_codes import CTLResponseCode +from proto import backend_pb2 + +from tests import utils as test_utils + + +class TestCmdSandboxVersion(unittest.TestCase): + # life 
cycle + def __init__(self, methodName: str) -> None: + super().__init__(methodName=methodName) + self._test_root = test_utils.dir_test_root(self.id().split(".")[-3:]) + self._sandbox_a_root = os.path.join(self._test_root, 'sandbox_a') + self._sandbox_b_root = os.path.join(self._test_root, 'sandbox_b') + self._sandbox_c_root = os.path.join(self._test_root, 'sandbox_c') + + def setUp(self) -> None: + self._prepare_test_root() + self._prepare_sandbox_a() + self._prepare_sandbox_b() + self._prepare_sandbox_c() + return super().setUp() + + def tearDown(self) -> None: + self._deprepare_test_root() + return super().tearDown() + + # protected: prepare and de-prepare + def _prepare_test_root(self) -> None: + if os.path.isdir(self._test_root): + shutil.rmtree(self._test_root) + os.makedirs(self._test_root, exist_ok=True) + + def _deprepare_test_root(self) -> None: + if os.path.isdir(self._test_root): + shutil.rmtree(self._test_root) + + def _prepare_sandbox_a(self) -> None: + """ + sandbox a: sandbox with two users + """ + os.makedirs(self._sandbox_a_root) + + for user_id, repo_ids in {'0001': ['000001', '000002'], '0002': ['000001']}.items(): + os.makedirs(os.path.join(self._sandbox_a_root, user_id)) + + for repo_id in repo_ids: + self._prepare_repo(os.path.join(self._sandbox_a_root, user_id, repo_id)) + + labels_dict = {'labels': [], 'version': 1, 'ymir_version': '42.0.0'} + with open(os.path.join(self._sandbox_a_root, user_id, 'labels.yaml'), 'w') as f: + yaml.safe_dump(labels_dict, f) + + def _prepare_sandbox_b(self) -> None: + """ + sandbox b: an empty sandbox + """ + os.makedirs(self._sandbox_b_root) + + def _prepare_sandbox_c(self) -> None: + """ + sandbox c: sandbox with multiple user space versions + """ + for user_id, repo_ids in {'0001': ['000001', '000002'], '0002': ['000001']}.items(): + os.makedirs(os.path.join(self._sandbox_c_root, user_id)) + + for repo_id in repo_ids: + self._prepare_repo(os.path.join(self._sandbox_c_root, user_id, repo_id)) + + labels_dict = {'labels': [], 'version': 1, 'ymir_version': f"0.0.{int(user_id)}"} + with open(os.path.join(self._sandbox_c_root, user_id, 'labels.yaml'), 'w') as f: + yaml.safe_dump(labels_dict, f) + + @classmethod + def _prepare_repo(cls, mir_root: str) -> None: + os.makedirs(mir_root, exist_ok=True) + subprocess.run(['git', 'config', '--global', 'init.defaultBranch', 'master'], cwd=mir_root) + subprocess.run(['git', 'init'], cwd=mir_root) + + # public: test cases + def test_all(self) -> None: + # sandbox a: normal + response_a = make_invoker_cmd_call(invoker=RequestTypeToInvoker[backend_pb2.CMD_VERSIONS_GET], + sandbox_root=self._sandbox_a_root, + req_type=backend_pb2.CMD_VERSIONS_GET) + print(MessageToDict(response_a)) + self.assertEqual(CTLResponseCode.CTR_OK, response_a.code) + self.assertEqual(['42.0.0'], response_a.sandbox_versions) + + # sandbox b: no users + response_b = make_invoker_cmd_call(invoker=RequestTypeToInvoker[backend_pb2.CMD_VERSIONS_GET], + sandbox_root=self._sandbox_b_root, + req_type=backend_pb2.CMD_VERSIONS_GET) + self.assertEqual([], response_b.sandbox_versions) + + # sandbox c: multiple versions + response_c = make_invoker_cmd_call(invoker=RequestTypeToInvoker[backend_pb2.CMD_VERSIONS_GET], + sandbox_root=self._sandbox_c_root, + req_type=backend_pb2.CMD_VERSIONS_GET) + self.assertEqual({'0.0.1', '0.0.2'}, set(response_c.sandbox_versions)) diff --git a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_terminate.py b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_terminate.py index 
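The CMD_VERSIONS_GET test above expects the invoker to report the distinct ymir_version values found in each user's labels.yaml, an empty list for an empty sandbox, and multiple entries when user spaces diverge. One plausible scan, written as a sketch rather than the invoker's actual implementation:

import os
from typing import List

import yaml  # PyYAML, already used by the test above


def collect_sandbox_versions(sandbox_root: str) -> List[str]:
    # One labels.yaml per user directory; gather the distinct ymir_version
    # values, which is the behaviour the three sandboxes above exercise.
    versions = set()
    for user_id in os.listdir(sandbox_root):
        labels_path = os.path.join(sandbox_root, user_id, "labels.yaml")
        if not os.path.isfile(labels_path):
            continue
        with open(labels_path) as f:
            versions.add(yaml.safe_load(f)["ymir_version"])
    return sorted(versions)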
391f83f7e5..6c98a0931c 100644 --- a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_terminate.py +++ b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_terminate.py @@ -7,6 +7,7 @@ import tests.utils as test_utils from controller.utils.invoker_call import make_invoker_cmd_call from controller.utils.invoker_mapping import RequestTypeToInvoker +from mir.protos import mir_command_pb2 as mir_cmd_pb from proto import backend_pb2 @@ -65,7 +66,7 @@ def test_invoker_00(self, mock_run): repo_id=self._mir_repo_name, task_id=self._task_id, executant_name=executant_name, - terminated_task_type=backend_pb2.TaskType.TaskTypeTraining, + terminated_task_type=mir_cmd_pb.TaskType.TaskTypeTraining, ) cmd = f"docker rm -f {executant_name}" diff --git a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_copy.py b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_copy.py index 223bf19e66..debc8585db 100644 --- a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_copy.py +++ b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_copy.py @@ -10,6 +10,7 @@ from controller.utils import utils from controller.utils.invoker_call import make_invoker_cmd_call from controller.utils.invoker_mapping import RequestTypeToInvoker +from mir.protos import mir_command_pb2 as mir_cmd_pb from proto import backend_pb2 RET_ID = 'commit t000aaaabbbbbbzzzzzzzzzzzzzzz3\nabc' @@ -82,15 +83,15 @@ def test_invoker_00(self, mock_run): copy_request = backend_pb2.TaskReqCopyData() copy_request.src_user_id = "usre" copy_request.src_repo_id = "repodi" - copy_request.src_dataset_id = "t000aaaabbbbbbzzzzzzzzzzzzzzb6" + in_dataset_ids = ["t000aaaabbbbbbzzzzzzzzzzzzzzb6"] mir_src_root = os.path.join(self._sandbox_root, copy_request.src_user_id, copy_request.src_repo_id) os.makedirs(mir_src_root) working_dir = os.path.join(self._sandbox_root, "work_dir", - backend_pb2.TaskType.Name(backend_pb2.TaskTypeCopyData), self._task_id, 'sub_task', - self._task_id) + mir_cmd_pb.TaskType.Name(mir_cmd_pb.TaskType.TaskTypeCopyData), self._task_id, + 'sub_task', self._task_id) req_create_task = backend_pb2.ReqCreateTask() - req_create_task.task_type = backend_pb2.TaskTypeCopyData + req_create_task.task_type = mir_cmd_pb.TaskType.TaskTypeCopyData req_create_task.no_task_monitor = True req_create_task.copy.CopyFrom(copy_request) response = make_invoker_cmd_call(invoker=RequestTypeToInvoker[backend_pb2.TASK_CREATE], @@ -99,10 +100,11 @@ def test_invoker_00(self, mock_run): user_id=self._user_name, repo_id=self._mir_repo_name, task_id=self._task_id, + in_dataset_ids=in_dataset_ids, req_create_task=req_create_task) expected_cmd_copy = ("mir copy --root {0} --src-root {1} --dst-rev {2}@{2} --src-revs {3}@{3} -w {4}".format( - self._mir_repo_root, mir_src_root, self._task_id, copy_request.src_dataset_id, working_dir)) + self._mir_repo_root, mir_src_root, self._task_id, in_dataset_ids[0], working_dir)) mock_run.assert_has_calls(calls=[ mock.call(expected_cmd_copy.split(' '), capture_output=True, text=True), ]) diff --git a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_exporting.py b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_exporting.py index 398d660c5a..d5bb823cb8 100644 --- a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_exporting.py +++ b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_exporting.py @@ -10,6 +10,7 @@ from controller.utils import utils from controller.utils.invoker_call import make_invoker_cmd_call from 
controller.utils.invoker_mapping import RequestTypeToInvoker +from mir.protos import mir_command_pb2 as mir_cmd_pb from proto import backend_pb2 RET_ID = 'commit t000aaaabbbbbbzzzzzzzzzzzzzzz3\nabc' @@ -79,18 +80,19 @@ def _mock_run_func(*args, **kwargs): @mock.patch("subprocess.run", side_effect=_mock_run_func) def test_invoker_00(self, mock_run): exporting_request = backend_pb2.TaskReqExporting() - exporting_request.dataset_id = self._base_task_id - exporting_request.format = backend_pb2.LabelFormat.PASCAL_VOC + in_dataset_ids = [self._base_task_id] + exporting_request.format = mir_cmd_pb.AnnoFormat.AF_DET_PASCAL_VOC exporting_request.asset_dir = self._storage_root - exporting_request.annotation_dir = self._storage_root + exporting_request.pred_dir = self._storage_root + exporting_request.gt_dir = self._storage_root req_create_task = backend_pb2.ReqCreateTask() - req_create_task.task_type = backend_pb2.TaskTypeExportData + req_create_task.task_type = mir_cmd_pb.TaskType.TaskTypeExportData req_create_task.no_task_monitor = True req_create_task.exporting.CopyFrom(exporting_request) assets_config = {'assetskvlocation': self._storage_root} working_dir = os.path.join(self._sandbox_root, "work_dir", - backend_pb2.TaskType.Name(backend_pb2.TaskTypeExportData), self._task_id, 'sub_task', - self._task_id) + mir_cmd_pb.TaskType.Name(mir_cmd_pb.TaskType.TaskTypeExportData), self._task_id, + 'sub_task', self._task_id) response = make_invoker_cmd_call(invoker=RequestTypeToInvoker[backend_pb2.TASK_CREATE], sandbox_root=self._sandbox_root, @@ -99,15 +101,16 @@ def test_invoker_00(self, mock_run): user_id=self._user_name, repo_id=self._mir_repo_name, task_id=self._task_id, + in_dataset_ids=in_dataset_ids, req_create_task=req_create_task) print(MessageToDict(response)) - expected_cmd_importing = ( - "mir export --root {0} --media-location {1} --asset-dir {1} --annotation-dir {1} --src-revs {2}@{2} " - "--format {3} -w {4}".format(self._mir_repo_root, self._storage_root, self._base_task_id, 'voc', - working_dir)) + expected_cmd_exporting = ( + "mir export --root {0} --media-location {1} --asset-dir {1} --src-revs {2}@{2} --anno-format {3} -w {4} " + "--pred-dir {1} --gt-dir {1}".format(self._mir_repo_root, self._storage_root, in_dataset_ids[0], 'det-voc', + working_dir)) mock_run.assert_has_calls(calls=[ - mock.call(expected_cmd_importing.split(' '), capture_output=True, text=True), + mock.call(expected_cmd_exporting.split(' '), capture_output=True, text=True), ]) expected_ret = backend_pb2.GeneralResp() diff --git a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_filter.py b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_filter.py deleted file mode 100644 index 7e97a85a27..0000000000 --- a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_filter.py +++ /dev/null @@ -1,120 +0,0 @@ -import logging -import os -import shutil -import unittest -from unittest import mock - -from google.protobuf.json_format import MessageToDict, ParseDict - - -from common_utils import labels -from controller.utils import utils -from controller.utils.invoker_call import make_invoker_cmd_call -from controller.utils.invoker_mapping import RequestTypeToInvoker -from proto import backend_pb2 -import tests.utils as test_utils - -RET_ID = 'commit t000aaaabbbbbbzzzzzzzzzzzzzzz3\nabc' - - -class TestInvokerTaskFilter(unittest.TestCase): - def __init__(self, methodName: str) -> None: - # dir structure: - # test_involer_CLSNAME_sandbox_root - # ├── media_storage_root - # └── test_user - # └── 
ymir-dvc-test - super().__init__(methodName=methodName) - self._user_name = "user" - self._mir_repo_name = "repoid" - self._storage_name = "media_storage_root" - self._task_id = 't000aaaabbbbbbzzzzzzzzzzzzzza5' - self._sub_task_id_0 = utils.sub_task_id(self._task_id, 0) - self._sub_task_id_1 = utils.sub_task_id(self._task_id, 1) - self._base_task_id = 't000aaaabbbbbbzzzzzzzzzzzzzzz4' - self._guest_id1 = 't000aaaabbbbbbzzzzzzzzzzzzzzz1' - self._guest_id2 = 't000aaaabbbbbbzzzzzzzzzzzzzzz2' - - self._sandbox_root = test_utils.dir_test_root(self.id().split(".")[-3:]) - self._user_root = os.path.join(self._sandbox_root, self._user_name) - self._mir_repo_root = os.path.join(self._user_root, self._mir_repo_name) - self._storage_root = os.path.join(self._sandbox_root, self._storage_name) - - def setUp(self): - test_utils.check_commands() - self._prepare_dirs() - self._prepare_mir_repo() - logging.info("preparing done.") - - def tearDown(self): - if os.path.isdir(self._sandbox_root): - shutil.rmtree(self._sandbox_root) - pass - - # custom: env prepare - def _prepare_dirs(self): - if os.path.isdir(self._sandbox_root): - logging.info("sandbox root exists, remove it first") - shutil.rmtree(self._sandbox_root) - os.makedirs(self._sandbox_root) - os.mkdir(self._user_root) - os.mkdir(self._mir_repo_root) - os.mkdir(self._storage_root) - - def _prepare_mir_repo(self): - # init repo - test_utils.mir_repo_init(self._mir_repo_root) - # prepare branch a - - def _mock_run_func(*args, **kwargs): - ret = type('', (), {})() - ret.returncode = 0 - ret.stdout = RET_ID - return ret - - @mock.patch("subprocess.run", side_effect=_mock_run_func) - def test_invoker_00(self, mock_run): - labels.UserLabels.get_main_names = mock.Mock(return_value=["frisbee", "car"]) - filter_request = backend_pb2.TaskReqFilter() - filter_request.in_dataset_ids[:] = [self._guest_id1, self._guest_id2] - filter_request.in_class_ids[:] = [0, 1] - filter_request.ex_class_ids[:] = [2] - req_create_task = backend_pb2.ReqCreateTask() - req_create_task.task_type = backend_pb2.TaskTypeFilter - req_create_task.no_task_monitor = True - req_create_task.filter.CopyFrom(filter_request) - - working_dir_root = os.path.join(self._sandbox_root, "work_dir", - backend_pb2.TaskType.Name(backend_pb2.TaskTypeFilter), self._task_id) - os.makedirs(working_dir_root, exist_ok=True) - working_dir_0 = os.path.join(working_dir_root, 'sub_task', self._sub_task_id_0) - os.makedirs(working_dir_0, exist_ok=True) - working_dir_1 = os.path.join(working_dir_root, 'sub_task', self._sub_task_id_1) - os.makedirs(working_dir_1, exist_ok=True) - - response = make_invoker_cmd_call(invoker=RequestTypeToInvoker[backend_pb2.TASK_CREATE], - sandbox_root=self._sandbox_root, - req_type=backend_pb2.TASK_CREATE, - user_id=self._user_name, - repo_id=self._mir_repo_name, - task_id=self._task_id, - req_create_task=req_create_task, - merge_strategy=backend_pb2.MergeStrategy.Value('HOST'), - work_dir=working_dir_root) - print(MessageToDict(response)) - - expected_cmd_merge = ("mir merge --root {0} --dst-rev {1}@{2} -s host -w {3} " - "--src-revs {4}@{4};{5}".format(self._mir_repo_root, self._task_id, self._sub_task_id_1, - working_dir_1, self._guest_id1, self._guest_id2)) - expected_cmd_filter = ("mir filter --root {0} --dst-rev {1}@{1} --src-revs {1}@{2} -w {3} " - "-p {4} -P {5}".format(self._mir_repo_root, self._task_id, self._sub_task_id_1, - working_dir_0, 'frisbee;car', 'frisbee;car')) - mock_run.assert_has_calls(calls=[ - mock.call(expected_cmd_merge.split(' '), capture_output=True, 
text=True), - mock.call(expected_cmd_filter.split(' '), capture_output=True, text=True), - ]) - - expected_ret = backend_pb2.GeneralResp() - expected_dict = {'message': RET_ID} - ParseDict(expected_dict, expected_ret) - self.assertEqual(response, expected_ret) diff --git a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_fusion.py b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_fusion.py index 95091ec66f..3d730a863e 100644 --- a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_fusion.py +++ b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_fusion.py @@ -8,6 +8,7 @@ from controller.utils import utils from controller.utils.invoker_call import make_invoker_cmd_call from controller.utils.invoker_mapping import RequestTypeToInvoker +from mir.protos import mir_command_pb2 as mir_cmd_pb from proto import backend_pb2 import tests.utils as test_utils @@ -40,7 +41,7 @@ def __init__(self, methodName: str) -> None: self._storage_root = os.path.join(self._sandbox_root, self._storage_name) self._work_dir = os.path.join(self._sandbox_root, "work_dir", - backend_pb2.TaskType.Name(backend_pb2.TaskTypeFusion), self._task_id) + mir_cmd_pb.TaskType.Name(mir_cmd_pb.TaskType.TaskTypeFusion), self._task_id) self._sub_work_dir_0 = os.path.join(self._work_dir, 'sub_task', self._sub_task_id_0) self._sub_work_dir_1 = os.path.join(self._work_dir, 'sub_task', self._sub_task_id_1) self._sub_work_dir_2 = os.path.join(self._work_dir, 'sub_task', self._sub_task_id_2) @@ -49,7 +50,7 @@ def setUp(self) -> None: test_utils.check_commands() self._prepare_dirs() self._prepare_mir_repo() - labels.UserLabels.get_main_names = mock.Mock(return_value=["person", "cat", "table"]) + labels.UserLabels.main_name_for_ids = mock.Mock(return_value=["person", "cat", "table"]) return super().setUp() def tearDown(self) -> None: @@ -83,17 +84,17 @@ def _mock_run_func(*args, **kwargs): @mock.patch("subprocess.run", side_effect=_mock_run_func) def test_invoker_00(self, mock_run): req_create_task = backend_pb2.ReqCreateTask() - req_create_task.task_type = backend_pb2.TaskTypeFusion + req_create_task.task_type = mir_cmd_pb.TaskType.TaskTypeFusion req_create_task.no_task_monitor = True - req_create_task.fusion.in_dataset_ids.extend([self._guest_id1, self._guest_id2]) - req_create_task.fusion.ex_dataset_ids.extend([self._guest_id3]) - req_create_task.fusion.merge_strategy = backend_pb2.MergeStrategy.HOST - req_create_task.fusion.in_class_ids.extend([1, 3, 5]) - req_create_task.fusion.count = 100 + in_dataset_ids = [self._guest_id1, self._guest_id2] + ex_dataset_ids = [self._guest_id3] + merge_strategy = backend_pb2.MergeStrategy.HOST + in_class_ids = [1, 3, 5] + sampling_count = 100 work_dir_root = os.path.join(self._sandbox_root, "work_dir", - backend_pb2.TaskType.Name(backend_pb2.TaskTypeFusion), self._task_id) + mir_cmd_pb.TaskType.Name(mir_cmd_pb.TaskType.TaskTypeFusion), self._task_id) expected_merge_work_dir = os.path.join(work_dir_root, 'sub_task', self._sub_task_id_2) expected_filter_work_dir = os.path.join(work_dir_root, 'sub_task', self._sub_task_id_1) expected_sampling_work_dir = os.path.join(work_dir_root, 'sub_task', self._sub_task_id_0) @@ -107,7 +108,7 @@ def test_invoker_00(self, mock_run): expected_filter_cmd = f"mir filter --root {self._mir_repo_root}" expected_filter_cmd += f" --dst-rev {self._task_id}@{self._sub_task_id_1}" expected_filter_cmd += f" --src-revs {self._task_id}@{self._sub_task_id_2}" - expected_filter_cmd += f" -w {expected_filter_work_dir} -p 
person;cat;table" + expected_filter_cmd += f" -w {expected_filter_work_dir} --cis person;cat;table" expected_sampling_cmd = f"mir sampling --root {self._mir_repo_root}" expected_sampling_cmd += f" --dst-rev {self._task_id}@{self._task_id}" @@ -124,7 +125,11 @@ def test_invoker_00(self, mock_run): repo_id=self._mir_repo_name, task_id=self._task_id, req_create_task=req_create_task, - merge_strategy=backend_pb2.MergeStrategy.HOST, + in_dataset_ids=in_dataset_ids, + ex_dataset_ids=ex_dataset_ids, + merge_strategy=merge_strategy, + in_class_ids=in_class_ids, + sampling_count=sampling_count, ) logging.info(response) diff --git a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_importing.py b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_import_dataset.py similarity index 73% rename from ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_importing.py rename to ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_import_dataset.py index 9167df6e32..f3a6856316 100644 --- a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_importing.py +++ b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_import_dataset.py @@ -10,12 +10,13 @@ from controller.utils import utils from controller.utils.invoker_call import make_invoker_cmd_call from controller.utils.invoker_mapping import RequestTypeToInvoker +from mir.protos import mir_command_pb2 as mir_cmd_pb from proto import backend_pb2 RET_ID = 'commit t000aaaabbbbbbzzzzzzzzzzzzzzz3\nabc' -class TestInvokerTaskImporting(unittest.TestCase): +class TestInvokerTaskImportDataset(unittest.TestCase): def __init__(self, methodName: str) -> None: # dir structure: # test_involer_CLSNAME_sandbox_root @@ -79,13 +80,15 @@ def _mock_run_func(*args, **kwargs): @mock.patch("subprocess.run", side_effect=_mock_run_func) def test_invoker_00(self, mock_run): - importing_request = backend_pb2.TaskReqImporting() - importing_request.asset_dir = self._storage_root - importing_request.annotation_dir = self._storage_root + import_dataset_request = backend_pb2.TaskReqImportDataset() + import_dataset_request.asset_dir = self._storage_root + import_dataset_request.pred_dir = self._storage_root + import_dataset_request.gt_dir = self._storage_root + import_dataset_request.unknown_types_strategy = backend_pb2.UnknownTypesStrategy.UTS_ADD req_create_task = backend_pb2.ReqCreateTask() - req_create_task.task_type = backend_pb2.TaskTypeImportData + req_create_task.task_type = mir_cmd_pb.TaskType.TaskTypeImportData req_create_task.no_task_monitor = True - req_create_task.importing.CopyFrom(importing_request) + req_create_task.import_dataset.CopyFrom(import_dataset_request) assets_config = {'assetskvlocation': self._storage_root} response = make_invoker_cmd_call(invoker=RequestTypeToInvoker[backend_pb2.TASK_CREATE], sandbox_root=self._sandbox_root, @@ -97,16 +100,18 @@ def test_invoker_00(self, mock_run): req_create_task=req_create_task) working_dir = os.path.join(self._sandbox_root, "work_dir", - backend_pb2.TaskType.Name(backend_pb2.TaskTypeImportData), self._task_id, 'sub_task', - self._task_id) + mir_cmd_pb.TaskType.Name(mir_cmd_pb.TaskType.TaskTypeImportData), self._task_id, + 'sub_task', self._task_id) os.makedirs(working_dir, exist_ok=True) - expected_cmd_importing = ("mir import --root {0} --dataset-name {1} --dst-rev {1}@{1} --src-revs {2} " - "--index-file {3} --gen-dir {4} -w {5} --annotation-dir {4}".format( - self._mir_repo_root, self._task_id, 'master', - os.path.join(working_dir, 'index.txt'), self._storage_root, 
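The fusion test above chains three sub-steps, each committing to its own revision derived from the parent task id: merge writes @sub_task_2, filter reads it and writes @sub_task_1, and sampling produces the final @task_id. The sub-task ids come from the sub_task_id helper shown earlier in utils.py:

def sub_task_id(task_id: str, offset: int) -> str:
    # Same scheme as controller.utils.utils.sub_task_id above: the second
    # character of a task id encodes the sub-task offset.
    return task_id[0] + str(offset) + task_id[2:]


task_id = "t000aaaabbbbbbzzzzzzzzzzzzzza5"
merge_rev = sub_task_id(task_id, 2)   # mir merge    -> @sub_task_2
filter_rev = sub_task_id(task_id, 1)  # mir filter   -> @sub_task_1
final_rev = task_id                   # mir sampling -> @task_id itself
print(merge_rev, filter_rev, final_rev)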
working_dir)) + expected_cmd_import_dataset = ( + "mir import --root {0} --dst-rev {1}@{1} --src-revs {2} " + "--index-file {3} --gen-dir {4} -w {5} --anno-type {6} --pred-dir {4} --gt-dir {4} " + "--unknown-types-strategy add".format(self._mir_repo_root, self._task_id, 'master', + os.path.join(working_dir, 'index.txt'), self._storage_root, + working_dir, "det-box")) mock_run.assert_has_calls(calls=[ - mock.call(expected_cmd_importing.split(' '), capture_output=True, text=True), + mock.call(expected_cmd_import_dataset.split(' '), capture_output=True, text=True), ]) expected_ret = backend_pb2.GeneralResp() diff --git a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_model_importing.py b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_import_model.py similarity index 90% rename from ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_model_importing.py rename to ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_import_model.py index b73c9b56a5..ef557290ef 100644 --- a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_model_importing.py +++ b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_import_model.py @@ -6,11 +6,12 @@ from controller.utils.invoker_call import make_invoker_cmd_call from controller.utils.invoker_mapping import RequestTypeToInvoker +from mir.protos import mir_command_pb2 as mir_cmd_pb from proto import backend_pb2 import tests.utils as test_utils -class TestInvokerTaskModelImporting(unittest.TestCase): +class TestInvokerTaskImportModel(unittest.TestCase): # life cycle def __init__(self, methodName: str = ...) -> None: super().__init__(methodName) @@ -69,9 +70,9 @@ def _mock_run_func(*args, **kwargs): @mock.patch("subprocess.run", side_effect=_mock_run_func) def test_invoker_00(self, mock_run): req_create_task = backend_pb2.ReqCreateTask() - req_create_task.task_type = backend_pb2.TaskTypeImportModel + req_create_task.task_type = mir_cmd_pb.TaskType.TaskTypeImportModel req_create_task.no_task_monitor = True - req_create_task.model_importing.model_package_path = self._model_package_path + req_create_task.import_model.model_package_path = self._model_package_path response = make_invoker_cmd_call(invoker=RequestTypeToInvoker[backend_pb2.TASK_CREATE], sandbox_root=self._sandbox_root, @@ -83,7 +84,7 @@ def test_invoker_00(self, mock_run): req_create_task=req_create_task) logging.info(f"import model response: {response}") work_dir_root = os.path.join(self._sandbox_root, "work_dir", - backend_pb2.TaskType.Name(backend_pb2.TaskTypeImportModel), self._task_id) + mir_cmd_pb.TaskType.Name(mir_cmd_pb.TaskType.TaskTypeImportModel), self._task_id) working_dir_0 = os.path.join(work_dir_root, 'sub_task', self._task_id) expected_cmd = [ 'mir', 'models', '--root', self._mir_repo_root, '--package-path', self._model_package_path, '-w', diff --git a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_labeling.py b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_labeling.py index 851f5bb4ad..85d2c7441b 100644 --- a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_labeling.py +++ b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_labeling.py @@ -16,6 +16,7 @@ from controller.utils import utils from controller.utils.invoker_call import make_invoker_cmd_call from controller.utils.invoker_mapping import RequestTypeToInvoker +from mir.protos import mir_command_pb2 as mir_cmd_pb from proto import backend_pb2 @@ -25,21 +26,21 @@ def mock_many(mocker): 
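The import-dataset command assembled above turns backend_pb2.UnknownTypesStrategy.UTS_ADD into the CLI value "add" (--unknown-types-strategy add). A table-lookup sketch; the enum's numeric value is an assumption, and only UTS_ADD appears in this diff:

UTS_ADD = 2  # stand-in for backend_pb2.UnknownTypesStrategy.UTS_ADD; value assumed

_STRATEGY_TO_FLAG = {UTS_ADD: "add"}  # the real proto defines more members


def unknown_types_strategy_str(strategy: int) -> str:
    return _STRATEGY_TO_FLAG[strategy]


assert unknown_types_strategy_str(UTS_ADD) == "add"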
mocker.patch("builtins.open", mocker.mock_open(read_data="data")) mocker.patch("os.listdir", return_value=[]) mocker.patch.object(Path, "touch") - labels.UserLabels.get_main_names = mock.Mock(return_value=["fake"]) + labels.UserLabels.main_name_for_ids = mock.Mock(return_value=["fake"]) class TestTaskLabelingInvoker: def test_task_invoke(self, mocker, mock_many): label_req = backend_pb2.TaskReqLabeling() - label_req.in_class_ids[:] = [0, 1] + in_class_ids = [0, 1] label_req.labeler_accounts[:] = ["a@a.com"] label_req.project_name = "fake_project_name" - label_req.dataset_id = "id" + in_dataset_ids = ["id"] label_req.expert_instruction_url = "url" label_req.export_annotation = False req_create_task = backend_pb2.ReqCreateTask() - req_create_task.task_type = backend_pb2.TaskTypeLabel + req_create_task.task_type = mir_cmd_pb.TaskType.TaskTypeLabel req_create_task.labeling.CopyFrom(label_req) req_create_task.no_task_monitor = True @@ -63,8 +64,9 @@ def test_task_invoke(self, mocker, mock_many): os.makedirs(mir_repo_root) test_utils.mir_repo_init(mir_repo_root) - working_dir = os.path.join(sandbox_root, "work_dir", backend_pb2.TaskType.Name(backend_pb2.TaskTypeLabel), - task_id, 'sub_task', task_id) + working_dir = os.path.join(sandbox_root, "work_dir", + mir_cmd_pb.TaskType.Name(mir_cmd_pb.TaskType.TaskTypeLabel), task_id, 'sub_task', + task_id) if os.path.isdir(working_dir): logging.info("working_dir exists, remove it first") shutil.rmtree(working_dir) @@ -76,6 +78,8 @@ def test_task_invoke(self, mocker, mock_many): user_id=user_name, repo_id=mir_repo_name, task_id=task_id, + in_dataset_ids=in_dataset_ids, + in_class_ids=in_class_ids, req_create_task=req_create_task) assert mock_post.call_count == 4 @@ -90,6 +94,8 @@ def test_task_invoke(self, mocker, mock_many): user_id=user_name, repo_id=mir_repo_name, task_id=task_id, + in_dataset_ids=in_dataset_ids, + in_class_ids=in_class_ids, req_create_task=req_create_task) assert mock_post.call_count == 8 expected_ret = backend_pb2.GeneralResp() diff --git a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_mining.py b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_mining.py index 2278c0a0d3..3256b58980 100644 --- a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_mining.py +++ b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_mining.py @@ -12,6 +12,7 @@ from controller.utils import utils from controller.utils.invoker_call import make_invoker_cmd_call from controller.utils.invoker_mapping import RequestTypeToInvoker +from mir.protos import mir_command_pb2 as mir_cmd_pb from proto import backend_pb2 RET_ID = 'commit t000aaaabbbbbbzzzzzzzzzzzzzzz3\nabc' @@ -86,24 +87,29 @@ def test_invoker_00(self, mock_run): 'batch_size': 16, 'gpu_count': 0 } - top_k, model_hash = 300, 'abc' + top_k, model_hash, model_stage = 300, 'abc', 'first_stage' mine_task_req = backend_pb2.TaskReqMining() mine_task_req.top_k = top_k - mine_task_req.in_dataset_ids[:] = [self._guest_id1, self._guest_id2] - mine_task_req.ex_dataset_ids[:] = [self._guest_id3] + in_dataset_ids = [self._guest_id1, self._guest_id2] + ex_dataset_ids = [self._guest_id3] mine_task_req.generate_annotations = False req_create_task = backend_pb2.ReqCreateTask() - req_create_task.task_type = backend_pb2.TaskTypeMining + req_create_task.task_type = mir_cmd_pb.TaskType.TaskTypeMining req_create_task.no_task_monitor = True req_create_task.mining.CopyFrom(mine_task_req) assets_config = { 'modelskvlocation': self._storage_root, 'assetskvlocation': 
self._storage_root, + 'openpai_host': '', + 'openpai_token': '', + 'openpai_storage': '', + 'openpai_user': '', + 'server_runtime': 'runc', } working_dir_root = os.path.join(self._sandbox_root, "work_dir", - backend_pb2.TaskType.Name(backend_pb2.TaskTypeMining), self._task_id) + mir_cmd_pb.TaskType.Name(mir_cmd_pb.TaskType.TaskTypeMining), self._task_id) os.makedirs(working_dir_root, exist_ok=True) working_dir_0 = os.path.join(working_dir_root, 'sub_task', self._sub_task_id_0) os.makedirs(working_dir_0, exist_ok=True) @@ -128,6 +134,9 @@ def test_invoker_00(self, mock_run): merge_strategy=backend_pb2.MergeStrategy.Value('HOST'), singleton_op='mining_image', model_hash=model_hash, + model_stage=model_stage, + in_dataset_ids=in_dataset_ids, + ex_dataset_ids=ex_dataset_ids, docker_image_config=json.dumps(mining_config), ) print(MessageToDict(response)) @@ -136,16 +145,23 @@ def test_invoker_00(self, mock_run): with open(output_config, "r") as f: config = yaml.safe_load(f) mining_config['gpu_id'] = '' - expected_config = {'executor_config': mining_config, 'task_context': {'available_gpu_id': ''}} + expected_config = { + 'executor_config': mining_config, + 'task_context': { + 'available_gpu_id': '', + 'server_runtime': 'runc', + }, + } self.assertDictEqual(expected_config, config) - asset_cache_dir = os.path.join(self._user_root, 'mining_assset_cache') + asset_cache_dir = os.path.join(self._user_root, 'asset_cache') mining_cmd = ("mir mining --root {0} --dst-rev {1}@{1} -w {2} --model-location {3} --media-location {3} " "--model-hash {5} --src-revs {1}@{6} --asset-cache-dir {9} --task-config-file {7} --executor {8} " "--executant-name {10} --topk {4}".format(self._mir_repo_root, self._task_id, working_dir_0, - self._storage_root, top_k, model_hash, - self._sub_task_id_1, output_config, 'mining_image', - asset_cache_dir, self._task_id)) + self._storage_root, top_k, + f"{model_hash}@{model_stage}", self._sub_task_id_1, + output_config, 'mining_image', asset_cache_dir, + self._task_id)) mock_run.assert_has_calls(calls=[ mock.call(expected_cmd_merge.split(' '), capture_output=True, text=True), mock.call(mining_cmd.split(' '), capture_output=True, text=True), diff --git a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_training.py b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_training.py index 048acfc45e..ad1d2fcdc0 100644 --- a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_training.py +++ b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_task_training.py @@ -14,6 +14,7 @@ from controller.utils.invoker_call import make_invoker_cmd_call from controller.utils.invoker_mapping import RequestTypeToInvoker from controller.utils.redis import rds +from mir.protos import mir_command_pb2 as mir_cmd_pb from proto import backend_pb2 RET_ID = 'commit t000aaaabbbbbbzzzzzzzzzzzzzzz3\nabc' @@ -82,7 +83,7 @@ def test_invoker_00(self, mock_run): rds.zremrangebyscore = mock.Mock() gpu_utils.GPUInfo.get_gpus_info = mock.Mock(return_value={'0': 0.99, '1': 0.9, '2': 0.89}) - labels.UserLabels.get_main_names = mock.Mock(return_value=["frisbee", "car"]) + labels.UserLabels.main_name_for_ids = mock.Mock(return_value=["frisbee", "car"]) training_config = { 'anchors': '12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401', @@ -100,18 +101,18 @@ def test_invoker_00(self, mock_run): training_data_type_1 = backend_pb2.TaskReqTraining.TrainingDatasetType() training_data_type_1.dataset_id = self._guest_id1 - training_data_type_1.dataset_type = 
backend_pb2.TvtType.TvtTypeTraining + training_data_type_1.dataset_type = mir_cmd_pb.TvtType.TvtTypeTraining training_data_type_2 = backend_pb2.TaskReqTraining.TrainingDatasetType() training_data_type_2.dataset_id = self._guest_id2 - training_data_type_2.dataset_type = backend_pb2.TvtType.TvtTypeValidation + training_data_type_2.dataset_type = mir_cmd_pb.TvtType.TvtTypeValidation train_task_req = backend_pb2.TaskReqTraining() - train_task_req.in_dataset_types.append(training_data_type_1) train_task_req.in_dataset_types.append(training_data_type_2) - train_task_req.in_class_ids[:] = [0, 1] + train_task_req.in_dataset_types.append(training_data_type_1) + in_class_ids = [0, 1] req_create_task = backend_pb2.ReqCreateTask() - req_create_task.task_type = backend_pb2.TaskTypeTraining + req_create_task.task_type = mir_cmd_pb.TaskType.TaskTypeTraining req_create_task.no_task_monitor = True req_create_task.training.CopyFrom(train_task_req) training_image = 'test_training_image' @@ -119,10 +120,15 @@ def test_invoker_00(self, mock_run): 'modelsuploadlocation': self._storage_root, 'assetskvlocation': self._storage_root, 'tensorboard_root': self._tensorboard_root, + 'openpai_host': '', + 'openpai_token': '', + 'openpai_storage': '', + 'openpai_user': '', + 'server_runtime': 'runc', } working_dir_root = os.path.join(self._sandbox_root, "work_dir", - backend_pb2.TaskType.Name(backend_pb2.TaskTypeTraining), self._task_id) + mir_cmd_pb.TaskType.Name(mir_cmd_pb.TaskType.TaskTypeTraining), self._task_id) os.makedirs(working_dir_root, exist_ok=True) working_dir_0 = os.path.join(working_dir_root, 'sub_task', self._sub_task_id_0) os.makedirs(working_dir_0, exist_ok=True) @@ -139,6 +145,7 @@ def test_invoker_00(self, mock_run): req_create_task=req_create_task, merge_strategy=backend_pb2.MergeStrategy.Value('HOST'), singleton_op=training_image, + in_class_ids=in_class_ids, docker_image_config=json.dumps(training_config)) print(MessageToDict(response)) @@ -153,12 +160,18 @@ def test_invoker_00(self, mock_run): training_config["class_names"] = ["frisbee", "car"] training_config['gpu_id'] = '0' - expected_config = {'executor_config': training_config, 'task_context': {'available_gpu_id': '1'}} + expected_config = { + 'executor_config': training_config, + 'task_context': { + 'available_gpu_id': '1', + 'server_runtime': 'runc', + }, + } logging.info(f"xxx config: {config}") # for test self.assertDictEqual(expected_config, config) tensorboard_dir = os.path.join(self._tensorboard_root, self._user_name, self._task_id) - asset_cache_dir = os.path.join(self._sandbox_root, self._user_name, "training_assset_cache") + asset_cache_dir = os.path.join(self._sandbox_root, self._user_name, "asset_cache") training_cmd = ("mir train --root {0} --dst-rev {1}@{1} --model-location {2} " "--media-location {2} -w {3} --src-revs {1}@{4} --task-config-file {5} --executor {6} " diff --git a/ymir/backend/src/ymir_controller/tests/unit/test_util_labels.py b/ymir/backend/src/ymir_controller/tests/unit/test_util_labels.py index 41e0be8494..f1dd77caad 100644 --- a/ymir/backend/src/ymir_controller/tests/unit/test_util_labels.py +++ b/ymir/backend/src/ymir_controller/tests/unit/test_util_labels.py @@ -5,6 +5,7 @@ import unittest from common_utils import labels +from mir.tools.class_ids import UserLabels, SingleLabel import tests.utils as test_utils @@ -34,7 +35,7 @@ def __init__(self, methodName: str) -> None: def setUp(self): test_utils.check_commands() self._prepare_dirs() - labels.create_empty(label_storage_file=self._label_storage_file) + 
labels.load_or_create_userlabels(label_storage_file=self._label_storage_file, create_ok=True) logging.info("preparing done.") def tearDown(self): @@ -52,12 +53,12 @@ def _prepare_dirs(self): os.mkdir(self._storage_root) # protected: check result - def _check_result(self, expected: List[dict], actual: List[labels.SingleLabel]) -> None: + def _check_result(self, expected: List[dict], actual: List[SingleLabel]) -> None: try: expected_length = len(expected) self.assertEqual(expected_length, len(actual)) for idx in range(expected_length): - expected_label: labels.SingleLabel = expected[idx]['_label'] + expected_label: SingleLabel = expected[idx]['_label'] is_modified: bool = expected[idx]['_is_modified'] actual_label = actual[idx] @@ -80,7 +81,7 @@ def _check_result(self, expected: List[dict], actual: List[labels.SingleLabel]) # public: test cases def test_merge(self): # case 0: add 3 new labels - candidate_labels_1 = labels.UserLabels.parse_obj({ + candidate_labels_1 = UserLabels.parse_obj({ 'labels': [{ 'name': 'a', 'aliases': ['aa', 'aaa'] @@ -92,26 +93,26 @@ def test_merge(self): 'aliases': [] }] }) - conflict_labels = labels.merge_labels(label_storage_file=self._label_storage_file, - new_labels=candidate_labels_1, - check_only=False) + user_labels = UserLabels(storage_file=self._label_storage_file) + conflict_labels = user_labels.upsert_labels(new_labels=candidate_labels_1, check_only=False) + expected = [{ - '_label': labels.SingleLabel(id=0, name='a', aliases=['aa', 'aaa']), + '_label': SingleLabel(id=0, name='a', aliases=['aa', 'aaa']), '_is_modified': False, }, { - '_label': labels.SingleLabel(id=1, name='h', aliases=['hh', 'hhh']), + '_label': SingleLabel(id=1, name='h', aliases=['hh', 'hhh']), '_is_modified': False, }, { - '_label': labels.SingleLabel(id=2, name='z'), + '_label': SingleLabel(id=2, name='z'), '_is_modified': False, }] self.assertFalse(conflict_labels.labels) self._check_result(expected=expected, - actual=labels.get_user_labels_from_storage(self._label_storage_file).labels) + actual=UserLabels(storage_file=self._label_storage_file).labels) # a unchanged, m with a conflicted alias hh, so all merge is ignored # no change will be made to the storage file - candidate_labels_2 = labels.UserLabels.parse_obj({ + candidate_labels_2 = UserLabels.parse_obj({ 'labels': [{ 'name': 'a', 'aliases': ['aa', 'aaa'] @@ -124,16 +125,14 @@ def test_merge(self): }] }) # candidate_labels = ['a,aa,aaa', 'm,hh', 'zz'] - conflict_labels = labels.merge_labels(label_storage_file=self._label_storage_file, - new_labels=candidate_labels_2, - check_only=False) - conflict_labels_expected = labels.UserLabels.parse_obj({'labels': [{'name': 'm', 'aliases': ['hh']}]}) + user_labels = UserLabels(storage_file=self._label_storage_file) + conflict_labels = user_labels.upsert_labels(new_labels=candidate_labels_2, check_only=False) + conflict_labels_expected = UserLabels.parse_obj({'labels': [{'name': 'm', 'aliases': ['hh']}]}) self.assertDictEqual(conflict_labels_expected.dict(), conflict_labels.dict()) - self._check_result(expected=expected, - actual=labels.get_user_labels_from_storage(self._label_storage_file).labels) + self._check_result(expected=expected, actual=UserLabels(storage_file=self._label_storage_file).labels) # a: reset aliases, h: reset aliases, x: add new, z: unchanged - candidate_labels_3 = labels.UserLabels.parse_obj({ + candidate_labels_3 = UserLabels.parse_obj({ 'labels': [{ 'name': 'A', 'aliases': ['aa'] @@ -146,102 +145,95 @@ def test_merge(self): }] }) # candidate_labels = ["A,aa", "h",
"x,xx,xxx"] - conflict_labels = labels.merge_labels(label_storage_file=self._label_storage_file, - new_labels=candidate_labels_3, - check_only=False) + user_labels = UserLabels(storage_file=self._label_storage_file) + conflict_labels = user_labels.upsert_labels(new_labels=candidate_labels_3, check_only=False) expected = [{ - '_label': labels.SingleLabel(id=0, name='a', aliases=['aa']), + '_label': SingleLabel(id=0, name='a', aliases=['aa']), '_is_modified': True, }, { - '_label': labels.SingleLabel(id=1, name='h'), + '_label': SingleLabel(id=1, name='h'), '_is_modified': True, }, { - '_label': labels.SingleLabel(id=2, name='z'), + '_label': SingleLabel(id=2, name='z'), '_is_modified': False, }, { - '_label': labels.SingleLabel(id=3, name='x', aliases=['xx', 'xxx']), + '_label': SingleLabel(id=3, name='x', aliases=['xx', 'xxx']), '_is_modified': False, }] self.assertFalse(conflict_labels.labels) - self._check_result(expected=expected, - actual=labels.get_user_labels_from_storage(self._label_storage_file).labels) + self._check_result(expected=expected, actual=UserLabels(storage_file=self._label_storage_file).labels) # h: reset aliases with conflict, so all merge is ignored, storage file unchanged # candidate_labels = ["h,a"] - candidate_labels_4 = labels.UserLabels.parse_obj({'labels': [{'name': 'h', 'aliases': ['a']}]}) - conflict_labels = labels.merge_labels(label_storage_file=self._label_storage_file, - new_labels=candidate_labels_4, - check_only=False) - conflict_labels_expected = labels.UserLabels.parse_obj({'labels': [{'name': 'h', 'aliases': ['a']}]}) + candidate_labels_4 = UserLabels.parse_obj({'labels': [{'name': 'h', 'aliases': ['a']}]}) + user_labels = UserLabels(storage_file=self._label_storage_file) + conflict_labels = user_labels.upsert_labels(new_labels=candidate_labels_4, check_only=False) + + conflict_labels_expected = UserLabels.parse_obj({'labels': [{'name': 'h', 'aliases': ['a']}]}) self.assertDictEqual(conflict_labels_expected.dict(), conflict_labels.dict()) - self._check_result(expected=expected, - actual=labels.get_user_labels_from_storage(self._label_storage_file).labels) + self._check_result(expected=expected, actual=UserLabels(storage_file=self._label_storage_file).labels) # checkonly, wants to add c # candidate_labels = ['c,cc,ccc'] - candidate_labels_5 = labels.UserLabels.parse_obj({'labels': [{'name': 'c', 'aliases': ['cc', 'ccc']}]}) - conflict_labels = labels.merge_labels(label_storage_file=self._label_storage_file, - new_labels=candidate_labels_5, - check_only=True) + candidate_labels_5 = UserLabels.parse_obj({'labels': [{'name': 'c', 'aliases': ['cc', 'ccc']}]}) + user_labels = UserLabels(storage_file=self._label_storage_file) + conflict_labels = user_labels.upsert_labels(new_labels=candidate_labels_5, check_only=True) + self.assertFalse(conflict_labels.labels) - self._check_result(expected=expected, - actual=labels.get_user_labels_from_storage(self._label_storage_file).labels) + self._check_result(expected=expected, actual=UserLabels(storage_file=self._label_storage_file).labels) # add again # candidate_labels = ['c,cc,ccc'] - candidate_labels_6 = labels.UserLabels.parse_obj({'labels': [{'name': 'c', 'aliases': ['cc', 'ccc']}]}) - conflict_labels = labels.merge_labels(label_storage_file=self._label_storage_file, - new_labels=candidate_labels_6, - check_only=False) + candidate_labels_6 = UserLabels.parse_obj({'labels': [{'name': 'c', 'aliases': ['cc', 'ccc']}]}) + user_labels = UserLabels(storage_file=self._label_storage_file) + conflict_labels = 
user_labels.upsert_labels(new_labels=candidate_labels_6, check_only=False) expected = [{ - '_label': labels.SingleLabel(id=0, name='a', aliases=['aa']), + '_label': SingleLabel(id=0, name='a', aliases=['aa']), '_is_modified': True, }, { - '_label': labels.SingleLabel(id=1, name='h'), + '_label': SingleLabel(id=1, name='h'), '_is_modified': True, }, { - '_label': labels.SingleLabel(id=2, name='z'), + '_label': SingleLabel(id=2, name='z'), '_is_modified': False, }, { - '_label': labels.SingleLabel(id=3, name='x', aliases=['xx', 'xxx']), + '_label': SingleLabel(id=3, name='x', aliases=['xx', 'xxx']), '_is_modified': False, }, { - '_label': labels.SingleLabel(id=4, name='c', aliases=['cc', 'ccc']), + '_label': SingleLabel(id=4, name='c', aliases=['cc', 'ccc']), '_is_modified': False, }] self.assertFalse(conflict_labels.labels) - self._check_result(expected=expected, - actual=labels.get_user_labels_from_storage(self._label_storage_file).labels) + self._check_result(expected=expected, actual=UserLabels(storage_file=self._label_storage_file).labels) # add label with head and tail spaces - candidate_labels_7 = labels.UserLabels.parse_obj( + candidate_labels_7 = UserLabels.parse_obj( {'labels': [{ 'name': ' d ', 'aliases': ['dd ', ' ddd', ' d d d'] }]}) # candidate_labels = [' d ,dd , ddd, d d d'] - conflict_labels = labels.merge_labels(label_storage_file=self._label_storage_file, - new_labels=candidate_labels_7, - check_only=False) + user_labels = UserLabels(storage_file=self._label_storage_file) + conflict_labels = user_labels.upsert_labels(new_labels=candidate_labels_7, check_only=False) + expected = [{ - '_label': labels.SingleLabel(id=0, name='a', aliases=['aa']), + '_label': SingleLabel(id=0, name='a', aliases=['aa']), '_is_modified': True, }, { - '_label': labels.SingleLabel(id=1, name='h'), + '_label': SingleLabel(id=1, name='h'), '_is_modified': True, }, { - '_label': labels.SingleLabel(id=2, name='z'), + '_label': SingleLabel(id=2, name='z'), '_is_modified': False, }, { - '_label': labels.SingleLabel(id=3, name='x', aliases=['xx', 'xxx']), + '_label': SingleLabel(id=3, name='x', aliases=['xx', 'xxx']), '_is_modified': False, }, { - '_label': labels.SingleLabel(id=4, name='c', aliases=['cc', 'ccc']), + '_label': SingleLabel(id=4, name='c', aliases=['cc', 'ccc']), '_is_modified': False, }, { - '_label': labels.SingleLabel(id=5, name='d', aliases=['dd', 'ddd', 'd d d']), + '_label': SingleLabel(id=5, name='d', aliases=['dd', 'ddd', 'd d d']), '_is_modified': False, }] self.assertFalse(conflict_labels.labels) - self._check_result(expected=expected, - actual=labels.get_user_labels_from_storage(self._label_storage_file).labels) + self._check_result(expected=expected, actual=UserLabels(storage_file=self._label_storage_file).labels) diff --git a/ymir/backend/src/ymir_hel/common/constants/code.go b/ymir/backend/src/ymir_hel/common/constants/code.go new file mode 100644 index 0000000000..ab1851659c --- /dev/null +++ b/ymir/backend/src/ymir_hel/common/constants/code.go @@ -0,0 +1,13 @@ +package constants + +type ResponseCode int + +const ( + CodeSuccess ResponseCode = 0 + + // Viewer Error Code + CodeViewerGeneral ResponseCode = 180100 + CodeViewerDataMiss ResponseCode = 180101 + CodeViewerInvalidParms ResponseCode = 180102 + CodeViewerRepoNotExist ResponseCode = 180103 +) diff --git a/ymir/backend/src/ymir_hel/common/constants/elements.go b/ymir/backend/src/ymir_hel/common/constants/elements.go new file mode 100644 index 0000000000..56c64d95aa --- /dev/null +++ 
b/ymir/backend/src/ymir_hel/common/constants/elements.go @@ -0,0 +1,128 @@ +package constants + +import ( + "fmt" + "path" + + "google.golang.org/protobuf/proto" + + "github.com/IndustryEssentials/ymir-hel/protos" +) + +// Mir Repos. +type MirRepo struct { + SandboxRoot string + UserID string + RepoID string + BranchID string + TaskID string +} + +func (mirRepo MirRepo) BuildRepoID() (string, string) { + mirRoot := path.Join(mirRepo.SandboxRoot, mirRepo.UserID, mirRepo.RepoID) + mirRev := fmt.Sprintf("%s@%s", mirRepo.BranchID, mirRepo.TaskID) + return mirRoot, mirRev +} + +// Mir Storage Files. +type MirFile int + +const ( + MirfileMetadatas MirFile = iota + MirfileAnnotations + MirfileKeywords + MirfileContext + MirfileTasks +) + +func (mirFile MirFile) String() string { + return []string{"metadatas.mir", "annotations.mir", "keywords.mir", "context.mir", "tasks.mir"}[mirFile] +} + +func (mirFile MirFile) ProtoData() proto.Message { + switch mirFile { + case MirfileMetadatas: + return &protos.MirMetadatas{} + + case MirfileAnnotations: + return &protos.MirAnnotations{} + + case MirfileKeywords: + return &protos.MirKeywords{} + + case MirfileContext: + return &protos.MirContext{} + + case MirfileTasks: + return &protos.MirTasks{} + + default: + return nil + } +} + +// Elements structs. +type MirTimestamp struct { + Start int32 `json:"start" bson:"start"` + Duration float32 `json:"duration" bson:"duration"` +} + +type MirIntPoint struct { + X int32 `json:"x" bson:"x"` + Y int32 `json:"y" bson:"y"` + Z int32 `json:"z" bson:"z"` +} + +type MirRect struct { + X int32 `json:"x" bson:"x"` + Y int32 `json:"y" bson:"y"` + W int32 `json:"w" bson:"w"` + H int32 `json:"h" bson:"h"` + RotateAngle float32 `json:"rotate_angle" bson:"rotate_angle"` // unit in pi. +} + +// Intermediate Mir datas. 
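+// The attribute and annotation structs below mirror their protobuf counterparts;
+// the bson tags let them be stored directly as MongoDB documents, and they are
+// filled via BuildStructFromMessage (see mirdatas.go).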
+type MirAssetAttributes struct { + Timestamp *MirTimestamp `json:"timestamp" bson:"timestamp"` + TvtType int32 `json:"tvt_type" bson:"tvt_type"` + AssetType int32 `json:"asset_type" bson:"asset_type"` + Width int32 `json:"width" bson:"width"` + Height int32 `json:"height" bson:"height"` + ImageChannels int32 `json:"image_channels" bson:"image_channels"` + ByteSize int32 `json:"byte_size" bson:"byte_size"` + OriginFilename string `json:"origin_filename" bson:"origin_filename"` +} + +type MirObjectAnnotation struct { + Index int32 `json:"index" bson:"index"` + Box *MirRect `json:"box" bson:"box"` + ClassId int32 `json:"class_id" bson:"class_id"` + Score float64 `json:"score" bson:"score"` + AnnoQuality float32 `json:"anno_quality" bson:"anno_quality"` + Tags map[string]string `json:"tags" bson:"tags"` + Cm int32 `json:"cm" bson:"cm"` + DetLinkId int32 `json:"det_link_id" bson:"det_link_id"` + ClassName string `json:"class_name" bson:"class_name"` + Polygon []*MirIntPoint `json:"polygon" bson:"polygon"` +} + +func NewMirObjectAnnotation() MirObjectAnnotation { + return MirObjectAnnotation{Box: &MirRect{}, Tags: map[string]string{}, Polygon: []*MirIntPoint{}} +} + +type DatasetStatsElement struct { + // Assets count + ClassIDsCount map[int]int64 `json:"class_ids_count"` + NegativeAssetsCount int64 `json:"negative_assets_count"` + PositiveAssetsCount int64 `json:"positive_assets_count"` + + EvalClassIDs []int32 `json:"eval_class_ids"` + + // Annotations + AnnotationsCount int64 `json:"annos_count"` + AnnotationsHist *map[string]*MirHist `json:"annos_hist"` + + // Tags + TagsCountTotal map[string]int64 `json:"tags_count_total"` + TagsCount map[string]map[string]int64 `json:"tags_count"` +} diff --git a/ymir/backend/src/ymir_hel/common/constants/histograms.go b/ymir/backend/src/ymir_hel/common/constants/histograms.go new file mode 100644 index 0000000000..ee5159038c --- /dev/null +++ b/ymir/backend/src/ymir_hel/common/constants/histograms.go @@ -0,0 +1,73 @@ +package constants + +import ( + "encoding/json" + "fmt" + + "go.mongodb.org/mongo-driver/bson" +) + +const ( + BytesPerMB float64 = 1048576 +) + +// Mir Histograms. +type MirHist struct { + SparseBuckets *map[string]int32 `json:"-" bson:"-"` + LowerBNDs []float64 `json:"-" bson:"-"` + Ops interface{} `json:"-" bson:"-"` + Output *[]map[string]string `json:"-" bson:"output"` +} + +func (h *MirHist) BuildMirHist(bucket *map[string]int32) { + h.Output = &[]map[string]string{} + h.SparseBuckets = bucket + for _, LowerBND := range h.LowerBNDs { + histKey := fmt.Sprintf("%.2f", LowerBND) + value := "0" + if data, ok := (*h.SparseBuckets)[histKey]; ok { + value = fmt.Sprintf("%d", data) + } + *h.Output = append(*h.Output, map[string]string{"x": histKey, "y": value}) + } +} + +// MarshalJSON returns the histogram as a JSON array, not as a sub-field of the struct. +func (h *MirHist) MarshalJSON() ([]byte, error) { + return json.Marshal(&h.Output) +} + +// Pre-defined histogram schemas.
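+// Each schema drives one MongoDB $bucket aggregation (see queryHistogram in
+// common/db/mongodb/mongo.go): Ops is the groupBy expression and LowerBNDs are
+// the inclusive lower boundaries of the buckets.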
+var ConstAssetsMirHist map[string]MirHist = map[string]MirHist{ + "quality": {Ops: "$quality", LowerBNDs: []float64{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0}}, + "bytes": {Ops: bson.M{"$divide": bson.A{"$metadata.byte_size", BytesPerMB}}, + LowerBNDs: []float64{0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0}}, + "area": {Ops: bson.M{"$multiply": bson.A{"$metadata.width", "$metadata.height"}}, + LowerBNDs: []float64{0, 100000, 500000, 1000000, 2000000, 4000000, 6000000, 8000000}}, + "hw_ratio": {Ops: bson.M{"$divide": bson.A{"$metadata.height", "$metadata.width"}}, + LowerBNDs: []float64{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5}}, +} + +var ConstGtMirHist map[string]MirHist = map[string]MirHist{ + "quality": {Ops: "$gt.anno_quality", LowerBNDs: []float64{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0}}, + "area": { + Ops: bson.M{"$multiply": bson.A{"$gt.box.w", "$gt.box.h"}}, + LowerBNDs: []float64{0, 50, 500, 2500, 5000, 10000, 50000, 100000, 200000}, + }, + "area_ratio": { + Ops: bson.M{"$divide": bson.A{bson.M{"$multiply": bson.A{"$gt.box.w", "$gt.box.h"}}, + bson.M{"$multiply": bson.A{"$metadata.width", "$metadata.height"}}}}, + LowerBNDs: []float64{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0}}, +} + +var ConstPredMirHist map[string]MirHist = map[string]MirHist{ + "quality": {Ops: "$pred.anno_quality", LowerBNDs: []float64{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0}}, + "area": { + Ops: bson.M{"$multiply": bson.A{"$pred.box.w", "$pred.box.h"}}, + LowerBNDs: []float64{0, 50, 500, 2500, 5000, 10000, 50000, 100000, 200000}, + }, + "area_ratio": { + Ops: bson.M{"$divide": bson.A{bson.M{"$multiply": bson.A{"$pred.box.w", "$pred.box.h"}}, + bson.M{"$multiply": bson.A{"$metadata.width", "$metadata.height"}}}}, + LowerBNDs: []float64{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0}}, +} diff --git a/ymir/backend/src/ymir_hel/common/constants/metrics.go b/ymir/backend/src/ymir_hel/common/constants/metrics.go new file mode 100644 index 0000000000..30d46935d3 --- /dev/null +++ b/ymir/backend/src/ymir_hel/common/constants/metrics.go @@ -0,0 +1,47 @@ +package constants + +import ( + "time" +) + +type MirMetrics int + +const ( + MetricsUnknown MirMetrics = iota + MetricsClassIDs + MetricsModel + MetricsProject + MetricsTask +) + +var ( + MetricsDatasetStringList = []string{"_", "class_ids", "model", "project", "task"} +) + +func (mirMetrics MirMetrics) String() string { + return MetricsDatasetStringList[mirMetrics] +} + +func ParseMirMetrics(s string) MirMetrics { + for idx, v := range MetricsDatasetStringList { + if v == s { + return MirMetrics(idx) + } + } + return MetricsUnknown +} + +type MetricsDataPoint struct { + CreateTime time.Time `json:"create_time" bson:"create_time" mapstructure:"create_time"` + ID string `json:"id" bson:"id" mapstructure:"id"` + UserID string `json:"user_id" bson:"user_id" mapstructure:"user_id"` + ProjectID string `json:"project_id" bson:"project_id" mapstructure:"project_id"` + ClassIDs []int `json:"class_ids" bson:"class_ids" mapstructure:"class_ids"` + + Other map[string]interface{} `mapstructure:",remain"` +} + +type MetricsQueryPoint struct { + Legend string `json:"legend"` + Count int64 `json:"count"` +} diff --git a/ymir/backend/src/ymir_hel/common/constants/mirdatas.go b/ymir/backend/src/ymir_hel/common/constants/mirdatas.go new file mode 100644 index 0000000000..5a3a32f721 --- /dev/null +++ b/ymir/backend/src/ymir_hel/common/constants/mirdatas.go @@ -0,0 +1,68 @@ +package constants 
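+
+// mirdatas.go holds the MongoDB-facing document models (MirdataModel, MirAssetDetail,
+// IndexedDatasetMetadata) plus BuildStructFromMessage, which fills them from
+// protobuf messages via protojson.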
+ +import ( + "encoding/json" + + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" +) + +type MirdataModel struct { + ModelHash string `json:"model_hash"` + MeanAveragePrecision float32 `json:"mean_average_precision"` + Context string `json:"context"` + Stages map[string]interface{} `json:"stages"` + BestStageName string `json:"best_stage_name"` + + TaskParameters string `json:"task_parameters"` + ExecutorConfig map[string]interface{} `json:"executor_config"` +} + +func NewMirdataModel(taskParameters string) *MirdataModel { + modelData := MirdataModel{TaskParameters: taskParameters, ExecutorConfig: map[string]interface{}{}} + return &modelData +} + +type MirAssetDetail struct { + // Export fields. + DocID string `json:"-" bson:"_id"` + AssetID string `json:"asset_id" bson:"asset_id"` + MetaData *MirAssetAttributes `json:"metadata" bson:"metadata"` + JoinedClassIDs []int32 `json:"class_ids" bson:"class_ids"` + Gt []*MirObjectAnnotation `json:"gt" bson:"gt"` + Pred []*MirObjectAnnotation `json:"pred" bson:"pred"` + Cks map[string]string `json:"cks" bson:"cks"` + Quality float32 `json:"image_quality" bson:"quality"` +} + +func NewMirAssetDetail() MirAssetDetail { + mirAssetDetail := MirAssetDetail{} + mirAssetDetail.MetaData = &MirAssetAttributes{Timestamp: &MirTimestamp{}} + mirAssetDetail.JoinedClassIDs = []int32{} + mirAssetDetail.Pred = make([]*MirObjectAnnotation, 0) + mirAssetDetail.Gt = make([]*MirObjectAnnotation, 0) + mirAssetDetail.Cks = map[string]string{} + mirAssetDetail.Quality = -1 + return mirAssetDetail +} + +type IndexedDatasetMetadata struct { + Exist bool `json:"exist" bson:"exist"` + Ready bool `json:"ready" bson:"ready"` + + HistAssets *map[string]*MirHist `json:"hist_assets" bson:"hist_assets"` + HistAnnosGt *map[string]*MirHist `json:"hist_annos_gt" bson:"hist_annos_gt"` + HistAnnosPred *map[string]*MirHist `json:"hist_annos_pred" bson:"hist_annos_pred"` +} + +func BuildStructFromMessage(message proto.Message, structOut interface{}) interface{} { + m := protojson.MarshalOptions{EmitUnpopulated: true, AllowPartial: true, UseProtoNames: true, UseEnumNumbers: true} + jsonBytes, err := m.Marshal(message) + if err != nil { + panic(err) + } + if err := json.Unmarshal(jsonBytes, &structOut); err != nil { + panic(err) + } + return structOut +} diff --git a/ymir/backend/src/ymir_hel/common/constants/results.go b/ymir/backend/src/ymir_hel/common/constants/results.go new file mode 100644 index 0000000000..734f6f951a --- /dev/null +++ b/ymir/backend/src/ymir_hel/common/constants/results.go @@ -0,0 +1,60 @@ +package constants + +type QueryAssetsResult struct { + AssetsDetail []MirAssetDetail `json:"elements"` + Offset int `json:"offset"` + Limit int `json:"limit"` + Anchor int64 `json:"anchor"` + TotalAssetsCount int64 `json:"total_assets_count"` +} + +type QueryDatasetStatsContext struct { + RequireAssetsHist bool `json:"require_assets_hist"` + RequireAnnotationsHist bool `json:"require_annos_hist"` + RepoIndexExist bool `json:"repo_index_exist"` + RepoIndexReady bool `json:"repo_index_ready"` +} + +type QueryDatasetStatsResult struct { + // Assets + TotalAssetsCount int64 `json:"total_assets_count"` + TotalAssetsFileSize int64 `json:"total_assets_mbytes"` + AssetsHist *map[string]*MirHist `json:"assets_hist"` + + // Annotations + Gt DatasetStatsElement `json:"gt"` + Pred DatasetStatsElement `json:"pred"` + + // Cks + CksCountTotal map[string]int64 `json:"cks_count_total"` + CksCount map[string]map[string]int64 `json:"cks_count"` + + // Task and 
query context. + NewTypesAdded bool `json:"new_types_added"` + QueryContext QueryDatasetStatsContext `json:"query_context"` +} + +func NewQueryDatasetStatsResult() *QueryDatasetStatsResult { + queryResult := QueryDatasetStatsResult{ + Gt: DatasetStatsElement{ + ClassIDsCount: map[int]int64{}, + TagsCount: map[string]map[string]int64{}, + TagsCountTotal: map[string]int64{}, + }, + Pred: DatasetStatsElement{ + ClassIDsCount: map[int]int64{}, + TagsCount: map[string]map[string]int64{}, + TagsCountTotal: map[string]int64{}, + }, + + CksCount: map[string]map[string]int64{}, + CksCountTotal: map[string]int64{}, + } + return &queryResult +} + +type QueryDatasetDupResult struct { + Duplication int `json:"duplication"` + TotalCount map[string]int64 `json:"total_count"` + ResidualCount map[string]int64 `json:"residual_count"` +} diff --git a/ymir/backend/src/ymir_hel/common/db/mongodb/mongo.go b/ymir/backend/src/ymir_hel/common/db/mongodb/mongo.go new file mode 100644 index 0000000000..55e4d0bd88 --- /dev/null +++ b/ymir/backend/src/ymir_hel/common/db/mongodb/mongo.go @@ -0,0 +1,714 @@ +package mongodb + +import ( + "context" + "fmt" + "log" + "sort" + "strings" + "time" + + "github.com/IndustryEssentials/ymir-hel/common/constants" + "github.com/IndustryEssentials/ymir-hel/common/tools" + "github.com/IndustryEssentials/ymir-hel/protos" + "github.com/jinzhu/now" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/bson/primitive" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" + "go.mongodb.org/mongo-driver/x/bsonx" + "golang.org/x/exp/slices" +) + +type BaseDatabase interface { + Collection(name string, opts ...*options.CollectionOptions) *mongo.Collection +} +type MongoServer struct { + mirDatabase BaseDatabase + metricsDatabase BaseDatabase + Ctx context.Context + metadataName string + metricsPrefix string +} + +func NewMongoServer(mongoCtx context.Context, mirDatabase BaseDatabase, metricsDatabase BaseDatabase) *MongoServer { + return &MongoServer{ + mirDatabase: mirDatabase, + metricsDatabase: metricsDatabase, + Ctx: mongoCtx, + metadataName: "__collection_metadata__", + metricsPrefix: "__metrics__", + } +} + +func (s *MongoServer) getRepoCollection(mirRepo *constants.MirRepo) (*mongo.Collection, string) { + _, mirRev := mirRepo.BuildRepoID() + return s.mirDatabase.Collection(mirRev), mirRev +} + +func (s *MongoServer) getMetadataCollection() *mongo.Collection { + collection, _ := s.getRepoCollection(&constants.MirRepo{BranchID: s.metadataName}) + return collection +} + +func (s *MongoServer) setDatasetExistence(collectionName string, ready bool, exist bool) { + collection := s.getMetadataCollection() + data := bson.M{"ready": ready, "exist": exist} + s.upsertDocument(collection, collectionName, data) +} + +func (s *MongoServer) upsertDocument(collection *mongo.Collection, id string, data interface{}) { + filter := bson.M{"_id": id} + update := bson.M{"$set": data} + upsert := true + _, err := collection.UpdateOne(s.Ctx, filter, update, &options.UpdateOptions{Upsert: &upsert}) + if err != nil { + panic(err) + } +} + +func (s *MongoServer) CheckDatasetExistenceReady(mirRepo *constants.MirRepo) (exist bool, ready bool) { + defer func() { + if r := recover(); r != nil { + exist = false + ready = false + } + }() + + metadata := s.loadDatasetMetaData(mirRepo) + return metadata.Exist, metadata.Ready +} + +func (s *MongoServer) IndexDatasetData( + mirRepo *constants.MirRepo, + mirMetadatas *protos.MirMetadatas, + mirAnnotations *protos.MirAnnotations, 
) { + exist, _ := s.CheckDatasetExistenceReady(mirRepo) + if exist { + return + } + + defer tools.TimeTrack(time.Now(), mirRepo.TaskID) + log.Printf("Load/Build index for %v, %d assets", mirRepo.TaskID, len(mirMetadatas.Attributes)) + + collection, collectionName := s.getRepoCollection(mirRepo) + s.setDatasetExistence(collectionName, false, true) + err := collection.Database().CreateCollection(s.Ctx, collectionName) + if err != nil { + log.Printf("Collection %s exists, skip creating.", collectionName) + } + // Cleanup if error + defer func() { + if r := recover(); r != nil { + s.setDatasetExistence(collectionName, false, false) + err := collection.Drop(s.Ctx) + if err != nil { + panic(err) + } + log.Printf("IndexDatasetData %s panic %v", mirRepo.TaskID, r) + } + }() + + // Prepare mirdatas: mirMetadatas, mirCks, gtAnnotations, predAnnotations. + gtAnnotations := map[string]*protos.SingleImageAnnotations{} + if mirAnnotations.GroundTruth != nil && len(mirAnnotations.GroundTruth.ImageAnnotations) > 0 { + gtAnnotations = mirAnnotations.GroundTruth.ImageAnnotations + } + predAnnotations := map[string]*protos.SingleImageAnnotations{} + if mirAnnotations.Prediction != nil && len(mirAnnotations.Prediction.ImageAnnotations) > 0 { + predAnnotations = mirAnnotations.Prediction.ImageAnnotations + } + mirCks := map[string]*protos.SingleImageCks{} + if mirAnnotations.ImageCks != nil && len(mirAnnotations.ImageCks) > 0 { + mirCks = mirAnnotations.ImageCks + } + + // Order assets, since iteration order over a protobuf map is not deterministic. + assetIDs := make([]string, 0) + for assetID := range mirMetadatas.Attributes { + assetIDs = append(assetIDs, assetID) + } + sort.Strings(assetIDs) + mirAssetDetails := make([]interface{}, len(assetIDs)) + for idx, assetID := range assetIDs { + mirAssetDetails[idx] = s.buildMirAssetDetail(assetID, mirMetadatas, mirCks, gtAnnotations, predAnnotations) + } + if len(mirAssetDetails) > 0 { + _, err = collection.InsertMany(s.Ctx, mirAssetDetails) + if err != nil { + panic(err) + } + } + s.buildCollectionIndex(collection) + s.postIndexDatasetData(collection, collectionName) +} + +func (s *MongoServer) buildMirAssetDetail( + assetID string, + mirMetadatas *protos.MirMetadatas, + mirCks map[string]*protos.SingleImageCks, + gtAnnotations map[string]*protos.SingleImageAnnotations, + predAnnotations map[string]*protos.SingleImageAnnotations, +) *constants.MirAssetDetail { + mirAssetDetail := constants.NewMirAssetDetail() + mirAssetDetail.DocID = assetID + mirAssetDetail.AssetID = assetID + constants.BuildStructFromMessage(mirMetadatas.Attributes[assetID], &mirAssetDetail.MetaData) + if cks, ok := mirCks[assetID]; ok { + if len(cks.Cks) > 0 { + mirAssetDetail.Cks = cks.Cks + } + mirAssetDetail.Quality = cks.ImageQuality + } + + mapClassIDs := map[int32]bool{} + if gtAnnotation, ok := gtAnnotations[assetID]; ok { + for _, annotation := range gtAnnotation.Boxes { + annotationOut := constants.NewMirObjectAnnotation() + constants.BuildStructFromMessage(annotation, &annotationOut) + mirAssetDetail.Gt = append(mirAssetDetail.Gt, &annotationOut) + mapClassIDs[annotation.ClassId] = true + } + } + if predAnnotation, ok := predAnnotations[assetID]; ok { + for _, annotation := range predAnnotation.Boxes { + annotationOut := constants.NewMirObjectAnnotation() + constants.BuildStructFromMessage(annotation, &annotationOut) + mirAssetDetail.Pred = append(mirAssetDetail.Pred, &annotationOut) + mapClassIDs[annotation.ClassId] = true + } + } + + mirAssetDetail.JoinedClassIDs = make([]int32, 0, len(mapClassIDs)) + for
k := range mapClassIDs { + mirAssetDetail.JoinedClassIDs = append(mirAssetDetail.JoinedClassIDs, k) + } + return &mirAssetDetail +} + +func (s *MongoServer) postIndexDatasetData(collection *mongo.Collection, collectionName string) { + defer tools.TimeTrack(time.Now(), collectionName) + + indexedMetadata := constants.IndexedDatasetMetadata{ + HistAssets: &map[string]*constants.MirHist{}, + HistAnnosGt: &map[string]*constants.MirHist{}, + HistAnnosPred: &map[string]*constants.MirHist{}, + Ready: true, + Exist: true, + } + for histKey, hist := range constants.ConstAssetsMirHist { + assetHist := hist + assetHist.BuildMirHist(s.queryHistogram( + collection, + assetHist.Ops, + assetHist.LowerBNDs, + "$metadata", + )) + (*indexedMetadata.HistAssets)[histKey] = &assetHist + } + for histKey, hist := range constants.ConstGtMirHist { + annoHist := hist + annoHist.BuildMirHist(s.queryHistogram( + collection, + annoHist.Ops, + annoHist.LowerBNDs, + "$gt", + )) + (*indexedMetadata.HistAnnosGt)[histKey] = &annoHist + } + for histKey, hist := range constants.ConstPredMirHist { + annoHist := hist + annoHist.BuildMirHist(s.queryHistogram( + collection, + annoHist.Ops, + annoHist.LowerBNDs, + "$pred", + )) + (*indexedMetadata.HistAnnosPred)[histKey] = &annoHist + } + metadataCollection := s.getMetadataCollection() + s.upsertDocument(metadataCollection, collectionName, indexedMetadata) +} + +func (s *MongoServer) buildCollectionIndex(collection *mongo.Collection) { + defer tools.TimeTrack(time.Now(), "") + + index := []mongo.IndexModel{ + { + Keys: bson.M{"asset_id": bsonx.Int32(1)}, Options: options.Index(), + }, + { + Keys: bson.M{"cks": bsonx.Int32(1)}, Options: options.Index(), + }, + { + Keys: bson.M{"class_ids": bsonx.Int32(1)}, Options: options.Index(), + }, + { + Keys: bson.M{"gt.class_id": bsonx.Int32(1)}, Options: options.Index(), + }, + { + Keys: bson.M{"pred.class_id": bsonx.Int32(1)}, Options: options.Index(), + }, + { + Keys: bson.M{"gt.cm": bsonx.Int32(1)}, Options: options.Index(), + }, + { + Keys: bson.M{"pred.cm": bsonx.Int32(1)}, Options: options.Index(), + }, + { + Keys: bson.M{"gt.tags": bsonx.Int32(1)}, Options: options.Index(), + }, + { + Keys: bson.M{"pred.tags": bsonx.Int32(1)}, Options: options.Index(), + }, + } + + opts := options.CreateIndexes().SetMaxTime(3600 * time.Second) + _, err := collection.Indexes().CreateMany(s.Ctx, index, opts) + if err != nil { + panic(err) + } +} + +func (s *MongoServer) countDatasetAssetsInClass( + collection *mongo.Collection, + queryField string, + classIDs []int, +) (int64, int64) { + if len(queryField) < 1 { + panic("invalid queryField in countDatasetAssetsInClass") + } + + cond := make([]bson.M, 0) + if len(classIDs) > 0 { + cond = append(cond, bson.M{"$match": bson.M{queryField: bson.M{"$in": classIDs}}}) + } + cond = append(cond, bson.M{"$group": bson.D{ + bson.E{Key: "_id", Value: nil}, + bson.E{Key: "count", Value: bson.M{"$sum": 1}}, + bson.E{Key: "sum", Value: bson.M{"$sum": bson.M{"$size": "$" + queryField}}}}}) + + aggreData := *s.aggregateMongoQuery(collection, cond) + if len(aggreData) < 1 { + return 0, 0 + } + return int64(aggreData[0]["count"].(int32)), int64(aggreData[0]["sum"].(int32)) +} + +func (s *MongoServer) QueryDatasetAssets( + mirRepo *constants.MirRepo, + offset int, + limit int, + classIDs []int, + annoTypes []string, + currentAssetID string, + cmTypes []int, + cks []string, + tags []string, +) *constants.QueryAssetsResult { + defer tools.TimeTrack(time.Now(), mirRepo.TaskID) + + _, ready := 
s.CheckDatasetExistenceReady(mirRepo) + if !ready { + panic("QueryDatasetAssets repo not ready: " + mirRepo.TaskID) + } + + log.Printf( + "Query offset: %d, limit: %d, classIDs: %v, annoTypes: %v, currentId: %s, cmTypes: %v cks: %v tags: %v\n", + offset, + limit, + classIDs, + annoTypes, + currentAssetID, + cmTypes, + cks, + tags, + ) + collection, _ := s.getRepoCollection(mirRepo) + + // "and" for inter-group, "or" for inner-group + filterAndConditions := bson.A{} + // class id in either field counts. + if len(classIDs) > 0 { + singleQuery := bson.M{"class_ids": bson.M{"$in": classIDs}} + filterAndConditions = append(filterAndConditions, singleQuery) + } + + if len(annoTypes) > 0 { + singleQuery := bson.M{annoTypes[0] + ".class_id": bson.M{"$exists": true}} + + // Both gt and pred. + if len(annoTypes) > 1 { + singleQuery = bson.M{"$or": bson.A{ + bson.M{"gt.class_id": bson.M{"$exists": true}}, + bson.M{"pred.class_id": bson.M{"$exists": true}}, + }} + } + filterAndConditions = append(filterAndConditions, singleQuery) + } + + if len(cmTypes) > 0 { + singleQuery := bson.M{"$or": bson.A{ + bson.M{"gt.cm": bson.M{"$in": cmTypes}}, + bson.M{"pred.cm": bson.M{"$in": cmTypes}}, + }} + filterAndConditions = append(filterAndConditions, singleQuery) + } + + // ck format "xxx" "xxx:" "xxx:yyy" + if len(cks) > 0 { + orConditions := bson.A{} + for _, ck := range cks { + ckStrs := strings.Split(ck, ":") + if len(ckStrs) > 2 || len(ckStrs) < 1 || len(ckStrs[0]) == 0 { + panic(fmt.Sprintf("invalid ck: %s", ck)) + } + + if len(ckStrs) == 1 || len(ckStrs[1]) == 0 { + // case "xxx:" or "xxx" + orConditions = append(orConditions, bson.M{"cks." + ckStrs[0]: bson.M{"$exists": true}}) + } else { + // case "xxx:yyy" + orConditions = append(orConditions, bson.M{"cks." + ckStrs[0]: ckStrs[1]}) + } + } + filterAndConditions = append(filterAndConditions, bson.M{"$or": orConditions}) + } + + // tag format "xxx" "xxx:" "xxx:yyy" + if len(tags) > 0 { + orConditions := bson.A{} + for _, tag := range tags { + tagStrs := strings.Split(tag, ":") + if len(tagStrs) > 2 || len(tagStrs) < 1 || len(tagStrs[0]) == 0 { + panic(fmt.Sprintf("invalid tag: %s", tag)) + } + + if len(tagStrs) == 1 || len(tagStrs[1]) == 0 { + // case "xxx:" or "xxx" + orConditions = append(orConditions, bson.M{"$or": bson.A{ + bson.M{"gt.tags." + tagStrs[0]: bson.M{"$exists": true}}, + bson.M{"pred.tags." + tagStrs[0]: bson.M{"$exists": true}}, + }}) + } else { + // case "xxx:yyy" + orConditions = append(orConditions, bson.M{"$or": bson.A{ + bson.M{"gt.tags." + tagStrs[0]: tagStrs[1]}, + bson.M{"pred.tags." + tagStrs[0]: tagStrs[1]}, + }}) + } + } + filterAndConditions = append(filterAndConditions, bson.M{"$or": orConditions}) + } + + filterQuery := bson.M{} + if len(filterAndConditions) > 0 { + filterQuery["$and"] = filterAndConditions + } + + // This is a special field, used as anchor. + if len(currentAssetID) > 0 { + filterQuery["asset_id"] = bson.M{"$gte": currentAssetID} + } + + pageOptions := options.Find().SetSort(bson.M{"asset_id": 1}).SetSkip(int64(offset)).SetLimit(int64(limit)) + queryCursor, err := collection.Find(s.Ctx, filterQuery, pageOptions) + if err != nil { + panic(err) + } + queryData := []constants.MirAssetDetail{} + if err = queryCursor.All(s.Ctx, &queryData); err != nil { + panic(err) + } + + remainingAssetsCount, err := collection.CountDocuments(s.Ctx, filterQuery, &options.CountOptions{}) + if err != nil { + panic(err) + } + // Delete anchor query, so as to calculate count of documents. 
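+ // anchor = number of matching documents that sort before currentAssetID,
+ // i.e. totalAssetsCount - remainingAssetsCount computed below.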
+ delete(filterQuery, "asset_id") + totalAssetsCount, err := collection.CountDocuments(s.Ctx, filterQuery, &options.CountOptions{}) + if err != nil { + panic(err) + } + anchor := totalAssetsCount - remainingAssetsCount + log.Printf("filterQuery: %+v totalAssetsCount: %d anchor: %d\n", filterQuery, totalAssetsCount, anchor) + + return &constants.QueryAssetsResult{ + AssetsDetail: queryData, + Offset: offset, + Limit: limit, + Anchor: anchor, + TotalAssetsCount: totalAssetsCount, + } +} + +func (s *MongoServer) queryHistogram( + collection *mongo.Collection, + ops interface{}, + lowerBNDs []float64, + unwindField string, +) *map[string]int32 { + buckets := map[string]int32{} + + // MongoDB treats the last upper boundary as exclusive; add a delta to enclose it. + extendedLowerBNDs := lowerBNDs + extendedLowerBNDs = append(extendedLowerBNDs, lowerBNDs[len(lowerBNDs)-1]+0.01) + + cond := make([]bson.M, 0) + cond = append(cond, bson.M{"$unwind": unwindField}) + cond = append(cond, bson.M{"$bucket": bson.D{ + bson.E{ + Key: "groupBy", + Value: ops, + }, + bson.E{Key: "default", Value: "_others_"}, + bson.E{ + Key: "boundaries", + Value: extendedLowerBNDs, + }, + bson.E{Key: "output", Value: bson.M{"count": bson.M{"$sum": 1}}}, + }}) + + aggreData := s.aggregateMongoQuery(collection, cond) + for _, bucketMap := range *aggreData { + if buckedKey, ok := bucketMap["_id"].(float64); ok { + buckedKeyStr := fmt.Sprintf("%.2f", buckedKey) + buckets[buckedKeyStr] = bucketMap["count"].(int32) + } + } + return &buckets +} + +func (s *MongoServer) aggregateMongoQuery(collection *mongo.Collection, cond []bson.M) *[]map[string]interface{} { + showInfoCursor, err := collection.Aggregate(s.Ctx, cond) + if err != nil { + panic(err) + } + + var showsWithInfo []map[string]interface{} + if err = showInfoCursor.All(s.Ctx, &showsWithInfo); err != nil { + panic(err) + } + + return &showsWithInfo +} + +func (s *MongoServer) RemoveNonReadyDataset() { + collectionExistence := s.getMetadataCollection() + filter := bson.M{"ready": false, "exist": true} + queryCursor, err := collectionExistence.Find(s.Ctx, filter) + if err != nil { + panic(err) + } + queryDatas := []map[string]interface{}{} + if err = queryCursor.All(s.Ctx, &queryDatas); err != nil { + panic(err) + } + for _, record := range queryDatas { + collectionName := record["_id"].(string) + log.Printf(" Dropping non-ready collection %s", collectionName) + if err = s.mirDatabase.Collection(collectionName).Drop(s.Ctx); err != nil { + log.Panicf(" Fail to drop %s, error: %+v", collectionName, err) + } + s.setDatasetExistence(collectionName, false, false) + } + log.Printf("Dropped %d non-ready collections.", len(queryDatas)) +} + +func (s *MongoServer) loadDatasetMetaData( + mirRepo *constants.MirRepo) *constants.IndexedDatasetMetadata { + _, collectionName := s.getRepoCollection(mirRepo) + collectionExistence := s.getMetadataCollection() + filter := bson.M{"_id": collectionName} + metadata := &constants.IndexedDatasetMetadata{} + err := collectionExistence.FindOne(s.Ctx, filter).Decode(metadata) + if err != nil { + panic(err) + } + return metadata +} + +func (s *MongoServer) QueryDatasetStats( + mirRepo *constants.MirRepo, + classIDs []int, + requireAssetsHist bool, + requireAnnotationsHist bool, + queryData *constants.QueryDatasetStatsResult, +) *constants.QueryDatasetStatsResult { + _, ready := s.CheckDatasetExistenceReady(mirRepo) + if !ready { + panic("QueryDatasetStats repo not ready: " + mirRepo.TaskID) + } + collection, _ := s.getRepoCollection(mirRepo) + + // 
Build Query Context - hists request. + queryData.QueryContext.RequireAssetsHist = requireAssetsHist + queryData.QueryContext.RequireAnnotationsHist = requireAnnotationsHist + if requireAssetsHist || requireAnnotationsHist { + metadata := s.loadDatasetMetaData(mirRepo) + if requireAssetsHist { + queryData.AssetsHist = metadata.HistAssets + } + if requireAnnotationsHist { + queryData.Gt.AnnotationsHist = metadata.HistAnnosGt + queryData.Pred.AnnotationsHist = metadata.HistAnnosPred + } + } + + // Count negative samples in specific classes. + if len(classIDs) > 0 { + for k := range queryData.Gt.ClassIDsCount { + if !slices.Contains(classIDs, k) { + delete(queryData.Gt.ClassIDsCount, k) + } + } + for k := range queryData.Pred.ClassIDsCount { + if !slices.Contains(classIDs, k) { + delete(queryData.Pred.ClassIDsCount, k) + } + } + queryData.Gt.PositiveAssetsCount, queryData.Gt.AnnotationsCount = s.countDatasetAssetsInClass( + collection, + "gt.class_id", + classIDs, + ) + queryData.Gt.NegativeAssetsCount = queryData.TotalAssetsCount - queryData.Gt.PositiveAssetsCount + + queryData.Pred.PositiveAssetsCount, queryData.Pred.AnnotationsCount = s.countDatasetAssetsInClass( + collection, + "pred.class_id", + classIDs, + ) + queryData.Pred.NegativeAssetsCount = queryData.TotalAssetsCount - queryData.Pred.PositiveAssetsCount + } + + return queryData +} + +func (s *MongoServer) getMetricsCollection(collectionSuffix string) *mongo.Collection { + return s.metricsDatabase.Collection(s.metricsPrefix + collectionSuffix) +} +func (s *MongoServer) MetricsRecordSignals(collectionSuffix string, id string, data interface{}) { + s.upsertDocument(s.getMetricsCollection(collectionSuffix), id, data) +} + +func (s *MongoServer) MetricsQuerySignals( + collectionSuffix string, + userID string, + classIDs []int, + queryField string, + bucket string, + unit string, + limit int, +) *[]constants.MetricsQueryPoint { + collection := s.getMetricsCollection(collectionSuffix) + switch bucket { + case "count": + return s.metricsQueryByCount(collection, userID, classIDs, queryField, limit) + case "time": + return s.metricsQueryByTime(collection, userID, queryField, unit, limit) + default: + panic("invalid bucket value: " + bucket) + } +} + +func (s *MongoServer) metricsQueryByCount( + collection *mongo.Collection, + userID string, + classIDs []int, + queryField string, + limit int, +) *[]constants.MetricsQueryPoint { + cond := make([]bson.M, 0) + cond = append(cond, bson.M{"$unwind": "$" + queryField}) + cond = append(cond, bson.M{"$match": bson.M{"user_id": userID}}) + if len(classIDs) > 0 { + cond = append(cond, bson.M{"$match": bson.M{"class_ids": bson.M{"$in": classIDs}}}) + } + + cond = append(cond, bson.M{"$group": bson.D{ + bson.E{Key: "_id", Value: "$" + queryField}, + bson.E{Key: "value", Value: bson.M{"$sum": 1}}}}) + cond = append(cond, bson.M{"$sort": bson.M{"value": -1}}) + if limit <= 0 { + limit = 5 + } + cond = append(cond, bson.M{"$limit": limit}) + aggreData := s.aggregateMongoQuery(collection, cond) + + result := []constants.MetricsQueryPoint{} + for _, data := range *aggreData { + if queryKey, ok := data["_id"].(int32); ok { + queryKeyStr := fmt.Sprintf("%d", queryKey) + result = append( + result, + constants.MetricsQueryPoint{Legend: queryKeyStr, Count: int64(data["value"].(int32))}, + ) + } + } + + return &result +} + +func (s *MongoServer) metricsQueryByTime( + collection *mongo.Collection, + userID string, + queryField string, + unit string, + limit int, +) *[]constants.MetricsQueryPoint { + // for time 
aggregation. + unitDay := 0 + unitMonth := 0 + var lastBND time.Time + switch unit { + case "day": + unitDay = 1 + lastBND = now.BeginningOfDay() + case "week": + unitDay = 7 + lastBND = now.BeginningOfWeek().AddDate(0, 0, 1) // Start day on Monday. + case "month": + unitMonth = 1 + lastBND = now.BeginningOfMonth() + default: + panic("bucket:time expect unit in day/week/month, got: " + unit) + } + + boundaries := []time.Time{} + buckets := map[string]int64{} + for idx := -limit + 1; idx <= 1; idx++ { + lowerBND := lastBND.AddDate(0, idx*unitMonth, idx*unitDay) + boundaries = append(boundaries, lowerBND) + + buckedKeyStr := lowerBND.Format("2006-01-02") + buckets[buckedKeyStr] = 0 + } + + cond := make([]bson.M, 0) + cond = append(cond, bson.M{"$match": bson.M{"user_id": userID}}) + cond = append(cond, bson.M{ + "$bucket": bson.D{ + bson.E{Key: "groupBy", Value: "$" + queryField}, + bson.E{Key: "default", Value: "_others_"}, + bson.E{Key: "boundaries", Value: boundaries}, + bson.E{Key: "output", Value: bson.D{bson.E{Key: "count", Value: bson.M{"$sum": 1}}}}, + }}) + aggreData := s.aggregateMongoQuery(collection, cond) + for _, bucketMap := range *aggreData { + if timeStamp, ok := bucketMap["_id"].(primitive.DateTime); ok { + buckedKeyStr := time.Unix(0, int64(timeStamp)*int64(time.Millisecond)).Format("2006-01-02") + buckets[buckedKeyStr] = int64(bucketMap["count"].(int32)) + } + } + + result := []constants.MetricsQueryPoint{} + for _, boundary := range boundaries[:len(boundaries)-1] { + buckedKeyStr := boundary.Format("2006-01-02") + result = append(result, constants.MetricsQueryPoint{Legend: buckedKeyStr, Count: int64(buckets[buckedKeyStr])}) + } + + return &result +} diff --git a/ymir/backend/src/ymir_hel/common/db/mongodb/mongo_test.go b/ymir/backend/src/ymir_hel/common/db/mongodb/mongo_test.go new file mode 100644 index 0000000000..8ad4d5b1c0 --- /dev/null +++ b/ymir/backend/src/ymir_hel/common/db/mongodb/mongo_test.go @@ -0,0 +1,499 @@ +package mongodb + +import ( + "context" + "encoding/json" + "testing" + + "github.com/IndustryEssentials/ymir-hel/common/constants" + "github.com/IndustryEssentials/ymir-hel/protos" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "go.mongodb.org/mongo-driver/bson" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/integration/mtest" + "go.mongodb.org/mongo-driver/mongo/options" +) + +type MockDatabase struct { + mock.Mock +} + +func (m *MockDatabase) Collection(name string, opts ...*options.CollectionOptions) *mongo.Collection { + args := m.Called(name, opts) + return args.Get(0).(*mongo.Collection) +} + +func getDatasetMetadataName() string { + return "__collection_metadata__@" +} + +func TestSetDatasetExistenceSuccess(t *testing.T) { + mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock)) + defer mt.Close() + + mt.Run("success", func(mt *mtest.T) { + mockCollection := mt.Coll + mockMirDatabase := MockDatabase{} + mockMetricsDatabase := MockDatabase{} + mockMirDatabase.On("Collection", getDatasetMetadataName(), []*options.CollectionOptions(nil)).
+ Return(mockCollection) + + mongoServer := NewMongoServer(context.Background(), &mockMirDatabase, &mockMetricsDatabase) + mt.AddMockResponses(mtest.CreateSuccessResponse()) + mongoServer.setDatasetExistence("", true, false) + }) +} + +func TestSetDatasetExistenceFailure(t *testing.T) { + mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock)) + defer mt.Close() + + mt.Run("success", func(mt *mtest.T) { + defer func() { + if r := recover(); r == nil { + t.Errorf("The code did not panic") + } + }() + mockCollection := mt.Coll + mockMirDatabase := MockDatabase{} + mockMetricsDatabase := MockDatabase{} + mockMirDatabase.On("Collection", getDatasetMetadataName(), []*options.CollectionOptions(nil)). + Return(mockCollection) + + mongoServer := NewMongoServer(context.Background(), &mockMirDatabase, &mockMetricsDatabase) + mt.AddMockResponses(mtest.CreateWriteErrorsResponse(mtest.WriteError{})) + mongoServer.setDatasetExistence("", true, true) + }) +} + +func TestCheckDatasetExistenceSuccess(t *testing.T) { + mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock)) + defer mt.Close() + + mt.Run("success", func(mt *mtest.T) { + mirRepo := constants.MirRepo{} + mockCollection := mt.Coll + mockMirDatabase := MockDatabase{} + mockMetricsDatabase := MockDatabase{} + mockMirDatabase.On("Collection", getDatasetMetadataName(), []*options.CollectionOptions(nil)). + Return(mockCollection) + mockMirDatabase.On("Collection", "@", []*options.CollectionOptions(nil)). + Return(mockCollection) + + mongoServer := NewMongoServer(context.Background(), &mockMirDatabase, &mockMetricsDatabase) + find := mtest.CreateCursorResponse( + 1, + "a.b", + mtest.FirstBatch, + bson.D{{Key: "exist", Value: true}, {Key: "ready", Value: true}}) + mt.AddMockResponses(find) + existence, ready := mongoServer.CheckDatasetExistenceReady(&mirRepo) + assert.Equal(t, existence, true) + assert.Equal(t, ready, true) + }) +} + +func TestCheckDatasetExistenceFailure0(t *testing.T) { + mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock)) + defer mt.Close() + + mt.Run("success", func(mt *mtest.T) { + mirRepo := constants.MirRepo{} + mockCollection := mt.Coll + mockMirDatabase := MockDatabase{} + mockMetricsDatabase := MockDatabase{} + mockMirDatabase.On("Collection", getDatasetMetadataName(), []*options.CollectionOptions(nil)). + Return(mockCollection) + + mongoServer := NewMongoServer(context.Background(), &mockMirDatabase, &mockMetricsDatabase) + existence, ready := mongoServer.CheckDatasetExistenceReady(&mirRepo) + assert.Equal(t, existence, false) + assert.Equal(t, ready, false) + }) +} + +func TestCheckDatasetExistenceFailure1(t *testing.T) { + mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock)) + defer mt.Close() + + mt.Run("success", func(mt *mtest.T) { + mirRepo := constants.MirRepo{} + mockCollection := mt.Coll + mockMirDatabase := MockDatabase{} + mockMetricsDatabase := MockDatabase{} + mockMirDatabase.On("Collection", getDatasetMetadataName(), []*options.CollectionOptions(nil)). + Return(mockCollection) + mockMirDatabase.On("Collection", "@", []*options.CollectionOptions(nil)). 
+ Return(mockCollection) + + mongoServer := NewMongoServer(context.Background(), &mockMirDatabase, &mockMetricsDatabase) + + find := mtest.CreateCursorResponse( + 1, + "a.b", + mtest.FirstBatch, + bson.D{{Key: "exist", Value: false}, {Key: "ready", Value: false}}) + mt.AddMockResponses(find) + existence, ready := mongoServer.CheckDatasetExistenceReady(&mirRepo) + assert.Equal(t, existence, false) + assert.Equal(t, ready, false) + }) +} + +func TestCountAssetsInClassSuccess(t *testing.T) { + mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock)) + defer mt.Close() + + mt.Run("success", func(mt *mtest.T) { + collection := mt.Coll + mockCollection := mt.Coll + mockMirDatabase := MockDatabase{} + mockMetricsDatabase := MockDatabase{} + mockMirDatabase.On("Collection", "@", []*options.CollectionOptions(nil)). + Return(mockCollection) + + mongoServer := NewMongoServer(context.Background(), &mockMirDatabase, &mockMetricsDatabase) + + queryField := "abc" + expectedCount := int32(10) + classIDs := []int{0, 1} + first := mtest.CreateCursorResponse( + 1, + "a.b", + mtest.FirstBatch, + bson.D{{Key: "count", Value: expectedCount}, {Key: "sum", Value: expectedCount}}) + second := mtest.CreateCursorResponse( + 1, + "a.b", + mtest.NextBatch, + bson.D{{Key: "AssetID", Value: "aaa"}}) + killCursors := mtest.CreateCursorResponse(0, "a.b", mtest.NextBatch) + mt.AddMockResponses(first, second, killCursors) + count, _ := mongoServer.countDatasetAssetsInClass(collection, queryField, classIDs) + assert.Equal(t, int64(expectedCount), count) + }) +} + +func TestQueryAssetsSuccess(t *testing.T) { + mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock)) + defer mt.Close() + + mt.Run("success", func(mt *mtest.T) { + mirRepo := constants.MirRepo{} + mockCollection := mt.Coll + mockMirDatabase := MockDatabase{} + mockMetricsDatabase := MockDatabase{} + mockMirDatabase.On("Collection", getDatasetMetadataName(), []*options.CollectionOptions(nil)). + Return(mockCollection) + mockMirDatabase.On("Collection", "@", []*options.CollectionOptions(nil)). + Return(mockCollection) + + mongoServer := NewMongoServer(context.Background(), &mockMirDatabase, &mockMetricsDatabase) + + offset := 100 + limit := 10 + classIDs := []int{0, 1} + currentAssetID := "abc" + annoTypes := []string{"gt", "pred"} + cmTypes := []int{0, 1} + cks := []string{"a", "b:c"} + tags := []string{"x", "y:z"} + expectedCount := int64(0) + expectedResult := &constants.QueryAssetsResult{ + AssetsDetail: []constants.MirAssetDetail{{}, {}}, + Offset: offset, + Limit: limit, + Anchor: expectedCount, + TotalAssetsCount: expectedCount, + } + + find := mtest.CreateCursorResponse( + 1, + "a.b", + mtest.FirstBatch, + bson.D{{Key: "exist", Value: true}, {Key: "ready", Value: true}}) + mt.AddMockResponses(find) + mt.AddMockResponses(find) + first := mtest.CreateCursorResponse( + 1, + "a.b", + mtest.FirstBatch, + bson.D{}) + second := mtest.CreateCursorResponse( + 1, + "a.b", + mtest.NextBatch, + bson.D{{Key: "AssetID", Value: "aaa"}}) + killCursors := mtest.CreateCursorResponse(0, "a.b", mtest.NextBatch) + countCursor := mtest.CreateCursorResponse( + 1, + "a.b", + mtest.FirstBatch, + bson.D{{Key: "n", Value: expectedCount}}) + mt.AddMockResponses(first, second, killCursors) // Find/All require a set of responses. 
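+        // Note on the mocking scheme (an inferred reading, not spelled out in
+        // mtest's docs): the mock deployment serves queued responses strictly
+        // in order, so this queue must match the sequence of driver calls
+        // inside QueryDatasetAssets. The `find` batches above feed the
+        // existence/readiness lookup and the assets query, while the two
+        // countCursor responses below (documents of the form {n: <count>})
+        // appear to back the anchor and total CountDocuments calls.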
+ mt.AddMockResponses(countCursor) + mt.AddMockResponses(countCursor) + result := mongoServer.QueryDatasetAssets( + &mirRepo, + offset, + limit, + classIDs, + annoTypes, + currentAssetID, + cmTypes, + cks, + tags, + ) + assert.Equal(t, expectedResult, result) + }) +} + +func TestQueryDatasetStatsSuccess(t *testing.T) { + mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock)) + defer mt.Close() + + mt.Run("success", func(mt *mtest.T) { + mirRepo := constants.MirRepo{} + mockCollection := mt.Coll + mockMirDatabase := MockDatabase{} + mockMetricsDatabase := MockDatabase{} + mockMirDatabase.On("Collection", getDatasetMetadataName(), []*options.CollectionOptions(nil)). + Return(mockCollection) + mockMirDatabase.On("Collection", "@", []*options.CollectionOptions(nil)). + Return(mockCollection) + + mockMirContext := protos.MirContext{} + err := json.Unmarshal([]byte(`{ + "pred_stats": + { + "class_ids_cnt": + { + "0": 7, + "1": 8 + } + }, + "gt_stats": + { + "class_ids_cnt": + { + "0": 3, + "2": 3 + } + } + }`), &mockMirContext) + if err != nil { + panic(err) + } + mongoServer := NewMongoServer(context.Background(), &mockMirDatabase, &mockMetricsDatabase) + + classIDs := []int{0, 1} + expectedCount := int32(1) + expectedResult := constants.NewQueryDatasetStatsResult() + expectedResult.TotalAssetsCount = 0 + expectedResult.Gt.ClassIDsCount[0] = 3 + expectedResult.Gt.ClassIDsCount[1] = 0 + expectedResult.Gt.AnnotationsCount = 1 + expectedResult.Gt.NegativeAssetsCount = -1 + expectedResult.Gt.PositiveAssetsCount = 1 + expectedResult.Pred.ClassIDsCount[0] = 7 + expectedResult.Pred.ClassIDsCount[1] = 8 + expectedResult.Pred.PositiveAssetsCount = 1 + expectedResult.Pred.NegativeAssetsCount = -1 + expectedResult.Pred.AnnotationsCount = 1 + expectedResult.QueryContext.RequireAssetsHist = false + expectedResult.QueryContext.RequireAnnotationsHist = false + + metaResult := expectedResult + metaResult.Gt.ClassIDsCount[2] = 5 + metaResult.Pred.ClassIDsCount[2] = 6 + + find := mtest.CreateCursorResponse( + 1, + "a.b", + mtest.FirstBatch, + bson.D{{Key: "exist", Value: true}, {Key: "ready", Value: true}}) + mt.AddMockResponses(find) + countCursor := mtest.CreateCursorResponse( + 1, + "a.b", + mtest.FirstBatch, + bson.D{{Key: "n", Value: int64(expectedCount)}}) + mt.AddMockResponses(countCursor) + first := mtest.CreateCursorResponse( + 1, + "a.b", + mtest.FirstBatch, + bson.D{{Key: "count", Value: expectedCount}, {Key: "sum", Value: expectedCount}}) + second := mtest.CreateCursorResponse( + 1, + "a.b", + mtest.NextBatch, + bson.D{{Key: "AssetID", Value: "aaa"}}) + killCursors := mtest.CreateCursorResponse(0, "a.b", mtest.NextBatch) + mt.AddMockResponses(first, second, killCursors) + mt.AddMockResponses(first, second, killCursors) + result := mongoServer.QueryDatasetStats(&mirRepo, classIDs, false, false, metaResult) + assert.Equal(t, expectedResult, result) + }) +} + +func TestLoadAssetsDetail(t *testing.T) { + mirRepo := constants.MirRepo{} + attributes := map[string]*protos.MetadataAttributes{ + "a": {}, + "b": {}, + "c": {}, + } + mirMetadatas := &protos.MirMetadatas{Attributes: attributes} + mirAnnotations := &protos.MirAnnotations{ + GroundTruth: &protos.SingleTaskAnnotations{ + ImageAnnotations: map[string]*protos.SingleImageAnnotations{ + "a": {Boxes: []*protos.ObjectAnnotation{{ClassId: 1}}}, + }, + }, + Prediction: &protos.SingleTaskAnnotations{ + ImageAnnotations: map[string]*protos.SingleImageAnnotations{ + "a": {Boxes: []*protos.ObjectAnnotation{{ClassId: 1}}}, + }, + }, + ImageCks: 
map[string]*protos.SingleImageCks{"a": {Cks: map[string]string{"abc": "1"}}}, + } + + expectedAssetsDetail := []constants.MirAssetDetail{} + err := json.Unmarshal([]byte(`[ + { + "asset_id": "a", + "metadata": + { + "asset_type": 0, + "byte_size": 0, + "height": 0, + "image_channels": 0, + "timestamp": null, + "tvt_type": 0, + "width": 0, + "origin_filename": "" + }, + "class_ids": + [ + 1 + ], + "gt": + [ + { + "anno_quality": 0, + "box": null, + "class_id": 1, + "class_name": "", + "cm": 0, + "det_link_id": 0, + "index": 0, + "polygon": [], + "score": 0, + "tags": {} + } + ], + "pred": + [ + { + "anno_quality": 0, + "box": null, + "class_id": 1, + "polygon": [], + "class_name": "", + "cm": 0, + "det_link_id": 0, + "index": 0, + "score": 0, + "tags": {} + } + ], + "cks": + { + "abc": "1" + }, + "image_quality": 0 + }, + { + "asset_id": "b", + "metadata": + { + "asset_type": 0, + "byte_size": 0, + "height": 0, + "image_channels": 0, + "timestamp": null, + "tvt_type": 0, + "width": 0, + "origin_filename": "" + }, + "class_ids": + [], + "gt": + [], + "pred": + [], + "cks": + {}, + "image_quality": -1 + }, + { + "asset_id": "c", + "metadata": + { + "asset_type": 0, + "byte_size": 0, + "height": 0, + "image_channels": 0, + "timestamp": null, + "tvt_type": 0, + "width": 0, + "origin_filename": "" + }, + "class_ids": + [], + "gt": + [], + "pred": + [], + "cks": + {}, + "image_quality": -1 + } + ]`), &expectedAssetsDetail) + if err != nil { + panic(err) + } + + mt := mtest.New(t, mtest.NewOptions().ClientType(mtest.Mock)) + defer mt.Close() + mt.Run("success", func(mt *mtest.T) { + mockCollection := mt.Coll + mockMirDatabase := MockDatabase{} + mockMetricsDatabase := MockDatabase{} + mockMirDatabase.On("Collection", "@", []*options.CollectionOptions(nil)). + Return(mockCollection) + mockMirDatabase.On("Collection", getDatasetMetadataName(), []*options.CollectionOptions(nil)). 
+ Return(mockCollection) + + find := mtest.CreateCursorResponse( + 1, + "a.b", + mtest.FirstBatch, + bson.D{{Key: "exist", Value: false}, {Key: "ready", Value: false}}) + mt.AddMockResponses(find) + mt.AddMockResponses( + mtest.CreateSuccessResponse(), + mtest.CreateSuccessResponse(), + mtest.CreateSuccessResponse(), + mtest.CreateSuccessResponse(), + mtest.CreateSuccessResponse(), + mtest.CreateSuccessResponse(), + mtest.CreateSuccessResponse(), + mtest.CreateSuccessResponse(), + ) + mongoServer := NewMongoServer(context.Background(), &mockMirDatabase, &mockMetricsDatabase) + mongoServer.IndexDatasetData(&mirRepo, mirMetadatas, mirAnnotations) + }) +} diff --git a/ymir/backend/src/ymir_hel/common/loader/loader.go b/ymir/backend/src/ymir_hel/common/loader/loader.go new file mode 100644 index 0000000000..79f31fdb6f --- /dev/null +++ b/ymir/backend/src/ymir_hel/common/loader/loader.go @@ -0,0 +1,70 @@ +package loader + +import ( + "gopkg.in/yaml.v3" + + "github.com/vektra/gitreader" + "google.golang.org/protobuf/proto" + + "github.com/IndustryEssentials/ymir-hel/common/constants" + "github.com/IndustryEssentials/ymir-hel/protos" +) + +type MirRepoLoader struct { +} + +func (l *MirRepoLoader) LoadSingleMirData( + mirRepo *constants.MirRepo, + mirFile constants.MirFile, +) interface{} { + return l.LoadMutipleMirDatas(mirRepo, []constants.MirFile{mirFile})[0] +} + +func (l *MirRepoLoader) LoadMutipleMirDatas( + mirRepo *constants.MirRepo, + mirFiles []constants.MirFile, +) []interface{} { + mirRoot, mirRev := mirRepo.BuildRepoID() + + repo, err := gitreader.OpenRepo(mirRoot) + if err != nil { + panic(err) + } + + var sliceDatas = make([]interface{}, len(mirFiles)) + for i, mirFile := range mirFiles { + blob, err := repo.CatFile(mirRev, mirFile.String()) + if err != nil { + panic(err) + } + bytes, err := blob.Bytes() + if err != nil { + panic(err) + } + newData := mirFile.ProtoData() + err = proto.Unmarshal(bytes, newData) + if err != nil { + panic(err) + } + sliceDatas[i] = newData + } + repo.Close() + + return sliceDatas +} + +func (l *MirRepoLoader) LoadModelInfo(mirRepo *constants.MirRepo) *constants.MirdataModel { + mirTasks := l.LoadSingleMirData(mirRepo, constants.MirfileTasks).(*protos.MirTasks) + task := mirTasks.Tasks[mirTasks.HeadTaskId] + modelData := constants.NewMirdataModel(task.SerializedTaskParameters) + if task.SerializedExecutorConfig != "" { + if err := yaml.Unmarshal([]byte(task.SerializedExecutorConfig), &modelData.ExecutorConfig); err != nil { + panic(err) + } + } + constants.BuildStructFromMessage(task.Model, &modelData) + if len(modelData.ModelHash) < 1 { + panic("invalid model") + } + return modelData +} diff --git a/ymir/backend/src/ymir_hel/common/loader/loader_test.go b/ymir/backend/src/ymir_hel/common/loader/loader_test.go new file mode 100644 index 0000000000..9f0a72a493 --- /dev/null +++ b/ymir/backend/src/ymir_hel/common/loader/loader_test.go @@ -0,0 +1,102 @@ +package loader + +import ( + "fmt" + "os" + "os/exec" + "path" + "testing" + + "github.com/IndustryEssentials/ymir-hel/common/constants" + "github.com/IndustryEssentials/ymir-hel/protos" + "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/proto" +) + +func createTestMirRepo(sandbox string) *constants.MirRepo { + return &constants.MirRepo{ + SandboxRoot: sandbox, + UserID: "user", + RepoID: "repo", + BranchID: "branch", + TaskID: "task", + } +} + +func createGitRepo(t *testing.T, repoRoot string, fileContents map[string][]byte, tagName string) { + os.RemoveAll(repoRoot) + err := 
exec.Command("mkdir", "-p", repoRoot).Run() + if err != nil { + panic(err) + } + + cmd := exec.Command("git", "init") + cmd.Dir = repoRoot + err = cmd.Run() + if err != nil { + panic(err) + } + + for fileName, fileContent := range fileContents { + absFileName := path.Join(repoRoot, fileName) + err = os.WriteFile(absFileName, fileContent, 0777) + if err != nil { + panic(err) + } + } + + cmd = exec.Command("git", "add", ".") + cmd.Dir = repoRoot + err = cmd.Run() + if err != nil { + panic(err) + } + + cmd = exec.Command("git", "commit", "-m", "\"msg\"") + cmd.Dir = repoRoot + err = cmd.Run() + if err != nil { + panic(err) + } + + cmd = exec.Command("git", "tag", tagName) + cmd.Dir = repoRoot + err = cmd.Run() + if err != nil { + panic(err) + } +} + +func TestLoadModelInfo(t *testing.T) { + workDir := fmt.Sprintf("%s/modelinfo", "/tmp/test1") + // workDir := fmt.Sprintf("%s/modelinfo", t.TempDir()) + mirRepo := createTestMirRepo(workDir) + mirRoot, mirRev := mirRepo.BuildRepoID() + + headTaskID := "task_id" + mirTaskMap := map[string]*protos.Task{} + mirTaskMap[headTaskID] = &protos.Task{ + Model: &protos.ModelMeta{ + ModelHash: "model_hash", + MeanAveragePrecision: 0.42, + Context: "context", + BestStageName: "best_stage", + }, + SerializedExecutorConfig: "{abc: 1}", + } + mirTasks := protos.MirTasks{Tasks: mirTaskMap, HeadTaskId: headTaskID} + encodedData, _ := proto.Marshal(&mirTasks) + createGitRepo(t, mirRoot, map[string][]byte{"tasks.mir": encodedData}, mirRev) + + expectedModel := &constants.MirdataModel{ + ModelHash: "model_hash", + Stages: map[string]interface{}{}, + MeanAveragePrecision: 0.42, + Context: "context", + BestStageName: "best_stage", + ExecutorConfig: map[string]interface{}{"abc": 1}} + + mirRepoLoader := MirRepoLoader{} + mirModel := mirRepoLoader.LoadModelInfo(mirRepo) + assert.Equal(t, expectedModel, mirModel) +} diff --git a/ymir/backend/src/ymir_hel/common/tools/timer.go b/ymir/backend/src/ymir_hel/common/tools/timer.go new file mode 100644 index 0000000000..9456e0d48c --- /dev/null +++ b/ymir/backend/src/ymir_hel/common/tools/timer.go @@ -0,0 +1,24 @@ +package tools + +import ( + "log" + "regexp" + "runtime" + "time" +) + +func TimeTrack(start time.Time, output string) { + elapsed := time.Since(start) + + // Skip this function, and fetch the PC and file for its parent. + pc, _, _, _ := runtime.Caller(1) + + // Retrieve a function object this functions parent. + funcObj := runtime.FuncForPC(pc) + + // Regex to extract just the function name (and not the module path). 
+ runtimeFunc := regexp.MustCompile(`^.*\.(.*)$`) + name := runtimeFunc.ReplaceAllString(funcObj.Name(), "$1") + + log.Printf("%s %s took %s\n", name, output, elapsed) +} diff --git a/ymir/backend/src/ymir_hel/common/tools/timer_test.go b/ymir/backend/src/ymir_hel/common/tools/timer_test.go new file mode 100644 index 0000000000..dab2ff156c --- /dev/null +++ b/ymir/backend/src/ymir_hel/common/tools/timer_test.go @@ -0,0 +1,10 @@ +package tools + +import ( + "testing" + "time" +) + +func TestTimeTrack(t *testing.T) { + TimeTrack(time.Now(), "test") +} diff --git a/ymir/backend/src/ymir_hel/configs/config.go b/ymir/backend/src/ymir_hel/configs/config.go new file mode 100644 index 0000000000..cba0ffe2be --- /dev/null +++ b/ymir/backend/src/ymir_hel/configs/config.go @@ -0,0 +1,88 @@ +package configs + +import ( + "fmt" + "log" + + "github.com/spf13/viper" +) + +type Config struct { + YmirSandbox string + + ViewerHost string + ViewerPort int + ViewerURI string + ViewerTimeout int32 + + MongoDBURI string + MongoDataDBName string + MongoDataDBCache bool + MongoMetricsDBName string + + // Hel-grpc config + HelGrpcHost string + HelGrpcPort int + HelGrpcURL string + + // Redis + RedisURLHel string + RedisNumHelGrpc int + RedisURLHelGrpc string + RedisNumHelTask int + RedisURLHelTask string +} + +func InitViperConfig(configFile string) *Config { + err := viper.BindEnv("YmirSandbox", "BACKEND_SANDBOX_ROOT") + if err != nil { + panic(err) + } + + err = viper.BindEnv("ViewerPort", "VIEWER_HOST_PORT") + if err != nil { + panic(err) + } + err = viper.BindEnv("MongoDBURI", "MONGODB_URI") + if err != nil { + panic(err) + } + err = viper.BindEnv("MongoDataDBCache", "MONGODB_USE_CACHE") + if err != nil { + panic(err) + } + + err = viper.BindEnv("HelGrpcPort", "HEL_GRPC_PORT") + if err != nil { + panic(err) + } + err = viper.BindEnv("RedisURLHel", "REDIS_URL_HEL") + if err != nil { + panic(err) + } + + viper.SetConfigFile(configFile) + err = viper.ReadInConfig() + if err != nil { + panic(err) + } + + var config Config + err = viper.Unmarshal(&config) + if err != nil { + panic(err) + } + + if len(config.ViewerURI) < 1 { + config.ViewerURI = fmt.Sprintf("%s:%d", config.ViewerHost, config.ViewerPort) + } + if len(config.HelGrpcURL) < 1 { + config.HelGrpcURL = fmt.Sprintf("%s:%d", config.HelGrpcHost, config.HelGrpcPort) + } + + config.RedisURLHelGrpc = fmt.Sprintf("%s:%d", config.RedisURLHel, config.RedisNumHelGrpc) + config.RedisURLHelTask = fmt.Sprintf("%s:%d", config.RedisURLHel, config.RedisNumHelTask) + + log.Printf("ymir-hel config: %+v\n", config) + return &config +} diff --git a/ymir/backend/src/ymir_hel/configs/config.yml b/ymir/backend/src/ymir_hel/configs/config.yml new file mode 100644 index 0000000000..72f88324e1 --- /dev/null +++ b/ymir/backend/src/ymir_hel/configs/config.yml @@ -0,0 +1,28 @@ +# Common +YmirSandbox: "" + +# Viewer +ViewerHost: "0.0.0.0" +ViewerPort: 9527 +# ViewerURI has higher priority than above fields, if set. +ViewerURI: "" +ViewerTimeout: 61 # +1 for elder, in seconds. + +# MongoDBs +MongoDBURI: "mongodb://127.0.0.1:27017" +MongoDataDBName: "YMIR-data" +MongoDataDBCache: true +MongoMetricsDBName: "YMIR-metrics" + +# Hel-grpc +HelGrpcHost: "0.0.0.0" +HelGrpcPort: 9542 +# HelURI has higher priority than above fields, if set. 
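+# (The field below is HelGrpcURL; when it is left empty, configs/config.go
+# falls back to HelGrpcHost:HelGrpcPort, e.g. "0.0.0.0:9542" with the defaults
+# above.)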
+HelGrpcURL: "" + +# Redis +RedisURLHel: "redis://redis:6379" +RedisNumHelGrpc: 12 +RedisURLHelGrpc: "" +RedisNumHelTask: 14 +RedisURLHelTask: "" diff --git a/ymir/backend/src/ymir_hel/configs/config_test.go b/ymir/backend/src/ymir_hel/configs/config_test.go new file mode 100644 index 0000000000..e67ceb6090 --- /dev/null +++ b/ymir/backend/src/ymir_hel/configs/config_test.go @@ -0,0 +1,23 @@ +package configs + +import ( + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestViperConfig(t *testing.T) { + fakeURI := "fake_uri" + os.Setenv("MONGODB_URI", fakeURI) + fakeSandbox := "fake_sandbox" + os.Setenv("BACKEND_SANDBOX_ROOT", fakeSandbox) + fakeHostPort := 1001 + os.Setenv("VIEWER_HOST_PORT", fmt.Sprintf("%d", fakeHostPort)) + + config := InitViperConfig("./config.yml") + assert.Equal(t, fakeURI, config.MongoDBURI) + assert.Equal(t, fakeSandbox, config.YmirSandbox) + assert.Equal(t, fakeHostPort, config.ViewerPort) +} diff --git a/ymir/backend/src/ymir_hel/go.mod b/ymir/backend/src/ymir_hel/go.mod new file mode 100644 index 0000000000..a17ea7b6d5 --- /dev/null +++ b/ymir/backend/src/ymir_hel/go.mod @@ -0,0 +1,92 @@ +module github.com/IndustryEssentials/ymir-hel + +go 1.18 + +require ( + github.com/gin-gonic/gin v1.8.1 + github.com/jinzhu/now v1.1.5 + github.com/mitchellh/mapstructure v1.5.0 + github.com/penglongli/gin-metrics v0.1.10 + github.com/spf13/viper v1.12.0 + github.com/stretchr/testify v1.8.0 + github.com/swaggo/files v0.0.0-20220610200504-28940afbdbfe + github.com/swaggo/gin-swagger v1.5.2 + github.com/swaggo/swag v1.8.4 + github.com/urfave/cli v1.22.10 + github.com/vektra/gitreader v0.0.0-20150106002542-abd11557f68c + go.mongodb.org/mongo-driver v1.10.1 + google.golang.org/grpc v1.46.2 + google.golang.org/protobuf v1.28.1 + gopkg.in/yaml.v3 v3.0.1 +) + +require ( + github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d // indirect + github.com/russross/blackfriday/v2 v2.0.1 // indirect + github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect + google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd // indirect +) + +require ( + github.com/KyleBanks/depth v1.2.1 // indirect + github.com/PuerkitoBio/purell v1.1.1 // indirect + github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bits-and-blooms/bitset v1.2.0 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/edsrzf/mmap-go v1.1.0 // indirect + github.com/fsnotify/fsnotify v1.5.4 // indirect + github.com/gin-contrib/sse v0.1.0 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.19.6 // indirect + github.com/go-openapi/spec v0.20.4 // indirect + github.com/go-openapi/swag v0.19.15 // indirect + github.com/go-playground/locales v0.14.0 // indirect + github.com/go-playground/universal-translator v0.18.0 // indirect + github.com/go-playground/validator/v10 v10.11.0 // indirect + github.com/goccy/go-json v0.9.11 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/golang/snappy v0.0.1 // indirect + github.com/google/go-cmp v0.5.8 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.13.6 // indirect + github.com/leodido/go-urn v1.2.1 // indirect + github.com/magiconair/properties v1.8.6 // indirect + 
github.com/mailru/easyjson v0.7.6 // indirect + github.com/mattn/go-isatty v0.0.16 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe // indirect + github.com/pelletier/go-toml v1.9.5 // indirect + github.com/pelletier/go-toml/v2 v2.0.3 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_golang v1.12.0 // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common v0.32.1 // indirect + github.com/prometheus/procfs v0.7.3 // indirect + github.com/spf13/afero v1.8.2 // indirect + github.com/spf13/cast v1.5.0 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/stretchr/objx v0.4.0 // indirect + github.com/subosito/gotenv v1.3.0 // indirect + github.com/ugorji/go/codec v1.2.7 // indirect + github.com/xdg-go/pbkdf2 v1.0.0 // indirect + github.com/xdg-go/scram v1.1.1 // indirect + github.com/xdg-go/stringprep v1.0.3 // indirect + github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect + golang.org/x/crypto v0.0.0-20220817201139-bc19a97f63c8 // indirect + golang.org/x/exp v0.0.0-20221006183845-316c7553db56 + golang.org/x/net v0.0.0-20220812174116-3211cb980234 // indirect + golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect + golang.org/x/sys v0.0.0-20220818161305-2296e01440c6 // indirect + golang.org/x/text v0.3.7 // indirect + golang.org/x/tools v0.1.12 // indirect + gopkg.in/ini.v1 v1.66.4 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect +) diff --git a/ymir/backend/src/ymir_hel/go.sum b/ymir/backend/src/ymir_hel/go.sum new file mode 100644 index 0000000000..62ab7cefd7 --- /dev/null +++ b/ymir/backend/src/ymir_hel/go.sum @@ -0,0 +1,766 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go 
v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc= +github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/agiledragon/gomonkey/v2 v2.3.1/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks 
v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bits-and-blooms/bitset v1.2.0 h1:Kn4yilvwNtMACtf1eYDlG8H77R07mZSPbMjLyS07ChA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= +github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod 
h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/gzip v0.0.6 h1:NjcunTcGAj5CO1gn4N8jHOSIeRFHIbn51z6K+xaN4d4= +github.com/gin-contrib/gzip v0.0.6/go.mod h1:QOJlmV2xmayAjkNS2Y8NQsMneuRShOU/kjovCXNuzzk= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.7.4/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY= +github.com/gin-gonic/gin v1.8.1 h1:4+fr/el88TOO3ewCmQr8cx/CtZ/umlIRIs5M4NTNjf8= +github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.19.6 h1:UBIxjkht+AWIgYzCDSv2GN+E/togfwXUJFRTWhl2Jjs= +github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= +github.com/go-openapi/spec v0.20.4 h1:O8hJrt0UMnhHcluhIdUgCLRWyM2x7QkBXRvOs7m+O1M= +github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM= +github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= +github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho= +github.com/go-playground/universal-translator 
v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= +github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= +github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos= +github.com/go-playground/validator/v10 v10.11.0 h1:0W+xRM511GY47Yy3bZUbJVitCNg2BOGlCyvTqsp/xIw= +github.com/go-playground/validator/v10 v10.11.0/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/goccy/go-json v0.9.11 h1:/pAaQDLHEoCq/5FFmSKBswWmK6H0e8g4159Kc/X/nqk= +github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod 
h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod 
h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= +github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod 
h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= +github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= +github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= +github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe h1:iruDEfMl2E6fbMZ9s0scYfZQ84/6SPL6zC8ACM2oIL0= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/otiai10/copy v1.7.0/go.mod h1:rmRl6QPdJj6EiUqXQ/4Nn2lLXoNQjFCQbbNrxgc/t3U= +github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= +github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= +github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= 
+github.com/otiai10/mint v1.3.3/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= +github.com/pelletier/go-toml/v2 v2.0.3 h1:h9JoA60e1dVEOpp0PFwJSmt1Htu057NUq9/bUwaO61s= +github.com/pelletier/go-toml/v2 v2.0.3/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas= +github.com/penglongli/gin-metrics v0.1.10 h1:mNNWCM3swMOVHwzrHeXsE4C/myu8P/HIFohtyMi9rN8= +github.com/penglongli/gin-metrics v0.1.10/go.mod h1:wxGsGUwpVGv3hmYSxQn2GZgRL3YuCgiRFq2d0X6+EOU= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.0 h1:C+UIj/QWtmqY13Arb8kwMt5j34/0Z2iKamrJ+ryC0Gg= +github.com/prometheus/client_golang v1.12.0/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= 
+github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= +github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= +github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ= +github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI= 
+github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= +github.com/swaggo/files v0.0.0-20220610200504-28940afbdbfe h1:K8pHPVoTgxFJt1lXuIzzOX7zZhZFldJQK/CgKx9BFIc= +github.com/swaggo/files v0.0.0-20220610200504-28940afbdbfe/go.mod h1:lKJPbtWzJ9JhsTN1k1gZgleJWY/cqq0psdoMmaThG3w= +github.com/swaggo/gin-swagger v1.5.2 h1:dj2es17EaOHoy0Owu4xn3An1mI8/xjdFyIH6KAbOdYo= +github.com/swaggo/gin-swagger v1.5.2/go.mod h1:Cbj/MlHApPOjZdf4joWFXLLgmZVPyh54GPvPPyVjVZM= +github.com/swaggo/swag v1.8.1/go.mod h1:ugemnJsPZm/kRwFUnzBlbHRd0JY9zE1M4F+uy2pAaPQ= +github.com/swaggo/swag v1.8.4 h1:oGB351qH1JqUqK1tsMYEE5qTBbPk394BhsZxmUfebcI= +github.com/swaggo/swag v1.8.4/go.mod h1:jMLeXOOmYyjk8PvHTsXBdrubsNd9gUJTTCzL5iBnseg= +github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0= +github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= +github.com/urfave/cli v1.22.10 h1:p8Fspmz3iTctJstry1PYS3HVdllxnEzTEsgIgtxTrCk= +github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= +github.com/vektra/gitreader v0.0.0-20150106002542-abd11557f68c h1:uZS0bSmPEYQScULWaelJMPihrzWMiCl5fUfW520BBT0= +github.com/vektra/gitreader v0.0.0-20150106002542-abd11557f68c/go.mod h1:2gXNohtg9jktzjmRUZj0tfr7eKhN6UJLC055b2TpmHI= +github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.1 h1:VOMT+81stJgXW3CpHyqHN3AXDYIMsx56mEFrB37Mb/E= +github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= +github.com/xdg-go/stringprep v1.0.3 h1:kdwGpVNwPFtjs98xCGkHjQtGKh86rDcRZN17QEMCOIs= +github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.mongodb.org/mongo-driver v1.10.1 h1:NujsPveKwHaWuKUer/ceo9DzEe7HIj1SlJ6uvXZG0S4= +go.mongodb.org/mongo-driver v1.10.1/go.mod h1:z4XpeoU6w+9Vht+jAFyLgVrD+jGSQQe0+CBWFHNiHt8= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod 
h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220817201139-bc19a97f63c8 h1:GIAS/yBem/gq2MUqgNIzUHW7cJMmx3TGZOrnyYaNQ6c= +golang.org/x/crypto v0.0.0-20220817201139-bc19a97f63c8/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20221006183845-316c7553db56 h1:BrYbdKcCNjLyrN6aKqXy4hPw9qGI8IATkj4EWv9Q+kQ= +golang.org/x/exp v0.0.0-20221006183845-316c7553db56/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint 
v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220812174116-3211cb980234 h1:RDqmgfe7SvlMWoqC3xwQ2blLO3fcWcxMa3eBLRdRW7E= +golang.org/x/net v0.0.0-20220812174116-3211cb980234/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod 
h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220818161305-2296e01440c6 h1:Sx/u41w+OwrInGdEckYmEuU5gHoGSL4QbDz3S9s6j4U= +golang.org/x/sys v0.0.0-20220818161305-2296e01440c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= +golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= 
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd h1:e0TwkXOdbnH/1x5rc5MZ/VYyiZ4v+RdVfrGMqEwT68I= +google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2 h1:u+MLGgVf7vRdjEYZ8wDFhAVNmhkbJ5hmrA1LMWK1CAQ= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf 
v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4= +gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools 
v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/ymir/backend/src/ymir_hel/grpc/server/grpc_client.go b/ymir/backend/src/ymir_hel/grpc/server/grpc_client.go
new file mode 100644
index 0000000000..1f97d026e9
--- /dev/null
+++ b/ymir/backend/src/ymir_hel/grpc/server/grpc_client.go
@@ -0,0 +1,33 @@
+package server
+
+import (
+	"context"
+	"log"
+	"time"
+
+	"github.com/IndustryEssentials/ymir-hel/protos"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials/insecure"
+)
+
+func GrpcClientCall(addr string) error {
+	// Set up a connection to the server.
+	conn, err := grpc.Dial(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
+	if err != nil {
+		// Log and return the error so the caller can surface it.
+		log.Printf("did not connect: %v", err)
+		return err
+	}
+	defer conn.Close()
+	c := protos.NewMirControllerServiceClient(conn)
+
+	// Contact the server and print out its response.
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+	defer cancel()
+	r, err := c.DataManageRequest(ctx, &protos.GeneralReq{UserId: "0001"})
+	if err != nil {
+		log.Printf("service call failed: %v", err)
+		return err
+	}
+	log.Printf("Succeed: %s", r.GetMessage())
+	return nil
+}
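For orientation, a minimal sketch (not part of this patch) of how the new client entry point might be exercised once the Hel gRPC service is listening; the address is a hypothetical local endpoint, not a value taken from the repository config.

package main

import (
	"log"

	server_grpc "github.com/IndustryEssentials/ymir-hel/grpc/server"
)

func main() {
	// GrpcClientCall dials the address, sends a GeneralReq with UserId "0001",
	// and logs the GeneralResp message (see grpc_client.go above).
	if err := server_grpc.GrpcClientCall("127.0.0.1:9360"); err != nil {
		log.Printf("hel grpc call failed: %v", err)
	}
}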
diff --git a/ymir/backend/src/ymir_hel/grpc/server/grpc_server.go b/ymir/backend/src/ymir_hel/grpc/server/grpc_server.go
new file mode 100644
index 0000000000..cee35a8118
--- /dev/null
+++ b/ymir/backend/src/ymir_hel/grpc/server/grpc_server.go
@@ -0,0 +1,32 @@
+package server
+
+import (
+	"context"
+	"log"
+	"net"
+
+	"github.com/IndustryEssentials/ymir-hel/configs"
+	"github.com/IndustryEssentials/ymir-hel/protos"
+	"google.golang.org/grpc"
+)
+
+type HelGrpcServer struct{}
+
+func (s HelGrpcServer) DataManageRequest(ctx context.Context, request *protos.GeneralReq) (*protos.GeneralResp, error) {
+	log.Printf("Hel-gRPC server is called with request: \n%+v", request)
+	return &protos.GeneralResp{Code: 1, Message: "response"}, nil
+}
+
+func StartGrpcService(config *configs.Config) error {
+	lis, err := net.Listen("tcp", config.HelGrpcURL)
+	if err != nil {
+		// Return the error instead of exiting, so the CLI can handle it.
+		log.Printf("failed to listen: %v", err)
+		return err
+	}
+	s := grpc.NewServer()
+	log.Printf("Hel-gRPC server is running at %s.", config.HelGrpcURL)
+	protos.RegisterMirControllerServiceServer(s, HelGrpcServer{})
+	if err := s.Serve(lis); err != nil {
+		log.Printf("failed to serve: %v", err)
+		return err
+	}
+	return nil
+}
diff --git a/ymir/backend/src/ymir_hel/main.go b/ymir/backend/src/ymir_hel/main.go
new file mode 100644
index 0000000000..b3a134d9f2
--- /dev/null
+++ b/ymir/backend/src/ymir_hel/main.go
@@ -0,0 +1,63 @@
+package main
+
+import (
+	"os"
+
+	"github.com/urfave/cli"
+
+	"github.com/IndustryEssentials/ymir-hel/configs"
+	server_grpc "github.com/IndustryEssentials/ymir-hel/grpc/server"
+	server_viewer "github.com/IndustryEssentials/ymir-hel/viewer/server"
+)
+
+func BuildCliApp(config *configs.Config) (*cli.App, error) {
+	app := cli.NewApp()
+	app.Commands = []cli.Command{
+		{
+			Name:  "viewer",
+			Usage: "start YMIR-Viewer Service.",
+			Action: func(c *cli.Context) error {
+				if err := server_viewer.StartViewerServer(config); err != nil {
+					return cli.NewExitError(err.Error(), 1)
+				}
+				return nil
+			},
+		},
+		{
+			Name:  "grpc_service",
+			Usage: "launch YMIR-Hel Grpc Service.",
+			Action: func(c *cli.Context) error {
+				if err := server_grpc.StartGrpcService(config); err != nil {
+					return cli.NewExitError(err.Error(), 1)
+				}
+				return nil
+			},
+		},
+		{
+			Name:  "grpc_client",
+			Usage: "call YMIR-Hel Grpc Service.",
+			Action: func(c *cli.Context) error {
+				if err := server_grpc.GrpcClientCall(config.HelGrpcURL); err != nil {
+					return cli.NewExitError(err.Error(), 1)
+				}
+				return nil
+			},
+		},
+	}
+
+	return app, nil
+}
+
+func main() {
+	helConfig := configs.InitViperConfig("configs/config.yml")
+
+	app, err := BuildCliApp(helConfig)
+	if err != nil {
+		panic(err)
+	}
+	// Run the CLI app
+	err = app.Run(os.Args)
+	if err != nil {
+		panic(err)
+	}
+}
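BuildCliApp wires three subcommands to the viewer and gRPC servers. A sketch (not part of the patch) of driving the CLI from Go code in the same package, e.g. in a smoke test; the helper name is hypothetical, and the config path mirrors the one hard-coded in main.go.

// runHelCommand is a hypothetical helper; urfave/cli treats the first
// element of the argument slice as the program name.
func runHelCommand(command string) error {
	helConfig := configs.InitViperConfig("configs/config.yml")
	app, err := BuildCliApp(helConfig)
	if err != nil {
		return err
	}
	return app.Run([]string{"hel", command})
}

runHelCommand("grpc_service") would start the server, and runHelCommand("grpc_client") would issue the test call shown earlier.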
diff --git a/ymir/backend/src/ymir_hel/protos/backend.pb.go b/ymir/backend/src/ymir_hel/protos/backend.pb.go
new file mode 100644
index 0000000000..b0ed5a506a
--- /dev/null
+++ b/ymir/backend/src/ymir_hel/protos/backend.pb.go
@@ -0,0 +1,2143 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.28.1
+// 	protoc        v3.15.8
+// source: backend.proto
+
+package protos
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type MergeStrategy int32
+
+const (
+	MergeStrategy_STOP  MergeStrategy = 0
+	MergeStrategy_HOST  MergeStrategy = 1
+	MergeStrategy_GUEST MergeStrategy = 2
+)
+
+// Enum value maps for MergeStrategy.
+var (
+	MergeStrategy_name = map[int32]string{
+		0: "STOP",
+		1: "HOST",
+		2: "GUEST",
+	}
+	MergeStrategy_value = map[string]int32{
+		"STOP":  0,
+		"HOST":  1,
+		"GUEST": 2,
+	}
+)
+
+func (x MergeStrategy) Enum() *MergeStrategy {
+	p := new(MergeStrategy)
+	*p = x
+	return p
+}
+
+func (x MergeStrategy) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (MergeStrategy) Descriptor() protoreflect.EnumDescriptor {
+	return file_backend_proto_enumTypes[0].Descriptor()
+}
+
+func (MergeStrategy) Type() protoreflect.EnumType {
+	return &file_backend_proto_enumTypes[0]
+}
+
+func (x MergeStrategy) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use MergeStrategy.Descriptor instead.
+func (MergeStrategy) EnumDescriptor() ([]byte, []int) {
+	return file_backend_proto_rawDescGZIP(), []int{0}
+}
+
+type UnknownTypesStrategy int32
+
+const (
+	UnknownTypesStrategy_UTS_STOP   UnknownTypesStrategy = 0
+	UnknownTypesStrategy_UTS_IGNORE UnknownTypesStrategy = 1
+	UnknownTypesStrategy_UTS_ADD    UnknownTypesStrategy = 2
+)
+
+// Enum value maps for UnknownTypesStrategy.
+var (
+	UnknownTypesStrategy_name = map[int32]string{
+		0: "UTS_STOP",
+		1: "UTS_IGNORE",
+		2: "UTS_ADD",
+	}
+	UnknownTypesStrategy_value = map[string]int32{
+		"UTS_STOP":   0,
+		"UTS_IGNORE": 1,
+		"UTS_ADD":    2,
+	}
+)
+
+func (x UnknownTypesStrategy) Enum() *UnknownTypesStrategy {
+	p := new(UnknownTypesStrategy)
+	*p = x
+	return p
+}
+
+func (x UnknownTypesStrategy) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (UnknownTypesStrategy) Descriptor() protoreflect.EnumDescriptor {
+	return file_backend_proto_enumTypes[1].Descriptor()
+}
+
+func (UnknownTypesStrategy) Type() protoreflect.EnumType {
+	return &file_backend_proto_enumTypes[1]
+}
+
+func (x UnknownTypesStrategy) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use UnknownTypesStrategy.Descriptor instead.
+func (UnknownTypesStrategy) EnumDescriptor() ([]byte, []int) {
+	return file_backend_proto_rawDescGZIP(), []int{1}
+}
+
+type RequestType int32
+
+const (
+	RequestType_UNKNOWN RequestType = 0
+	// CMD task
+	RequestType_CMD_BRANCH_DEL      RequestType = 1
+	RequestType_CMD_BRANCH_LIST     RequestType = 2
+	RequestType_CMD_BRANCH_CHECKOUT RequestType = 3
+	RequestType_CMD_BRANCH_CREATE   RequestType = 4
+	RequestType_CMD_CLONE           RequestType = 5
+	RequestType_CMD_COMMIT          RequestType = 6
+	RequestType_CMD_FILTER          RequestType = 7
+	RequestType_CMD_INIT            RequestType = 8
+	RequestType_CMD_LOG             RequestType = 9
+	RequestType_CMD_MERGE           RequestType = 10
+	RequestType_CMD_INFERENCE       RequestType = 11
+	RequestType_CMD_LABEL_ADD       RequestType = 12
+	RequestType_CMD_LABEL_GET       RequestType = 13
+	RequestType_CMD_TERMINATE       RequestType = 14
+	RequestType_CMD_PULL_IMAGE      RequestType = 16
+	RequestType_CMD_GPU_INFO_GET    RequestType = 17
+	RequestType_CMD_SAMPLING        RequestType = 18
+	RequestType_CMD_EVALUATE        RequestType = 19
+	RequestType_CMD_REPO_CHECK      RequestType = 20
+	RequestType_CMD_REPO_CLEAR      RequestType = 21
+	RequestType_CMD_VERSIONS_GET    RequestType = 22
+	// Sandbox path operation
+	RequestType_USER_LIST   RequestType = 101
+	RequestType_USER_CREATE RequestType = 102
+	RequestType_USER_REMOVE RequestType = 103
+	RequestType_REPO_LIST   RequestType = 104
+	RequestType_REPO_CREATE RequestType = 105
+	RequestType_REPO_REMOVE RequestType = 106
+	// Long task
+	RequestType_TASK_CREATE RequestType = 1001
+)
+
+// Enum value maps for RequestType.
+var (
+	RequestType_name = map[int32]string{
+		0:    "UNKNOWN",
+		1:    "CMD_BRANCH_DEL",
+		2:    "CMD_BRANCH_LIST",
+		3:    "CMD_BRANCH_CHECKOUT",
+		4:    "CMD_BRANCH_CREATE",
+		5:    "CMD_CLONE",
+		6:    "CMD_COMMIT",
+		7:    "CMD_FILTER",
+		8:    "CMD_INIT",
+		9:    "CMD_LOG",
+		10:   "CMD_MERGE",
+		11:   "CMD_INFERENCE",
+		12:   "CMD_LABEL_ADD",
+		13:   "CMD_LABEL_GET",
+		14:   "CMD_TERMINATE",
+		16:   "CMD_PULL_IMAGE",
+		17:   "CMD_GPU_INFO_GET",
+		18:   "CMD_SAMPLING",
+		19:   "CMD_EVALUATE",
+		20:   "CMD_REPO_CHECK",
+		21:   "CMD_REPO_CLEAR",
+		22:   "CMD_VERSIONS_GET",
+		101:  "USER_LIST",
+		102:  "USER_CREATE",
+		103:  "USER_REMOVE",
+		104:  "REPO_LIST",
+		105:  "REPO_CREATE",
+		106:  "REPO_REMOVE",
+		1001: "TASK_CREATE",
+	}
+	RequestType_value = map[string]int32{
+		"UNKNOWN":             0,
+		"CMD_BRANCH_DEL":      1,
+		"CMD_BRANCH_LIST":     2,
+		"CMD_BRANCH_CHECKOUT": 3,
+		"CMD_BRANCH_CREATE":   4,
+		"CMD_CLONE":           5,
+		"CMD_COMMIT":          6,
+		"CMD_FILTER":          7,
+		"CMD_INIT":            8,
+		"CMD_LOG":             9,
+		"CMD_MERGE":           10,
+		"CMD_INFERENCE":       11,
+		"CMD_LABEL_ADD":       12,
+		"CMD_LABEL_GET":       13,
+		"CMD_TERMINATE":       14,
+		"CMD_PULL_IMAGE":      16,
+		"CMD_GPU_INFO_GET":    17,
+		"CMD_SAMPLING":        18,
+		"CMD_EVALUATE":        19,
+		"CMD_REPO_CHECK":      20,
+		"CMD_REPO_CLEAR":      21,
+		"CMD_VERSIONS_GET":    22,
+		"USER_LIST":           101,
+		"USER_CREATE":         102,
+		"USER_REMOVE":         103,
+		"REPO_LIST":           104,
+		"REPO_CREATE":         105,
+		"REPO_REMOVE":         106,
+		"TASK_CREATE":         1001,
+	}
+)
+
+func (x RequestType) Enum() *RequestType {
+	p := new(RequestType)
+	*p = x
+	return p
+}
+
+func (x RequestType) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (RequestType) Descriptor() protoreflect.EnumDescriptor {
+	return file_backend_proto_enumTypes[2].Descriptor()
+}
+
+func (RequestType) Type() protoreflect.EnumType {
+	return &file_backend_proto_enumTypes[2]
+}
+
+func (x RequestType) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use RequestType.Descriptor instead.
+func (RequestType) EnumDescriptor() ([]byte, []int) {
+	return file_backend_proto_rawDescGZIP(), []int{2}
+}
+
+type AnnotationType int32
+
+const (
+	AnnotationType_NOT_SET AnnotationType = 0
+	AnnotationType_GT      AnnotationType = 1
+	AnnotationType_PRED    AnnotationType = 2
+)
+
+// Enum value maps for AnnotationType.
+var (
+	AnnotationType_name = map[int32]string{
+		0: "NOT_SET",
+		1: "GT",
+		2: "PRED",
+	}
+	AnnotationType_value = map[string]int32{
+		"NOT_SET": 0,
+		"GT":      1,
+		"PRED":    2,
+	}
+)
+
+func (x AnnotationType) Enum() *AnnotationType {
+	p := new(AnnotationType)
+	*p = x
+	return p
+}
+
+func (x AnnotationType) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (AnnotationType) Descriptor() protoreflect.EnumDescriptor {
+	return file_backend_proto_enumTypes[3].Descriptor()
+}
+
+func (AnnotationType) Type() protoreflect.EnumType {
+	return &file_backend_proto_enumTypes[3]
+}
+
+func (x AnnotationType) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use AnnotationType.Descriptor instead.
+func (AnnotationType) EnumDescriptor() ([]byte, []int) {
+	return file_backend_proto_rawDescGZIP(), []int{3}
+}
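The generated name/value maps above give a cheap way to translate between wire numbers and their labels. An illustrative fragment (not generated code; assumes the fmt package and the protos import path used elsewhere in this patch):

// Round-trip a merge strategy label through the generated maps.
if v, ok := protos.MergeStrategy_value["GUEST"]; ok {
	s := protos.MergeStrategy(v)
	fmt.Printf("%s = %d\n", s.String(), s.Number()) // prints "GUEST = 2"
}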
+
+type GeneralReq struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	UserId  string      `protobuf:"bytes,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"`
+	RepoId  string      `protobuf:"bytes,2,opt,name=repo_id,json=repoId,proto3" json:"repo_id,omitempty"`
+	ReqType RequestType `protobuf:"varint,3,opt,name=req_type,json=reqType,proto3,enum=ymir.backend.RequestType" json:"req_type,omitempty"`
+	TaskId  string      `protobuf:"bytes,4,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"`
+	// singleton ops arg, such as checkout rev, create branch, etc.
+	SingletonOp string `protobuf:"bytes,5,opt,name=singleton_op,json=singletonOp,proto3" json:"singleton_op,omitempty"`
+	// ancestor task id that initiates this task from.
+	HisTaskId string `protobuf:"bytes,6,opt,name=his_task_id,json=hisTaskId,proto3" json:"his_task_id,omitempty"`
+	// dest branch id to store current task result.
+	DstDatasetId string `protobuf:"bytes,7,opt,name=dst_dataset_id,json=dstDatasetId,proto3" json:"dst_dataset_id,omitempty"`
+	// branches you want to concat the data from
+	InDatasetIds []string `protobuf:"bytes,8,rep,name=in_dataset_ids,json=inDatasetIds,proto3" json:"in_dataset_ids,omitempty"`
+	// branches you want to exclude the data from
+	ExDatasetIds []string `protobuf:"bytes,9,rep,name=ex_dataset_ids,json=exDatasetIds,proto3" json:"ex_dataset_ids,omitempty"`
+	// keyid should be included, joint with OR
+	InClassIds []int32 `protobuf:"varint,10,rep,packed,name=in_class_ids,json=inClassIds,proto3" json:"in_class_ids,omitempty"`
+	// keyid that expected to be exclude, joint with OR
+	ExClassIds         []int32       `protobuf:"varint,11,rep,packed,name=ex_class_ids,json=exClassIds,proto3" json:"ex_class_ids,omitempty"`
+	Force              bool          `protobuf:"varint,12,opt,name=force,proto3" json:"force,omitempty"`
+	CommitMessage      string        `protobuf:"bytes,13,opt,name=commit_message,json=commitMessage,proto3" json:"commit_message,omitempty"`
+	ModelHash          string        `protobuf:"bytes,14,opt,name=model_hash,json=modelHash,proto3" json:"model_hash,omitempty"`
+	AssetDir           string        `protobuf:"bytes,15,opt,name=asset_dir,json=assetDir,proto3" json:"asset_dir,omitempty"`
+	DockerImageConfig  string        `protobuf:"bytes,16,opt,name=docker_image_config,json=dockerImageConfig,proto3" json:"docker_image_config,omitempty"`
+	CheckOnly          bool          `protobuf:"varint,18,opt,name=check_only,json=checkOnly,proto3" json:"check_only,omitempty"`
+	ExecutantName      string        `protobuf:"bytes,19,opt,name=executant_name,json=executantName,proto3" json:"executant_name,omitempty"`
+	MergeStrategy      MergeStrategy `protobuf:"varint,20,opt,name=merge_strategy,json=mergeStrategy,proto3,enum=ymir.backend.MergeStrategy" json:"merge_strategy,omitempty"`
+	TerminatedTaskType TaskType      `protobuf:"varint,21,opt,name=terminated_task_type,json=terminatedTaskType,proto3,enum=mir.command.TaskType" json:"terminated_task_type,omitempty"`
+	// Types that are assignable to Sampling:
+	//
+	//	*GeneralReq_SamplingCount
+	//	*GeneralReq_SamplingRate
+	Sampling        isGeneralReq_Sampling `protobuf_oneof:"sampling"`
+	TaskParameters  string                `protobuf:"bytes,24,opt,name=task_parameters,json=taskParameters,proto3" json:"task_parameters,omitempty"`
+	LabelCollection *LabelCollection      `protobuf:"bytes,25,opt,name=label_collection,json=labelCollection,proto3" json:"label_collection,omitempty"`
+	EvaluateConfig  *EvaluateConfig       `protobuf:"bytes,26,opt,name=evaluate_config,json=evaluateConfig,proto3" json:"evaluate_config,omitempty"`
+	ModelStage      string                `protobuf:"bytes,27,opt,name=model_stage,json=modelStage,proto3" json:"model_stage,omitempty"`
+	ReqCreateTask   *ReqCreateTask        `protobuf:"bytes,1001,opt,name=req_create_task,json=reqCreateTask,proto3" json:"req_create_task,omitempty"`
+}
+
+func (x *GeneralReq) Reset() {
+	*x = GeneralReq{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_backend_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *GeneralReq) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GeneralReq) ProtoMessage() {}
+
+func (x *GeneralReq) ProtoReflect() protoreflect.Message {
+	mi := &file_backend_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use GeneralReq.ProtoReflect.Descriptor instead.
+func (*GeneralReq) Descriptor() ([]byte, []int) {
+	return file_backend_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *GeneralReq) GetUserId() string {
+	if x != nil {
+		return x.UserId
+	}
+	return ""
+}
+
+func (x *GeneralReq) GetRepoId() string {
+	if x != nil {
+		return x.RepoId
+	}
+	return ""
+}
+
+func (x *GeneralReq) GetReqType() RequestType {
+	if x != nil {
+		return x.ReqType
+	}
+	return RequestType_UNKNOWN
+}
+
+func (x *GeneralReq) GetTaskId() string {
+	if x != nil {
+		return x.TaskId
+	}
+	return ""
+}
+
+func (x *GeneralReq) GetSingletonOp() string {
+	if x != nil {
+		return x.SingletonOp
+	}
+	return ""
+}
+
+func (x *GeneralReq) GetHisTaskId() string {
+	if x != nil {
+		return x.HisTaskId
+	}
+	return ""
+}
+
+func (x *GeneralReq) GetDstDatasetId() string {
+	if x != nil {
+		return x.DstDatasetId
+	}
+	return ""
+}
+
+func (x *GeneralReq) GetInDatasetIds() []string {
+	if x != nil {
+		return x.InDatasetIds
+	}
+	return nil
+}
+
+func (x *GeneralReq) GetExDatasetIds() []string {
+	if x != nil {
+		return x.ExDatasetIds
+	}
+	return nil
+}
+
+func (x *GeneralReq) GetInClassIds() []int32 {
+	if x != nil {
+		return x.InClassIds
+	}
+	return nil
+}
+
+func (x *GeneralReq) GetExClassIds() []int32 {
+	if x != nil {
+		return x.ExClassIds
+	}
+	return nil
+}
+
+func (x *GeneralReq) GetForce() bool {
+	if x != nil {
+		return x.Force
+	}
+	return false
+}
+
+func (x *GeneralReq) GetCommitMessage() string {
+	if x != nil {
+		return x.CommitMessage
+	}
+	return ""
+}
+
+func (x *GeneralReq) GetModelHash() string {
+	if x != nil {
+		return x.ModelHash
+	}
+	return ""
+}
+
+func (x *GeneralReq) GetAssetDir() string {
+	if x != nil {
+		return x.AssetDir
+	}
+	return ""
+}
+
+func (x *GeneralReq) GetDockerImageConfig() string {
+	if x != nil {
+		return x.DockerImageConfig
+	}
+	return ""
+}
+
+func (x *GeneralReq) GetCheckOnly() bool {
+	if x != nil {
+		return x.CheckOnly
+	}
+	return false
+}
+
+func (x *GeneralReq) GetExecutantName() string {
+	if x != nil {
+		return x.ExecutantName
+	}
+	return ""
+}
+
+func (x *GeneralReq) GetMergeStrategy() MergeStrategy {
+	if x != nil {
+		return x.MergeStrategy
+	}
+	return MergeStrategy_STOP
+}
+
+func (x *GeneralReq) GetTerminatedTaskType() TaskType {
+	if x != nil {
+		return x.TerminatedTaskType
+	}
+	return TaskType_TaskTypeUnknown
+}
+
+func (m *GeneralReq) GetSampling() isGeneralReq_Sampling {
+	if m != nil {
+		return m.Sampling
+	}
+	return nil
+}
+
+func (x *GeneralReq) GetSamplingCount() int32 {
+	if x, ok := x.GetSampling().(*GeneralReq_SamplingCount); ok {
+		return x.SamplingCount
+	}
+	return 0
+}
+
+func (x *GeneralReq) GetSamplingRate() float32 {
+	if x, ok := x.GetSampling().(*GeneralReq_SamplingRate); ok {
+		return x.SamplingRate
+	}
+	return 0
+}
+
+func (x *GeneralReq) GetTaskParameters() string {
+	if x != nil {
+		return x.TaskParameters
+	}
+	return ""
+}
+
+func (x *GeneralReq) GetLabelCollection() *LabelCollection {
+	if x != nil {
+		return x.LabelCollection
+	}
+	return nil
+}
+
+func (x *GeneralReq) GetEvaluateConfig() *EvaluateConfig {
+	if x != nil {
+		return x.EvaluateConfig
+	}
+	return nil
+}
+
+func (x *GeneralReq) GetModelStage() string {
+	if x != nil {
+		return x.ModelStage
+	}
+	return ""
+}
+
+func (x *GeneralReq) GetReqCreateTask() *ReqCreateTask {
+	if x != nil {
+		return x.ReqCreateTask
+	}
+	return nil
+}
+
+type isGeneralReq_Sampling interface {
+	isGeneralReq_Sampling()
+}
+
+type GeneralReq_SamplingCount struct {
+	SamplingCount int32 `protobuf:"varint,22,opt,name=sampling_count,json=samplingCount,proto3,oneof"`
+}
+
+type GeneralReq_SamplingRate struct {
+	SamplingRate float32 `protobuf:"fixed32,23,opt,name=sampling_rate,json=samplingRate,proto3,oneof"`
+}
+
+func (*GeneralReq_SamplingCount) isGeneralReq_Sampling() {}
+
+func (*GeneralReq_SamplingRate) isGeneralReq_Sampling() {}
GetSamplingCount() int32 { + if x, ok := x.GetSampling().(*GeneralReq_SamplingCount); ok { + return x.SamplingCount + } + return 0 +} + +func (x *GeneralReq) GetSamplingRate() float32 { + if x, ok := x.GetSampling().(*GeneralReq_SamplingRate); ok { + return x.SamplingRate + } + return 0 +} + +func (x *GeneralReq) GetTaskParameters() string { + if x != nil { + return x.TaskParameters + } + return "" +} + +func (x *GeneralReq) GetLabelCollection() *LabelCollection { + if x != nil { + return x.LabelCollection + } + return nil +} + +func (x *GeneralReq) GetEvaluateConfig() *EvaluateConfig { + if x != nil { + return x.EvaluateConfig + } + return nil +} + +func (x *GeneralReq) GetModelStage() string { + if x != nil { + return x.ModelStage + } + return "" +} + +func (x *GeneralReq) GetReqCreateTask() *ReqCreateTask { + if x != nil { + return x.ReqCreateTask + } + return nil +} + +type isGeneralReq_Sampling interface { + isGeneralReq_Sampling() +} + +type GeneralReq_SamplingCount struct { + SamplingCount int32 `protobuf:"varint,22,opt,name=sampling_count,json=samplingCount,proto3,oneof"` +} + +type GeneralReq_SamplingRate struct { + SamplingRate float32 `protobuf:"fixed32,23,opt,name=sampling_rate,json=samplingRate,proto3,oneof"` +} + +func (*GeneralReq_SamplingCount) isGeneralReq_Sampling() {} + +func (*GeneralReq_SamplingRate) isGeneralReq_Sampling() {} + +type GeneralResp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + ReqTaskId string `protobuf:"bytes,2,opt,name=req_task_id,json=reqTaskId,proto3" json:"req_task_id,omitempty"` + Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` + ExtStrs []string `protobuf:"bytes,4,rep,name=ext_strs,json=extStrs,proto3" json:"ext_strs,omitempty"` + HashId string `protobuf:"bytes,6,opt,name=hash_id,json=hashId,proto3" json:"hash_id,omitempty"` + DockerImageConfig map[int32]string `protobuf:"bytes,7,rep,name=docker_image_config,json=dockerImageConfig,proto3" json:"docker_image_config,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + AvailableGpuCounts int32 `protobuf:"varint,8,opt,name=available_gpu_counts,json=availableGpuCounts,proto3" json:"available_gpu_counts,omitempty"` + LabelCollection *LabelCollection `protobuf:"bytes,9,opt,name=label_collection,json=labelCollection,proto3" json:"label_collection,omitempty"` + OpsRet bool `protobuf:"varint,10,opt,name=ops_ret,json=opsRet,proto3" json:"ops_ret,omitempty"` + SandboxVersions []string `protobuf:"bytes,11,rep,name=sandbox_versions,json=sandboxVersions,proto3" json:"sandbox_versions,omitempty"` + Detection *RespCMDInference `protobuf:"bytes,1000,opt,name=detection,proto3" json:"detection,omitempty"` + EnableLivecode bool `protobuf:"varint,1001,opt,name=enable_livecode,json=enableLivecode,proto3" json:"enable_livecode,omitempty"` + Evaluation *Evaluation `protobuf:"bytes,1002,opt,name=evaluation,proto3" json:"evaluation,omitempty"` +} + +func (x *GeneralResp) Reset() { + *x = GeneralResp{} + if protoimpl.UnsafeEnabled { + mi := &file_backend_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GeneralResp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GeneralResp) ProtoMessage() {} + +func (x *GeneralResp) ProtoReflect() protoreflect.Message { + mi := &file_backend_proto_msgTypes[1] + if 
protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GeneralResp.ProtoReflect.Descriptor instead. +func (*GeneralResp) Descriptor() ([]byte, []int) { + return file_backend_proto_rawDescGZIP(), []int{1} +} + +func (x *GeneralResp) GetCode() int32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *GeneralResp) GetReqTaskId() string { + if x != nil { + return x.ReqTaskId + } + return "" +} + +func (x *GeneralResp) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *GeneralResp) GetExtStrs() []string { + if x != nil { + return x.ExtStrs + } + return nil +} + +func (x *GeneralResp) GetHashId() string { + if x != nil { + return x.HashId + } + return "" +} + +func (x *GeneralResp) GetDockerImageConfig() map[int32]string { + if x != nil { + return x.DockerImageConfig + } + return nil +} + +func (x *GeneralResp) GetAvailableGpuCounts() int32 { + if x != nil { + return x.AvailableGpuCounts + } + return 0 +} + +func (x *GeneralResp) GetLabelCollection() *LabelCollection { + if x != nil { + return x.LabelCollection + } + return nil +} + +func (x *GeneralResp) GetOpsRet() bool { + if x != nil { + return x.OpsRet + } + return false +} + +func (x *GeneralResp) GetSandboxVersions() []string { + if x != nil { + return x.SandboxVersions + } + return nil +} + +func (x *GeneralResp) GetDetection() *RespCMDInference { + if x != nil { + return x.Detection + } + return nil +} + +func (x *GeneralResp) GetEnableLivecode() bool { + if x != nil { + return x.EnableLivecode + } + return false +} + +func (x *GeneralResp) GetEvaluation() *Evaluation { + if x != nil { + return x.Evaluation + } + return nil +} + +// base args for create task request +type ReqCreateTask struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // task type + TaskType TaskType `protobuf:"varint,1,opt,name=task_type,json=taskType,proto3,enum=mir.command.TaskType" json:"task_type,omitempty"` + NoTaskMonitor bool `protobuf:"varint,2,opt,name=no_task_monitor,json=noTaskMonitor,proto3" json:"no_task_monitor,omitempty"` + Training *TaskReqTraining `protobuf:"bytes,101,opt,name=training,proto3" json:"training,omitempty"` + Mining *TaskReqMining `protobuf:"bytes,102,opt,name=mining,proto3" json:"mining,omitempty"` + ImportDataset *TaskReqImportDataset `protobuf:"bytes,103,opt,name=import_dataset,json=importDataset,proto3" json:"import_dataset,omitempty"` + Exporting *TaskReqExporting `protobuf:"bytes,104,opt,name=exporting,proto3" json:"exporting,omitempty"` + Copy *TaskReqCopyData `protobuf:"bytes,105,opt,name=copy,proto3" json:"copy,omitempty"` + Labeling *TaskReqLabeling `protobuf:"bytes,106,opt,name=labeling,proto3" json:"labeling,omitempty"` + ImportModel *TaskReqImportModel `protobuf:"bytes,107,opt,name=import_model,json=importModel,proto3" json:"import_model,omitempty"` +} + +func (x *ReqCreateTask) Reset() { + *x = ReqCreateTask{} + if protoimpl.UnsafeEnabled { + mi := &file_backend_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReqCreateTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReqCreateTask) ProtoMessage() {} + +func (x *ReqCreateTask) ProtoReflect() protoreflect.Message { + mi := &file_backend_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + 
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReqCreateTask.ProtoReflect.Descriptor instead. +func (*ReqCreateTask) Descriptor() ([]byte, []int) { + return file_backend_proto_rawDescGZIP(), []int{2} +} + +func (x *ReqCreateTask) GetTaskType() TaskType { + if x != nil { + return x.TaskType + } + return TaskType_TaskTypeUnknown +} + +func (x *ReqCreateTask) GetNoTaskMonitor() bool { + if x != nil { + return x.NoTaskMonitor + } + return false +} + +func (x *ReqCreateTask) GetTraining() *TaskReqTraining { + if x != nil { + return x.Training + } + return nil +} + +func (x *ReqCreateTask) GetMining() *TaskReqMining { + if x != nil { + return x.Mining + } + return nil +} + +func (x *ReqCreateTask) GetImportDataset() *TaskReqImportDataset { + if x != nil { + return x.ImportDataset + } + return nil +} + +func (x *ReqCreateTask) GetExporting() *TaskReqExporting { + if x != nil { + return x.Exporting + } + return nil +} + +func (x *ReqCreateTask) GetCopy() *TaskReqCopyData { + if x != nil { + return x.Copy + } + return nil +} + +func (x *ReqCreateTask) GetLabeling() *TaskReqLabeling { + if x != nil { + return x.Labeling + } + return nil +} + +func (x *ReqCreateTask) GetImportModel() *TaskReqImportModel { + if x != nil { + return x.ImportModel + } + return nil +} + +type TaskReqTraining struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + InDatasetTypes []*TaskReqTraining_TrainingDatasetType `protobuf:"bytes,1,rep,name=in_dataset_types,json=inDatasetTypes,proto3" json:"in_dataset_types,omitempty"` + PreprocessConfig string `protobuf:"bytes,2,opt,name=preprocess_config,json=preprocessConfig,proto3" json:"preprocess_config,omitempty"` +} + +func (x *TaskReqTraining) Reset() { + *x = TaskReqTraining{} + if protoimpl.UnsafeEnabled { + mi := &file_backend_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TaskReqTraining) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TaskReqTraining) ProtoMessage() {} + +func (x *TaskReqTraining) ProtoReflect() protoreflect.Message { + mi := &file_backend_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TaskReqTraining.ProtoReflect.Descriptor instead. +func (*TaskReqTraining) Descriptor() ([]byte, []int) { + return file_backend_proto_rawDescGZIP(), []int{3} +} + +func (x *TaskReqTraining) GetInDatasetTypes() []*TaskReqTraining_TrainingDatasetType { + if x != nil { + return x.InDatasetTypes + } + return nil +} + +func (x *TaskReqTraining) GetPreprocessConfig() string { + if x != nil { + return x.PreprocessConfig + } + return "" +} + +type TaskReqMining struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TopK int32 `protobuf:"varint,1,opt,name=top_k,json=topK,proto3" json:"top_k,omitempty"` // > 0, will keep all if set to 0. 
+ GenerateAnnotations bool `protobuf:"varint,2,opt,name=generate_annotations,json=generateAnnotations,proto3" json:"generate_annotations,omitempty"` +} + +func (x *TaskReqMining) Reset() { + *x = TaskReqMining{} + if protoimpl.UnsafeEnabled { + mi := &file_backend_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TaskReqMining) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TaskReqMining) ProtoMessage() {} + +func (x *TaskReqMining) ProtoReflect() protoreflect.Message { + mi := &file_backend_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TaskReqMining.ProtoReflect.Descriptor instead. +func (*TaskReqMining) Descriptor() ([]byte, []int) { + return file_backend_proto_rawDescGZIP(), []int{4} +} + +func (x *TaskReqMining) GetTopK() int32 { + if x != nil { + return x.TopK + } + return 0 +} + +func (x *TaskReqMining) GetGenerateAnnotations() bool { + if x != nil { + return x.GenerateAnnotations + } + return false +} + +type TaskReqImportDataset struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // store media files + AssetDir string `protobuf:"bytes,1,opt,name=asset_dir,json=assetDir,proto3" json:"asset_dir,omitempty"` + // single pascal xml per asset, same base_filename as in asset-folder + PredDir string `protobuf:"bytes,2,opt,name=pred_dir,json=predDir,proto3" json:"pred_dir,omitempty"` + GtDir string `protobuf:"bytes,3,opt,name=gt_dir,json=gtDir,proto3" json:"gt_dir,omitempty"` + // strategy for unknown class types: stop, ignore or add + UnknownTypesStrategy UnknownTypesStrategy `protobuf:"varint,4,opt,name=unknown_types_strategy,json=unknownTypesStrategy,proto3,enum=ymir.backend.UnknownTypesStrategy" json:"unknown_types_strategy,omitempty"` + CleanDirs bool `protobuf:"varint,5,opt,name=clean_dirs,json=cleanDirs,proto3" json:"clean_dirs,omitempty"` +} + +func (x *TaskReqImportDataset) Reset() { + *x = TaskReqImportDataset{} + if protoimpl.UnsafeEnabled { + mi := &file_backend_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TaskReqImportDataset) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TaskReqImportDataset) ProtoMessage() {} + +func (x *TaskReqImportDataset) ProtoReflect() protoreflect.Message { + mi := &file_backend_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TaskReqImportDataset.ProtoReflect.Descriptor instead. 
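The `sampling` oneof on `GeneralReq`, declared above, is set by assigning one of the generated wrapper structs (`GeneralReq_SamplingCount` or `GeneralReq_SamplingRate`); only one arm can be populated at a time, and the accessor for the unset arm returns its zero value. A minimal sketch under the same hypothetical import:

```go
package main

import (
	"fmt"

	pb "example.com/ymir/protos" // hypothetical import path
)

func main() {
	req := &pb.GeneralReq{
		UserId:  "0001",
		RepoId:  "000001",
		ReqType: pb.RequestType_CMD_SAMPLING,
		// Set the oneof by choosing one wrapper type; here: sample 10% of assets.
		Sampling: &pb.GeneralReq_SamplingRate{SamplingRate: 0.1},
	}

	fmt.Println(req.GetSamplingRate())  // 0.1
	fmt.Println(req.GetSamplingCount()) // 0 — the count arm is not set
}
```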
+func (*TaskReqImportDataset) Descriptor() ([]byte, []int) { + return file_backend_proto_rawDescGZIP(), []int{5} +} + +func (x *TaskReqImportDataset) GetAssetDir() string { + if x != nil { + return x.AssetDir + } + return "" +} + +func (x *TaskReqImportDataset) GetPredDir() string { + if x != nil { + return x.PredDir + } + return "" +} + +func (x *TaskReqImportDataset) GetGtDir() string { + if x != nil { + return x.GtDir + } + return "" +} + +func (x *TaskReqImportDataset) GetUnknownTypesStrategy() UnknownTypesStrategy { + if x != nil { + return x.UnknownTypesStrategy + } + return UnknownTypesStrategy_UTS_STOP +} + +func (x *TaskReqImportDataset) GetCleanDirs() bool { + if x != nil { + return x.CleanDirs + } + return false +} + +type TaskReqExporting struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DatasetId string `protobuf:"bytes,1,opt,name=dataset_id,json=datasetId,proto3" json:"dataset_id,omitempty"` + Format AnnoFormat `protobuf:"varint,2,opt,name=format,proto3,enum=mir.command.AnnoFormat" json:"format,omitempty"` + AssetDir string `protobuf:"bytes,3,opt,name=asset_dir,json=assetDir,proto3" json:"asset_dir,omitempty"` + PredDir string `protobuf:"bytes,4,opt,name=pred_dir,json=predDir,proto3" json:"pred_dir,omitempty"` + GtDir string `protobuf:"bytes,5,opt,name=gt_dir,json=gtDir,proto3" json:"gt_dir,omitempty"` +} + +func (x *TaskReqExporting) Reset() { + *x = TaskReqExporting{} + if protoimpl.UnsafeEnabled { + mi := &file_backend_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TaskReqExporting) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TaskReqExporting) ProtoMessage() {} + +func (x *TaskReqExporting) ProtoReflect() protoreflect.Message { + mi := &file_backend_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TaskReqExporting.ProtoReflect.Descriptor instead. 
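`TaskReqImportDataset` points the controller at a media folder plus optional ground-truth and prediction folders, and `unknown_types_strategy` decides what happens when an annotation names a class missing from the label collection. A minimal sketch; `UnknownTypesStrategy_UTS_IGNORE` follows the standard protoc-gen-go naming for the `UTS_IGNORE` value and is an assumption, as is the import path:

```go
package main

import (
	"fmt"

	pb "example.com/ymir/protos" // hypothetical import path
)

func main() {
	importReq := &pb.TaskReqImportDataset{
		AssetDir: "/data/assets", // media files
		GtDir:    "/data/gt",     // one pascal xml per asset, same base filename
		// Skip unknown class names instead of aborting the import;
		// UTS_STOP and UTS_ADD are the other strategies.
		UnknownTypesStrategy: pb.UnknownTypesStrategy_UTS_IGNORE,
		CleanDirs:            true,
	}
	fmt.Println(importReq.GetUnknownTypesStrategy())
}
```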
+func (*TaskReqExporting) Descriptor() ([]byte, []int) { + return file_backend_proto_rawDescGZIP(), []int{6} +} + +func (x *TaskReqExporting) GetDatasetId() string { + if x != nil { + return x.DatasetId + } + return "" +} + +func (x *TaskReqExporting) GetFormat() AnnoFormat { + if x != nil { + return x.Format + } + return AnnoFormat_AF_NO_ANNOTATION +} + +func (x *TaskReqExporting) GetAssetDir() string { + if x != nil { + return x.AssetDir + } + return "" +} + +func (x *TaskReqExporting) GetPredDir() string { + if x != nil { + return x.PredDir + } + return "" +} + +func (x *TaskReqExporting) GetGtDir() string { + if x != nil { + return x.GtDir + } + return "" +} + +type TaskReqCopyData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SrcUserId string `protobuf:"bytes,1,opt,name=src_user_id,json=srcUserId,proto3" json:"src_user_id,omitempty"` + SrcRepoId string `protobuf:"bytes,2,opt,name=src_repo_id,json=srcRepoId,proto3" json:"src_repo_id,omitempty"` + NameStrategyIgnore bool `protobuf:"varint,3,opt,name=name_strategy_ignore,json=nameStrategyIgnore,proto3" json:"name_strategy_ignore,omitempty"` + DropAnnotations bool `protobuf:"varint,4,opt,name=drop_annotations,json=dropAnnotations,proto3" json:"drop_annotations,omitempty"` +} + +func (x *TaskReqCopyData) Reset() { + *x = TaskReqCopyData{} + if protoimpl.UnsafeEnabled { + mi := &file_backend_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TaskReqCopyData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TaskReqCopyData) ProtoMessage() {} + +func (x *TaskReqCopyData) ProtoReflect() protoreflect.Message { + mi := &file_backend_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TaskReqCopyData.ProtoReflect.Descriptor instead. 
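`TaskReqExporting` is the mirror image of the import request: a dataset id plus destination folders and an annotation format. A minimal sketch; `AF_NO_ANNOTATION` is the zero value of `mir.command.AnnoFormat` and presumably exports assets without annotations (an inference from the name), and the import path is again hypothetical:

```go
package main

import (
	"fmt"

	pb "example.com/ymir/protos" // hypothetical import path
)

func main() {
	exportReq := &pb.TaskReqExporting{
		DatasetId: "dataset-branch-id", // placeholder id
		// Zero value: no annotation export; other AnnoFormat values
		// are defined in mir_command.proto.
		Format:   pb.AnnoFormat_AF_NO_ANNOTATION,
		AssetDir: "/export/assets",
		GtDir:    "/export/gt",
	}
	fmt.Println(exportReq.GetFormat())
}
```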
+func (*TaskReqCopyData) Descriptor() ([]byte, []int) { + return file_backend_proto_rawDescGZIP(), []int{7} +} + +func (x *TaskReqCopyData) GetSrcUserId() string { + if x != nil { + return x.SrcUserId + } + return "" +} + +func (x *TaskReqCopyData) GetSrcRepoId() string { + if x != nil { + return x.SrcRepoId + } + return "" +} + +func (x *TaskReqCopyData) GetNameStrategyIgnore() bool { + if x != nil { + return x.NameStrategyIgnore + } + return false +} + +func (x *TaskReqCopyData) GetDropAnnotations() bool { + if x != nil { + return x.DropAnnotations + } + return false +} + +type TaskReqLabeling struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + LabelerAccounts []string `protobuf:"bytes,1,rep,name=labeler_accounts,json=labelerAccounts,proto3" json:"labeler_accounts,omitempty"` + ExpertInstructionUrl string `protobuf:"bytes,2,opt,name=expert_instruction_url,json=expertInstructionUrl,proto3" json:"expert_instruction_url,omitempty"` + ProjectName string `protobuf:"bytes,3,opt,name=project_name,json=projectName,proto3" json:"project_name,omitempty"` + ExportAnnotation bool `protobuf:"varint,4,opt,name=export_annotation,json=exportAnnotation,proto3" json:"export_annotation,omitempty"` + AnnotationType AnnotationType `protobuf:"varint,5,opt,name=annotation_type,json=annotationType,proto3,enum=ymir.backend.AnnotationType" json:"annotation_type,omitempty"` +} + +func (x *TaskReqLabeling) Reset() { + *x = TaskReqLabeling{} + if protoimpl.UnsafeEnabled { + mi := &file_backend_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TaskReqLabeling) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TaskReqLabeling) ProtoMessage() {} + +func (x *TaskReqLabeling) ProtoReflect() protoreflect.Message { + mi := &file_backend_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TaskReqLabeling.ProtoReflect.Descriptor instead. 
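`TaskReqCopyData` identifies the source sandbox by user and repo id; the two booleans tune conflict handling and annotation transfer. A minimal sketch — the comments on the flags are inferences from the field names, not documented behavior:

```go
package main

import (
	"fmt"

	pb "example.com/ymir/protos" // hypothetical import path
)

func main() {
	copyReq := &pb.TaskReqCopyData{
		SrcUserId: "0002",   // copy from another user's sandbox...
		SrcRepoId: "000042", // ...and repo (placeholder ids)
		// Presumably: skip label-name conflicts rather than stopping
		// (an assumption from the field name).
		NameStrategyIgnore: true,
		// Copy assets only; the destination starts without annotations.
		DropAnnotations: true,
	}
	fmt.Println(copyReq.GetSrcUserId(), copyReq.GetDropAnnotations())
}
```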
+func (*TaskReqLabeling) Descriptor() ([]byte, []int) { + return file_backend_proto_rawDescGZIP(), []int{8} +} + +func (x *TaskReqLabeling) GetLabelerAccounts() []string { + if x != nil { + return x.LabelerAccounts + } + return nil +} + +func (x *TaskReqLabeling) GetExpertInstructionUrl() string { + if x != nil { + return x.ExpertInstructionUrl + } + return "" +} + +func (x *TaskReqLabeling) GetProjectName() string { + if x != nil { + return x.ProjectName + } + return "" +} + +func (x *TaskReqLabeling) GetExportAnnotation() bool { + if x != nil { + return x.ExportAnnotation + } + return false +} + +func (x *TaskReqLabeling) GetAnnotationType() AnnotationType { + if x != nil { + return x.AnnotationType + } + return AnnotationType_NOT_SET +} + +type TaskReqImportModel struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ModelPackagePath string `protobuf:"bytes,1,opt,name=model_package_path,json=modelPackagePath,proto3" json:"model_package_path,omitempty"` +} + +func (x *TaskReqImportModel) Reset() { + *x = TaskReqImportModel{} + if protoimpl.UnsafeEnabled { + mi := &file_backend_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TaskReqImportModel) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TaskReqImportModel) ProtoMessage() {} + +func (x *TaskReqImportModel) ProtoReflect() protoreflect.Message { + mi := &file_backend_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TaskReqImportModel.ProtoReflect.Descriptor instead. +func (*TaskReqImportModel) Descriptor() ([]byte, []int) { + return file_backend_proto_rawDescGZIP(), []int{9} +} + +func (x *TaskReqImportModel) GetModelPackagePath() string { + if x != nil { + return x.ModelPackagePath + } + return "" +} + +type RespCMDInference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // / key: image id, value: annotations of that single image + ImageAnnotations map[string]*SingleImageAnnotations `protobuf:"bytes,1,rep,name=image_annotations,json=imageAnnotations,proto3" json:"image_annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *RespCMDInference) Reset() { + *x = RespCMDInference{} + if protoimpl.UnsafeEnabled { + mi := &file_backend_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RespCMDInference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RespCMDInference) ProtoMessage() {} + +func (x *RespCMDInference) ProtoReflect() protoreflect.Message { + mi := &file_backend_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RespCMDInference.ProtoReflect.Descriptor instead. 
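`TaskReqLabeling` carries everything the labeling integration needs: the annotator accounts, an optional instruction document URL, and the `AnnotationType` that decides whether finished annotations land as ground truth or predictions. A minimal sketch, with the usual hypothetical import path:

```go
package main

import (
	"fmt"

	pb "example.com/ymir/protos" // hypothetical import path
)

func main() {
	labeling := &pb.TaskReqLabeling{
		LabelerAccounts:  []string{"annotator@example.com"}, // placeholder account
		ProjectName:      "det-person-v1",                   // placeholder name
		ExportAnnotation: true,
		// Import the finished annotations as ground truth rather than predictions.
		AnnotationType: pb.AnnotationType_GT,
	}
	fmt.Println(labeling.GetAnnotationType())
}
```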
+func (*RespCMDInference) Descriptor() ([]byte, []int) { + return file_backend_proto_rawDescGZIP(), []int{10} +} + +func (x *RespCMDInference) GetImageAnnotations() map[string]*SingleImageAnnotations { + if x != nil { + return x.ImageAnnotations + } + return nil +} + +type LabelCollection struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Labels []*Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty"` + YmirVersion string `protobuf:"bytes,2,opt,name=ymir_version,json=ymirVersion,proto3" json:"ymir_version,omitempty"` +} + +func (x *LabelCollection) Reset() { + *x = LabelCollection{} + if protoimpl.UnsafeEnabled { + mi := &file_backend_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *LabelCollection) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*LabelCollection) ProtoMessage() {} + +func (x *LabelCollection) ProtoReflect() protoreflect.Message { + mi := &file_backend_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use LabelCollection.ProtoReflect.Descriptor instead. +func (*LabelCollection) Descriptor() ([]byte, []int) { + return file_backend_proto_rawDescGZIP(), []int{11} +} + +func (x *LabelCollection) GetLabels() []*Label { + if x != nil { + return x.Labels + } + return nil +} + +func (x *LabelCollection) GetYmirVersion() string { + if x != nil { + return x.YmirVersion + } + return "" +} + +type Label struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id int32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Aliases []string `protobuf:"bytes,3,rep,name=aliases,proto3" json:"aliases,omitempty"` + CreateTime string `protobuf:"bytes,4,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` // RFC 3339 date strings + UpdateTime string `protobuf:"bytes,5,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` // RFC 3339 date strings +} + +func (x *Label) Reset() { + *x = Label{} + if protoimpl.UnsafeEnabled { + mi := &file_backend_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Label) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Label) ProtoMessage() {} + +func (x *Label) ProtoReflect() protoreflect.Message { + mi := &file_backend_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Label.ProtoReflect.Descriptor instead. 
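`RespCMDInference` keys per-image annotations by image id, and the generated getters are nil-safe (each checks `x != nil`), so callers can chain them without guarding every level. A minimal sketch reading the detection payload off a `GeneralResp`:

```go
package main

import (
	"fmt"

	pb "example.com/ymir/protos" // hypothetical import path
)

// printDetections walks the per-image annotation map of a CMD_INFERENCE response.
// GetDetection and GetImageAnnotations are nil-safe, so a response without a
// detection payload simply yields an empty range.
func printDetections(resp *pb.GeneralResp) {
	for imageID, anns := range resp.GetDetection().GetImageAnnotations() {
		fmt.Printf("%s: %v\n", imageID, anns) // anns: *mir.command.SingleImageAnnotations
	}
}

func main() {
	printDetections(&pb.GeneralResp{Code: 0}) // prints nothing: no detection set
}
```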
+func (*Label) Descriptor() ([]byte, []int) { + return file_backend_proto_rawDescGZIP(), []int{12} +} + +func (x *Label) GetId() int32 { + if x != nil { + return x.Id + } + return 0 +} + +func (x *Label) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Label) GetAliases() []string { + if x != nil { + return x.Aliases + } + return nil +} + +func (x *Label) GetCreateTime() string { + if x != nil { + return x.CreateTime + } + return "" +} + +func (x *Label) GetUpdateTime() string { + if x != nil { + return x.UpdateTime + } + return "" +} + +type TaskReqTraining_TrainingDatasetType struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DatasetId string `protobuf:"bytes,1,opt,name=dataset_id,json=datasetId,proto3" json:"dataset_id,omitempty"` + DatasetType TvtType `protobuf:"varint,2,opt,name=dataset_type,json=datasetType,proto3,enum=mir.command.TvtType" json:"dataset_type,omitempty"` +} + +func (x *TaskReqTraining_TrainingDatasetType) Reset() { + *x = TaskReqTraining_TrainingDatasetType{} + if protoimpl.UnsafeEnabled { + mi := &file_backend_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TaskReqTraining_TrainingDatasetType) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TaskReqTraining_TrainingDatasetType) ProtoMessage() {} + +func (x *TaskReqTraining_TrainingDatasetType) ProtoReflect() protoreflect.Message { + mi := &file_backend_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TaskReqTraining_TrainingDatasetType.ProtoReflect.Descriptor instead. 
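Putting the pieces together: a training task nests `TrainingDatasetType` entries (dataset id plus its `TvtType` role) inside `TaskReqTraining`, which rides in a `ReqCreateTask` under a `TASK_CREATE` request. A minimal sketch; `TvtTypeUnknown` is used only as a placeholder, since the concrete train/validation `TvtType` values live in `mir_command.proto` and are not shown here:

```go
package main

import (
	"fmt"

	pb "example.com/ymir/protos" // hypothetical import path
)

func main() {
	training := &pb.TaskReqTraining{
		InDatasetTypes: []*pb.TaskReqTraining_TrainingDatasetType{
			{
				DatasetId: "train-branch-id", // placeholder id
				// Placeholder: substitute the concrete training/validation
				// TvtType value from mir_command.proto.
				DatasetType: pb.TvtType_TvtTypeUnknown,
			},
		},
	}

	req := &pb.GeneralReq{
		UserId:        "0001",
		RepoId:        "000001",
		ReqType:       pb.RequestType_TASK_CREATE,
		ReqCreateTask: &pb.ReqCreateTask{Training: training},
	}
	fmt.Println(req.GetReqCreateTask().GetTraining().GetInDatasetTypes())
}
```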
+func (*TaskReqTraining_TrainingDatasetType) Descriptor() ([]byte, []int) { + return file_backend_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *TaskReqTraining_TrainingDatasetType) GetDatasetId() string { + if x != nil { + return x.DatasetId + } + return "" +} + +func (x *TaskReqTraining_TrainingDatasetType) GetDatasetType() TvtType { + if x != nil { + return x.DatasetType + } + return TvtType_TvtTypeUnknown +} + +var File_backend_proto protoreflect.FileDescriptor + +var file_backend_proto_rawDesc = []byte{ + 0x0a, 0x0d, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x0c, 0x79, 0x6d, 0x69, 0x72, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x1a, 0x11, 0x6d, + 0x69, 0x72, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x84, 0x09, 0x0a, 0x0a, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x12, + 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x72, 0x65, 0x70, 0x6f, + 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x70, 0x6f, 0x49, + 0x64, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x79, 0x6d, 0x69, 0x72, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x65, + 0x6e, 0x64, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, + 0x72, 0x65, 0x71, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, + 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, + 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x74, 0x6f, 0x6e, 0x5f, 0x6f, 0x70, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x74, 0x6f, + 0x6e, 0x4f, 0x70, 0x12, 0x1e, 0x0a, 0x0b, 0x68, 0x69, 0x73, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, + 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x68, 0x69, 0x73, 0x54, 0x61, 0x73, + 0x6b, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x64, 0x73, 0x74, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x73, + 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x73, 0x74, + 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x69, 0x6e, 0x5f, + 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x49, 0x64, 0x73, 0x12, + 0x24, 0x0a, 0x0e, 0x65, 0x78, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x5f, 0x69, 0x64, + 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x78, 0x44, 0x61, 0x74, 0x61, 0x73, + 0x65, 0x74, 0x49, 0x64, 0x73, 0x12, 0x20, 0x0a, 0x0c, 0x69, 0x6e, 0x5f, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0a, 0x69, 0x6e, 0x43, + 0x6c, 0x61, 0x73, 0x73, 0x49, 0x64, 0x73, 0x12, 0x20, 0x0a, 0x0c, 0x65, 0x78, 0x5f, 0x63, 0x6c, + 0x61, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0a, 0x65, + 0x78, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x49, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, + 0x63, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x12, + 0x25, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x4d, + 
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x5f, + 0x68, 0x61, 0x73, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x6f, 0x64, 0x65, + 0x6c, 0x48, 0x61, 0x73, 0x68, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x73, 0x73, 0x65, 0x74, 0x5f, 0x64, + 0x69, 0x72, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x73, 0x73, 0x65, 0x74, 0x44, + 0x69, 0x72, 0x12, 0x2e, 0x0a, 0x13, 0x64, 0x6f, 0x63, 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x6d, 0x61, + 0x67, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x11, 0x64, 0x6f, 0x63, 0x6b, 0x65, 0x72, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, + 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x4f, 0x6e, 0x6c, + 0x79, 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x61, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x0e, 0x6d, 0x65, 0x72, 0x67, + 0x65, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x1b, 0x2e, 0x79, 0x6d, 0x69, 0x72, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, + 0x4d, 0x65, 0x72, 0x67, 0x65, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x0d, 0x6d, + 0x65, 0x72, 0x67, 0x65, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x47, 0x0a, 0x14, + 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x6d, 0x69, 0x72, + 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, + 0x65, 0x52, 0x12, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, 0x54, 0x61, 0x73, + 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, + 0x67, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x16, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, + 0x0d, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x25, + 0x0a, 0x0d, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, + 0x17, 0x20, 0x01, 0x28, 0x02, 0x48, 0x00, 0x52, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, + 0x67, 0x52, 0x61, 0x74, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, + 0x74, 0x61, 0x73, 0x6b, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x48, + 0x0a, 0x10, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x79, 0x6d, 0x69, 0x72, 0x2e, + 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x43, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x43, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, 0x0f, 0x65, 0x76, 0x61, 0x6c, + 0x75, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x1a, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, + 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0e, + 0x65, 0x76, 0x61, 0x6c, 
0x75, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1f, + 0x0a, 0x0b, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x1b, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, + 0x44, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x61, + 0x73, 0x6b, 0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x79, 0x6d, 0x69, 0x72, + 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x52, 0x65, 0x71, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x54, 0x61, 0x73, 0x6b, 0x42, 0x0a, 0x0a, 0x08, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, + 0x67, 0x4a, 0x04, 0x08, 0x11, 0x10, 0x12, 0x22, 0xa0, 0x05, 0x0a, 0x0b, 0x47, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x1e, 0x0a, 0x0b, 0x72, + 0x65, 0x71, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x72, 0x65, 0x71, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x5f, 0x73, 0x74, 0x72, + 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x74, 0x53, 0x74, 0x72, 0x73, + 0x12, 0x17, 0x0a, 0x07, 0x68, 0x61, 0x73, 0x68, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x68, 0x61, 0x73, 0x68, 0x49, 0x64, 0x12, 0x60, 0x0a, 0x13, 0x64, 0x6f, 0x63, + 0x6b, 0x65, 0x72, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x79, 0x6d, 0x69, 0x72, 0x2e, 0x62, 0x61, + 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x6c, 0x52, 0x65, 0x73, + 0x70, 0x2e, 0x44, 0x6f, 0x63, 0x6b, 0x65, 0x72, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x64, 0x6f, 0x63, 0x6b, 0x65, 0x72, + 0x49, 0x6d, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x30, 0x0a, 0x14, 0x61, + 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x67, 0x70, 0x75, 0x5f, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x61, 0x76, 0x61, 0x69, 0x6c, + 0x61, 0x62, 0x6c, 0x65, 0x47, 0x70, 0x75, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, 0x48, 0x0a, + 0x10, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x79, 0x6d, 0x69, 0x72, 0x2e, 0x62, + 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x43, 0x6f, 0x6c, 0x6c, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x43, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x6f, 0x70, 0x73, 0x5f, 0x72, + 0x65, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x6f, 0x70, 0x73, 0x52, 0x65, 0x74, + 0x12, 0x29, 0x0a, 0x10, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x61, 0x6e, 0x64, + 0x62, 0x6f, 0x78, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3d, 0x0a, 0x09, 0x64, + 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 
0x18, 0xe8, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1e, 0x2e, 0x79, 0x6d, 0x69, 0x72, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x52, + 0x65, 0x73, 0x70, 0x43, 0x4d, 0x44, 0x49, 0x6e, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, + 0x09, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0f, 0x65, 0x6e, + 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x63, 0x6f, 0x64, 0x65, 0x18, 0xe9, 0x07, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x69, 0x76, 0x65, + 0x63, 0x6f, 0x64, 0x65, 0x12, 0x38, 0x0a, 0x0a, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0xea, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x69, 0x72, 0x2e, + 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x44, + 0x0a, 0x16, 0x44, 0x6f, 0x63, 0x6b, 0x65, 0x72, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x97, 0x04, 0x0a, 0x0d, 0x52, + 0x65, 0x71, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x32, 0x0a, 0x09, + 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x15, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x54, 0x61, + 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x6f, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x6d, 0x6f, 0x6e, 0x69, + 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x6e, 0x6f, 0x54, 0x61, 0x73, + 0x6b, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x12, 0x39, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x69, + 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x65, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x79, 0x6d, 0x69, + 0x72, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, + 0x71, 0x54, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x08, 0x74, 0x72, 0x61, 0x69, 0x6e, + 0x69, 0x6e, 0x67, 0x12, 0x33, 0x0a, 0x06, 0x6d, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x66, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x79, 0x6d, 0x69, 0x72, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x65, + 0x6e, 0x64, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x4d, 0x69, 0x6e, 0x69, 0x6e, 0x67, + 0x52, 0x06, 0x6d, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x49, 0x0a, 0x0e, 0x69, 0x6d, 0x70, 0x6f, + 0x72, 0x74, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x18, 0x67, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x79, 0x6d, 0x69, 0x72, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, + 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x44, 0x61, 0x74, + 0x61, 0x73, 0x65, 0x74, 0x52, 0x0d, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x44, 0x61, 0x74, 0x61, + 0x73, 0x65, 0x74, 0x12, 0x3c, 0x0a, 0x09, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, + 0x18, 0x68, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x79, 0x6d, 0x69, 0x72, 0x2e, 0x62, 0x61, + 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x45, 0x78, 0x70, + 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x52, 0x09, 0x65, 0x78, 0x70, 0x6f, 
0x72, 0x74, 0x69, 0x6e, + 0x67, 0x12, 0x31, 0x0a, 0x04, 0x63, 0x6f, 0x70, 0x79, 0x18, 0x69, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x79, 0x6d, 0x69, 0x72, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x54, + 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x43, 0x6f, 0x70, 0x79, 0x44, 0x61, 0x74, 0x61, 0x52, 0x04, + 0x63, 0x6f, 0x70, 0x79, 0x12, 0x39, 0x0a, 0x08, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x69, 0x6e, 0x67, + 0x18, 0x6a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x79, 0x6d, 0x69, 0x72, 0x2e, 0x62, 0x61, + 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x4c, 0x61, 0x62, + 0x65, 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x08, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x69, 0x6e, 0x67, 0x12, + 0x43, 0x0a, 0x0c, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x18, + 0x6b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x79, 0x6d, 0x69, 0x72, 0x2e, 0x62, 0x61, 0x63, + 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x49, 0x6d, 0x70, 0x6f, + 0x72, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x52, 0x0b, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4d, + 0x6f, 0x64, 0x65, 0x6c, 0x22, 0x8a, 0x02, 0x0a, 0x0f, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, + 0x54, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x5b, 0x0a, 0x10, 0x69, 0x6e, 0x5f, 0x64, + 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x79, 0x6d, 0x69, 0x72, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, + 0x64, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x54, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, + 0x67, 0x2e, 0x54, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0e, 0x69, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x72, 0x65, 0x70, 0x72, 0x6f, 0x63, + 0x65, 0x73, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x10, 0x70, 0x72, 0x65, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x1a, 0x6d, 0x0a, 0x13, 0x54, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x44, 0x61, + 0x74, 0x61, 0x73, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x64, 0x61, 0x74, + 0x61, 0x73, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, + 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x49, 0x64, 0x12, 0x37, 0x0a, 0x0c, 0x64, 0x61, 0x74, 0x61, + 0x73, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, + 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x54, 0x76, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x22, 0x57, 0x0a, 0x0d, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x4d, 0x69, 0x6e, 0x69, + 0x6e, 0x67, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x5f, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x04, 0x74, 0x6f, 0x70, 0x4b, 0x12, 0x31, 0x0a, 0x14, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x5f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x41, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xde, 0x01, 0x0a, 0x14, 0x54, + 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x44, 0x61, 0x74, 0x61, + 0x73, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x73, 0x73, 0x65, 0x74, 0x5f, 0x64, 0x69, 0x72, + 
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x73, 0x73, 0x65, 0x74, 0x44, 0x69, 0x72, + 0x12, 0x19, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x64, 0x5f, 0x64, 0x69, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x70, 0x72, 0x65, 0x64, 0x44, 0x69, 0x72, 0x12, 0x15, 0x0a, 0x06, 0x67, + 0x74, 0x5f, 0x64, 0x69, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x67, 0x74, 0x44, + 0x69, 0x72, 0x12, 0x58, 0x0a, 0x16, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x79, 0x6d, 0x69, 0x72, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, + 0x64, 0x2e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x53, 0x74, + 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x14, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, + 0x79, 0x70, 0x65, 0x73, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x1d, 0x0a, 0x0a, + 0x63, 0x6c, 0x65, 0x61, 0x6e, 0x5f, 0x64, 0x69, 0x72, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x09, 0x63, 0x6c, 0x65, 0x61, 0x6e, 0x44, 0x69, 0x72, 0x73, 0x22, 0xb1, 0x01, 0x0a, 0x10, + 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, + 0x12, 0x1d, 0x0a, 0x0a, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x49, 0x64, 0x12, + 0x2f, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x17, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x41, 0x6e, + 0x6e, 0x6f, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x73, 0x73, 0x65, 0x74, 0x5f, 0x64, 0x69, 0x72, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x73, 0x73, 0x65, 0x74, 0x44, 0x69, 0x72, 0x12, 0x19, 0x0a, + 0x08, 0x70, 0x72, 0x65, 0x64, 0x5f, 0x64, 0x69, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x70, 0x72, 0x65, 0x64, 0x44, 0x69, 0x72, 0x12, 0x15, 0x0a, 0x06, 0x67, 0x74, 0x5f, 0x64, + 0x69, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x67, 0x74, 0x44, 0x69, 0x72, 0x22, + 0xae, 0x01, 0x0a, 0x0f, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x43, 0x6f, 0x70, 0x79, 0x44, + 0x61, 0x74, 0x61, 0x12, 0x1e, 0x0a, 0x0b, 0x73, 0x72, 0x63, 0x5f, 0x75, 0x73, 0x65, 0x72, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x72, 0x63, 0x55, 0x73, 0x65, + 0x72, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0b, 0x73, 0x72, 0x63, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x72, 0x63, 0x52, 0x65, 0x70, + 0x6f, 0x49, 0x64, 0x12, 0x30, 0x0a, 0x14, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x74, 0x72, 0x61, + 0x74, 0x65, 0x67, 0x79, 0x5f, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x12, 0x6e, 0x61, 0x6d, 0x65, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x49, + 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x64, 0x72, 0x6f, 0x70, 0x5f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0f, 0x64, 0x72, 0x6f, 0x70, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x22, 0x89, 0x02, 0x0a, 0x0f, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x4c, 0x61, 0x62, 0x65, + 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x65, 0x72, 0x5f, + 0x61, 0x63, 0x63, 0x6f, 
0x75, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, + 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x65, 0x72, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, + 0x34, 0x0a, 0x16, 0x65, 0x78, 0x70, 0x65, 0x72, 0x74, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x72, 0x75, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x14, 0x65, 0x78, 0x70, 0x65, 0x72, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x70, 0x6f, + 0x72, 0x74, 0x5f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x41, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x45, 0x0a, 0x0f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, + 0x2e, 0x79, 0x6d, 0x69, 0x72, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x41, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0e, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x22, 0x42, 0x0a, 0x12, + 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x6f, 0x64, + 0x65, 0x6c, 0x12, 0x2c, 0x0a, 0x12, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x5f, 0x70, 0x61, 0x63, 0x6b, + 0x61, 0x67, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, + 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x50, 0x61, 0x74, 0x68, + 0x22, 0xdf, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x70, 0x43, 0x4d, 0x44, 0x49, 0x6e, 0x66, 0x65, + 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x61, 0x0a, 0x11, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x34, 0x2e, 0x79, 0x6d, 0x69, 0x72, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x43, 0x4d, 0x44, 0x49, 0x6e, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, + 0x2e, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x41, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x68, 0x0a, 0x15, 0x49, 0x6d, 0x61, 0x67, + 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, + 0x2e, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x41, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0x61, 0x0a, 0x0f, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x43, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x79, 0x6d, 0x69, 0x72, 0x2e, 0x62, 0x61, 0x63, + 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x4c, 0x61, 0x62, 
0x65, 0x6c, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x79, 0x6d, 0x69, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x79, 0x6d, 0x69, 0x72, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x87, 0x01, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, + 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x1f, 0x0a, + 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1f, + 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x2a, + 0x2e, 0x0a, 0x0d, 0x4d, 0x65, 0x72, 0x67, 0x65, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, + 0x12, 0x08, 0x0a, 0x04, 0x53, 0x54, 0x4f, 0x50, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x4f, + 0x53, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x55, 0x45, 0x53, 0x54, 0x10, 0x02, 0x2a, + 0x41, 0x0a, 0x14, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x53, + 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x0c, 0x0a, 0x08, 0x55, 0x54, 0x53, 0x5f, 0x53, + 0x54, 0x4f, 0x50, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x54, 0x53, 0x5f, 0x49, 0x47, 0x4e, + 0x4f, 0x52, 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x54, 0x53, 0x5f, 0x41, 0x44, 0x44, + 0x10, 0x02, 0x2a, 0x9e, 0x04, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x12, 0x0a, 0x0e, 0x43, 0x4d, 0x44, 0x5f, 0x42, 0x52, 0x41, 0x4e, 0x43, 0x48, 0x5f, 0x44, 0x45, + 0x4c, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x43, 0x4d, 0x44, 0x5f, 0x42, 0x52, 0x41, 0x4e, 0x43, + 0x48, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x43, 0x4d, 0x44, 0x5f, + 0x42, 0x52, 0x41, 0x4e, 0x43, 0x48, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x4f, 0x55, 0x54, 0x10, + 0x03, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4d, 0x44, 0x5f, 0x42, 0x52, 0x41, 0x4e, 0x43, 0x48, 0x5f, + 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x04, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4d, 0x44, 0x5f, + 0x43, 0x4c, 0x4f, 0x4e, 0x45, 0x10, 0x05, 0x12, 0x0e, 0x0a, 0x0a, 0x43, 0x4d, 0x44, 0x5f, 0x43, + 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x06, 0x12, 0x0e, 0x0a, 0x0a, 0x43, 0x4d, 0x44, 0x5f, 0x46, + 0x49, 0x4c, 0x54, 0x45, 0x52, 0x10, 0x07, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x4d, 0x44, 0x5f, 0x49, + 0x4e, 0x49, 0x54, 0x10, 0x08, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4d, 0x44, 0x5f, 0x4c, 0x4f, 0x47, + 0x10, 0x09, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4d, 0x44, 0x5f, 0x4d, 0x45, 0x52, 0x47, 0x45, 0x10, + 0x0a, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4d, 0x44, 0x5f, 0x49, 0x4e, 0x46, 0x45, 0x52, 0x45, 0x4e, + 0x43, 0x45, 0x10, 0x0b, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4d, 0x44, 0x5f, 0x4c, 0x41, 0x42, 0x45, + 0x4c, 0x5f, 0x41, 0x44, 0x44, 0x10, 0x0c, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4d, 0x44, 0x5f, 0x4c, + 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x47, 0x45, 0x54, 0x10, 0x0d, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4d, + 0x44, 0x5f, 0x54, 0x45, 0x52, 0x4d, 0x49, 0x4e, 0x41, 0x54, 0x45, 0x10, 
0x0e, 0x12, 0x12, 0x0a, + 0x0e, 0x43, 0x4d, 0x44, 0x5f, 0x50, 0x55, 0x4c, 0x4c, 0x5f, 0x49, 0x4d, 0x41, 0x47, 0x45, 0x10, + 0x10, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4d, 0x44, 0x5f, 0x47, 0x50, 0x55, 0x5f, 0x49, 0x4e, 0x46, + 0x4f, 0x5f, 0x47, 0x45, 0x54, 0x10, 0x11, 0x12, 0x10, 0x0a, 0x0c, 0x43, 0x4d, 0x44, 0x5f, 0x53, + 0x41, 0x4d, 0x50, 0x4c, 0x49, 0x4e, 0x47, 0x10, 0x12, 0x12, 0x10, 0x0a, 0x0c, 0x43, 0x4d, 0x44, + 0x5f, 0x45, 0x56, 0x41, 0x4c, 0x55, 0x41, 0x54, 0x45, 0x10, 0x13, 0x12, 0x12, 0x0a, 0x0e, 0x43, + 0x4d, 0x44, 0x5f, 0x52, 0x45, 0x50, 0x4f, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x10, 0x14, 0x12, + 0x12, 0x0a, 0x0e, 0x43, 0x4d, 0x44, 0x5f, 0x52, 0x45, 0x50, 0x4f, 0x5f, 0x43, 0x4c, 0x45, 0x41, + 0x52, 0x10, 0x15, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4d, 0x44, 0x5f, 0x56, 0x45, 0x52, 0x53, 0x49, + 0x4f, 0x4e, 0x53, 0x5f, 0x47, 0x45, 0x54, 0x10, 0x16, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x53, 0x45, + 0x52, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x53, 0x45, 0x52, + 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x66, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x53, 0x45, + 0x52, 0x5f, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x10, 0x67, 0x12, 0x0d, 0x0a, 0x09, 0x52, 0x45, + 0x50, 0x4f, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x68, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x50, + 0x4f, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x69, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, + 0x50, 0x4f, 0x5f, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x10, 0x6a, 0x12, 0x10, 0x0a, 0x0b, 0x54, + 0x41, 0x53, 0x4b, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0xe9, 0x07, 0x22, 0x04, 0x08, + 0x0f, 0x10, 0x0f, 0x2a, 0x2f, 0x0a, 0x0e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x4e, 0x4f, 0x54, 0x5f, 0x53, 0x45, 0x54, + 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x47, 0x54, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x50, 0x52, + 0x45, 0x44, 0x10, 0x02, 0x32, 0x66, 0x0a, 0x16, 0x6d, 0x69, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x4c, + 0x0a, 0x13, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x5f, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x2e, 0x79, 0x6d, 0x69, 0x72, 0x2e, 0x62, 0x61, 0x63, + 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x6c, 0x52, 0x65, 0x71, 0x1a, + 0x19, 0x2e, 0x79, 0x6d, 0x69, 0x72, 0x2e, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x2e, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x42, 0x09, 0x5a, 0x07, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_backend_proto_rawDescOnce sync.Once + file_backend_proto_rawDescData = file_backend_proto_rawDesc +) + +func file_backend_proto_rawDescGZIP() []byte { + file_backend_proto_rawDescOnce.Do(func() { + file_backend_proto_rawDescData = protoimpl.X.CompressGZIP(file_backend_proto_rawDescData) + }) + return file_backend_proto_rawDescData +} + +var file_backend_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var file_backend_proto_msgTypes = make([]protoimpl.MessageInfo, 16) +var file_backend_proto_goTypes = []interface{}{ + (MergeStrategy)(0), // 0: ymir.backend.MergeStrategy + (UnknownTypesStrategy)(0), // 1: ymir.backend.UnknownTypesStrategy + (RequestType)(0), // 2: ymir.backend.RequestType + (AnnotationType)(0), // 3: ymir.backend.AnnotationType + (*GeneralReq)(nil), // 4: ymir.backend.GeneralReq + (*GeneralResp)(nil), // 5: ymir.backend.GeneralResp + 
(*ReqCreateTask)(nil), // 6: ymir.backend.ReqCreateTask + (*TaskReqTraining)(nil), // 7: ymir.backend.TaskReqTraining + (*TaskReqMining)(nil), // 8: ymir.backend.TaskReqMining + (*TaskReqImportDataset)(nil), // 9: ymir.backend.TaskReqImportDataset + (*TaskReqExporting)(nil), // 10: ymir.backend.TaskReqExporting + (*TaskReqCopyData)(nil), // 11: ymir.backend.TaskReqCopyData + (*TaskReqLabeling)(nil), // 12: ymir.backend.TaskReqLabeling + (*TaskReqImportModel)(nil), // 13: ymir.backend.TaskReqImportModel + (*RespCMDInference)(nil), // 14: ymir.backend.RespCMDInference + (*LabelCollection)(nil), // 15: ymir.backend.LabelCollection + (*Label)(nil), // 16: ymir.backend.Label + nil, // 17: ymir.backend.GeneralResp.DockerImageConfigEntry + (*TaskReqTraining_TrainingDatasetType)(nil), // 18: ymir.backend.TaskReqTraining.TrainingDatasetType + nil, // 19: ymir.backend.RespCMDInference.ImageAnnotationsEntry + (TaskType)(0), // 20: mir.command.TaskType + (*EvaluateConfig)(nil), // 21: mir.command.EvaluateConfig + (*Evaluation)(nil), // 22: mir.command.Evaluation + (AnnoFormat)(0), // 23: mir.command.AnnoFormat + (TvtType)(0), // 24: mir.command.TvtType + (*SingleImageAnnotations)(nil), // 25: mir.command.SingleImageAnnotations +} +var file_backend_proto_depIdxs = []int32{ + 2, // 0: ymir.backend.GeneralReq.req_type:type_name -> ymir.backend.RequestType + 0, // 1: ymir.backend.GeneralReq.merge_strategy:type_name -> ymir.backend.MergeStrategy + 20, // 2: ymir.backend.GeneralReq.terminated_task_type:type_name -> mir.command.TaskType + 15, // 3: ymir.backend.GeneralReq.label_collection:type_name -> ymir.backend.LabelCollection + 21, // 4: ymir.backend.GeneralReq.evaluate_config:type_name -> mir.command.EvaluateConfig + 6, // 5: ymir.backend.GeneralReq.req_create_task:type_name -> ymir.backend.ReqCreateTask + 17, // 6: ymir.backend.GeneralResp.docker_image_config:type_name -> ymir.backend.GeneralResp.DockerImageConfigEntry + 15, // 7: ymir.backend.GeneralResp.label_collection:type_name -> ymir.backend.LabelCollection + 14, // 8: ymir.backend.GeneralResp.detection:type_name -> ymir.backend.RespCMDInference + 22, // 9: ymir.backend.GeneralResp.evaluation:type_name -> mir.command.Evaluation + 20, // 10: ymir.backend.ReqCreateTask.task_type:type_name -> mir.command.TaskType + 7, // 11: ymir.backend.ReqCreateTask.training:type_name -> ymir.backend.TaskReqTraining + 8, // 12: ymir.backend.ReqCreateTask.mining:type_name -> ymir.backend.TaskReqMining + 9, // 13: ymir.backend.ReqCreateTask.import_dataset:type_name -> ymir.backend.TaskReqImportDataset + 10, // 14: ymir.backend.ReqCreateTask.exporting:type_name -> ymir.backend.TaskReqExporting + 11, // 15: ymir.backend.ReqCreateTask.copy:type_name -> ymir.backend.TaskReqCopyData + 12, // 16: ymir.backend.ReqCreateTask.labeling:type_name -> ymir.backend.TaskReqLabeling + 13, // 17: ymir.backend.ReqCreateTask.import_model:type_name -> ymir.backend.TaskReqImportModel + 18, // 18: ymir.backend.TaskReqTraining.in_dataset_types:type_name -> ymir.backend.TaskReqTraining.TrainingDatasetType + 1, // 19: ymir.backend.TaskReqImportDataset.unknown_types_strategy:type_name -> ymir.backend.UnknownTypesStrategy + 23, // 20: ymir.backend.TaskReqExporting.format:type_name -> mir.command.AnnoFormat + 3, // 21: ymir.backend.TaskReqLabeling.annotation_type:type_name -> ymir.backend.AnnotationType + 19, // 22: ymir.backend.RespCMDInference.image_annotations:type_name -> ymir.backend.RespCMDInference.ImageAnnotationsEntry + 16, // 23: ymir.backend.LabelCollection.labels:type_name -> 
ymir.backend.Label + 24, // 24: ymir.backend.TaskReqTraining.TrainingDatasetType.dataset_type:type_name -> mir.command.TvtType + 25, // 25: ymir.backend.RespCMDInference.ImageAnnotationsEntry.value:type_name -> mir.command.SingleImageAnnotations + 4, // 26: ymir.backend.mir_controller_service.data_manage_request:input_type -> ymir.backend.GeneralReq + 5, // 27: ymir.backend.mir_controller_service.data_manage_request:output_type -> ymir.backend.GeneralResp + 27, // [27:28] is the sub-list for method output_type + 26, // [26:27] is the sub-list for method input_type + 26, // [26:26] is the sub-list for extension type_name + 26, // [26:26] is the sub-list for extension extendee + 0, // [0:26] is the sub-list for field type_name +} + +func init() { file_backend_proto_init() } +func file_backend_proto_init() { + if File_backend_proto != nil { + return + } + file_mir_command_proto_init() + if !protoimpl.UnsafeEnabled { + file_backend_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GeneralReq); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_backend_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GeneralResp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_backend_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReqCreateTask); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_backend_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskReqTraining); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_backend_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskReqMining); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_backend_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskReqImportDataset); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_backend_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskReqExporting); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_backend_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskReqCopyData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_backend_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskReqLabeling); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_backend_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskReqImportModel); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_backend_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*RespCMDInference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_backend_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*LabelCollection); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_backend_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Label); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_backend_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskReqTraining_TrainingDatasetType); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_backend_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*GeneralReq_SamplingCount)(nil), + (*GeneralReq_SamplingRate)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_backend_proto_rawDesc, + NumEnums: 4, + NumMessages: 16, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_backend_proto_goTypes, + DependencyIndexes: file_backend_proto_depIdxs, + EnumInfos: file_backend_proto_enumTypes, + MessageInfos: file_backend_proto_msgTypes, + }.Build() + File_backend_proto = out.File + file_backend_proto_rawDesc = nil + file_backend_proto_goTypes = nil + file_backend_proto_depIdxs = nil +} diff --git a/ymir/backend/src/ymir_hel/protos/backend_grpc.pb.go b/ymir/backend/src/ymir_hel/protos/backend_grpc.pb.go new file mode 100644 index 0000000000..9487d9ba0d --- /dev/null +++ b/ymir/backend/src/ymir_hel/protos/backend_grpc.pb.go @@ -0,0 +1,99 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package protos + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// MirControllerServiceClient is the client API for MirControllerService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type MirControllerServiceClient interface { + DataManageRequest(ctx context.Context, in *GeneralReq, opts ...grpc.CallOption) (*GeneralResp, error) +} + +type mirControllerServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewMirControllerServiceClient(cc grpc.ClientConnInterface) MirControllerServiceClient { + return &mirControllerServiceClient{cc} +} + +func (c *mirControllerServiceClient) DataManageRequest(ctx context.Context, in *GeneralReq, opts ...grpc.CallOption) (*GeneralResp, error) { + out := new(GeneralResp) + err := c.cc.Invoke(ctx, "/ymir.backend.mir_controller_service/data_manage_request", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MirControllerServiceServer is the server API for MirControllerService service. 
+// All implementations should embed UnimplementedMirControllerServiceServer +// for forward compatibility +type MirControllerServiceServer interface { + DataManageRequest(context.Context, *GeneralReq) (*GeneralResp, error) +} + +// UnimplementedMirControllerServiceServer should be embedded to have forward compatible implementations. +type UnimplementedMirControllerServiceServer struct { +} + +func (UnimplementedMirControllerServiceServer) DataManageRequest(context.Context, *GeneralReq) (*GeneralResp, error) { + return nil, status.Errorf(codes.Unimplemented, "method DataManageRequest not implemented") +} + +// UnsafeMirControllerServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to MirControllerServiceServer will +// result in compilation errors. +type UnsafeMirControllerServiceServer interface { + mustEmbedUnimplementedMirControllerServiceServer() +} + +func RegisterMirControllerServiceServer(s grpc.ServiceRegistrar, srv MirControllerServiceServer) { + s.RegisterService(&MirControllerService_ServiceDesc, srv) +} + +func _MirControllerService_DataManageRequest_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GeneralReq) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MirControllerServiceServer).DataManageRequest(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ymir.backend.mir_controller_service/data_manage_request", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MirControllerServiceServer).DataManageRequest(ctx, req.(*GeneralReq)) + } + return interceptor(ctx, in, info, handler) +} + +// MirControllerService_ServiceDesc is the grpc.ServiceDesc for MirControllerService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var MirControllerService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "ymir.backend.mir_controller_service", + HandlerType: (*MirControllerServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "data_manage_request", + Handler: _MirControllerService_DataManageRequest_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "backend.proto", +} diff --git a/ymir/backend/src/ymir_hel/protos/mir_command.pb.go b/ymir/backend/src/ymir_hel/protos/mir_command.pb.go new file mode 100644 index 0000000000..d187ffcaef --- /dev/null +++ b/ymir/backend/src/ymir_hel/protos/mir_command.pb.go @@ -0,0 +1,4213 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.14.0 +// source: mir_command.proto + +package protos + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// / assertion type: training, validation or test +type TvtType int32 + +const ( + TvtType_TvtTypeUnknown TvtType = 0 + TvtType_TvtTypeTraining TvtType = 1 + TvtType_TvtTypeValidation TvtType = 2 + TvtType_TvtTypeTest TvtType = 3 +) + +// Enum value maps for TvtType. 
+var ( + TvtType_name = map[int32]string{ + 0: "TvtTypeUnknown", + 1: "TvtTypeTraining", + 2: "TvtTypeValidation", + 3: "TvtTypeTest", + } + TvtType_value = map[string]int32{ + "TvtTypeUnknown": 0, + "TvtTypeTraining": 1, + "TvtTypeValidation": 2, + "TvtTypeTest": 3, + } +) + +func (x TvtType) Enum() *TvtType { + p := new(TvtType) + *p = x + return p +} + +func (x TvtType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TvtType) Descriptor() protoreflect.EnumDescriptor { + return file_mir_command_proto_enumTypes[0].Descriptor() +} + +func (TvtType) Type() protoreflect.EnumType { + return &file_mir_command_proto_enumTypes[0] +} + +func (x TvtType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TvtType.Descriptor instead. +func (TvtType) EnumDescriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{0} +} + +type AssetType int32 + +const ( + AssetType_AssetTypeUnknown AssetType = 0 + AssetType_AssetTypeImageJpeg AssetType = 1 + AssetType_AssetTypeImagePng AssetType = 2 + AssetType_AssetTypeImagePixelMat AssetType = 3 + AssetType_AssetTypeImageYuv420p AssetType = 4 + AssetType_AssetTypeImageYuv420sp AssetType = 5 + AssetType_AssetTypeImageYuv422p AssetType = 6 + AssetType_AssetTypeImageYuv422sp AssetType = 7 + AssetType_AssetTypeImageBmp AssetType = 8 + AssetType_AssetTypeVideoMp4 AssetType = 101 +) + +// Enum value maps for AssetType. +var ( + AssetType_name = map[int32]string{ + 0: "AssetTypeUnknown", + 1: "AssetTypeImageJpeg", + 2: "AssetTypeImagePng", + 3: "AssetTypeImagePixelMat", + 4: "AssetTypeImageYuv420p", + 5: "AssetTypeImageYuv420sp", + 6: "AssetTypeImageYuv422p", + 7: "AssetTypeImageYuv422sp", + 8: "AssetTypeImageBmp", + 101: "AssetTypeVideoMp4", + } + AssetType_value = map[string]int32{ + "AssetTypeUnknown": 0, + "AssetTypeImageJpeg": 1, + "AssetTypeImagePng": 2, + "AssetTypeImagePixelMat": 3, + "AssetTypeImageYuv420p": 4, + "AssetTypeImageYuv420sp": 5, + "AssetTypeImageYuv422p": 6, + "AssetTypeImageYuv422sp": 7, + "AssetTypeImageBmp": 8, + "AssetTypeVideoMp4": 101, + } +) + +func (x AssetType) Enum() *AssetType { + p := new(AssetType) + *p = x + return p +} + +func (x AssetType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AssetType) Descriptor() protoreflect.EnumDescriptor { + return file_mir_command_proto_enumTypes[1].Descriptor() +} + +func (AssetType) Type() protoreflect.EnumType { + return &file_mir_command_proto_enumTypes[1] +} + +func (x AssetType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AssetType.Descriptor instead. 
+func (AssetType) EnumDescriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{1} +} + +// / task type +type TaskType int32 + +const ( + TaskType_TaskTypeUnknown TaskType = 0 + TaskType_TaskTypeTraining TaskType = 1 + TaskType_TaskTypeMining TaskType = 2 + TaskType_TaskTypeLabel TaskType = 3 + TaskType_TaskTypeFilter TaskType = 4 + TaskType_TaskTypeImportData TaskType = 5 + TaskType_TaskTypeExportData TaskType = 6 + TaskType_TaskTypeCopyData TaskType = 7 + TaskType_TaskTypeMerge TaskType = 8 + TaskType_TaskTypeInfer TaskType = 9 + TaskType_TaskTypeSampling TaskType = 10 + // / used by ymir_controller + TaskType_TaskTypeFusion TaskType = 11 + TaskType_TaskTypeInit TaskType = 12 + TaskType_TaskTypeImportModel TaskType = 13 + TaskType_TaskTypeCopyModel TaskType = 14 + TaskType_TaskTypeDatasetInfer TaskType = 15 + TaskType_TaskTypeEvaluate TaskType = 16 +) + +// Enum value maps for TaskType. +var ( + TaskType_name = map[int32]string{ + 0: "TaskTypeUnknown", + 1: "TaskTypeTraining", + 2: "TaskTypeMining", + 3: "TaskTypeLabel", + 4: "TaskTypeFilter", + 5: "TaskTypeImportData", + 6: "TaskTypeExportData", + 7: "TaskTypeCopyData", + 8: "TaskTypeMerge", + 9: "TaskTypeInfer", + 10: "TaskTypeSampling", + 11: "TaskTypeFusion", + 12: "TaskTypeInit", + 13: "TaskTypeImportModel", + 14: "TaskTypeCopyModel", + 15: "TaskTypeDatasetInfer", + 16: "TaskTypeEvaluate", + } + TaskType_value = map[string]int32{ + "TaskTypeUnknown": 0, + "TaskTypeTraining": 1, + "TaskTypeMining": 2, + "TaskTypeLabel": 3, + "TaskTypeFilter": 4, + "TaskTypeImportData": 5, + "TaskTypeExportData": 6, + "TaskTypeCopyData": 7, + "TaskTypeMerge": 8, + "TaskTypeInfer": 9, + "TaskTypeSampling": 10, + "TaskTypeFusion": 11, + "TaskTypeInit": 12, + "TaskTypeImportModel": 13, + "TaskTypeCopyModel": 14, + "TaskTypeDatasetInfer": 15, + "TaskTypeEvaluate": 16, + } +) + +func (x TaskType) Enum() *TaskType { + p := new(TaskType) + *p = x + return p +} + +func (x TaskType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TaskType) Descriptor() protoreflect.EnumDescriptor { + return file_mir_command_proto_enumTypes[2].Descriptor() +} + +func (TaskType) Type() protoreflect.EnumType { + return &file_mir_command_proto_enumTypes[2] +} + +func (x TaskType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TaskType.Descriptor instead. +func (TaskType) EnumDescriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{2} +} + +type TaskState int32 + +const ( + TaskState_TaskStateUnknown TaskState = 0 + TaskState_TaskStatePending TaskState = 1 + TaskState_TaskStateRunning TaskState = 2 + TaskState_TaskStateDone TaskState = 3 + TaskState_TaskStateError TaskState = 4 + TaskState_TaskStateMiss TaskState = 5 +) + +// Enum value maps for TaskState. 
+var ( + TaskState_name = map[int32]string{ + 0: "TaskStateUnknown", + 1: "TaskStatePending", + 2: "TaskStateRunning", + 3: "TaskStateDone", + 4: "TaskStateError", + 5: "TaskStateMiss", + } + TaskState_value = map[string]int32{ + "TaskStateUnknown": 0, + "TaskStatePending": 1, + "TaskStateRunning": 2, + "TaskStateDone": 3, + "TaskStateError": 4, + "TaskStateMiss": 5, + } +) + +func (x TaskState) Enum() *TaskState { + p := new(TaskState) + *p = x + return p +} + +func (x TaskState) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TaskState) Descriptor() protoreflect.EnumDescriptor { + return file_mir_command_proto_enumTypes[3].Descriptor() +} + +func (TaskState) Type() protoreflect.EnumType { + return &file_mir_command_proto_enumTypes[3] +} + +func (x TaskState) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TaskState.Descriptor instead. +func (TaskState) EnumDescriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{3} +} + +type Sha1Type int32 + +const ( + Sha1Type_SHA1_TYPE_UNKNOWN Sha1Type = 0 + Sha1Type_SHA1_TYPE_ASSET Sha1Type = 1 + Sha1Type_SHA1_TYPE_COMMIT Sha1Type = 2 +) + +// Enum value maps for Sha1Type. +var ( + Sha1Type_name = map[int32]string{ + 0: "SHA1_TYPE_UNKNOWN", + 1: "SHA1_TYPE_ASSET", + 2: "SHA1_TYPE_COMMIT", + } + Sha1Type_value = map[string]int32{ + "SHA1_TYPE_UNKNOWN": 0, + "SHA1_TYPE_ASSET": 1, + "SHA1_TYPE_COMMIT": 2, + } +) + +func (x Sha1Type) Enum() *Sha1Type { + p := new(Sha1Type) + *p = x + return p +} + +func (x Sha1Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Sha1Type) Descriptor() protoreflect.EnumDescriptor { + return file_mir_command_proto_enumTypes[4].Descriptor() +} + +func (Sha1Type) Type() protoreflect.EnumType { + return &file_mir_command_proto_enumTypes[4] +} + +func (x Sha1Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Sha1Type.Descriptor instead. +func (Sha1Type) EnumDescriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{4} +} + +type MirStorage int32 + +const ( + MirStorage_MIR_METADATAS MirStorage = 0 + MirStorage_MIR_ANNOTATIONS MirStorage = 1 + MirStorage_MIR_KEYWORDS MirStorage = 2 + MirStorage_MIR_TASKS MirStorage = 3 + MirStorage_MIR_CONTEXT MirStorage = 4 +) + +// Enum value maps for MirStorage. +var ( + MirStorage_name = map[int32]string{ + 0: "MIR_METADATAS", + 1: "MIR_ANNOTATIONS", + 2: "MIR_KEYWORDS", + 3: "MIR_TASKS", + 4: "MIR_CONTEXT", + } + MirStorage_value = map[string]int32{ + "MIR_METADATAS": 0, + "MIR_ANNOTATIONS": 1, + "MIR_KEYWORDS": 2, + "MIR_TASKS": 3, + "MIR_CONTEXT": 4, + } +) + +func (x MirStorage) Enum() *MirStorage { + p := new(MirStorage) + *p = x + return p +} + +func (x MirStorage) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (MirStorage) Descriptor() protoreflect.EnumDescriptor { + return file_mir_command_proto_enumTypes[5].Descriptor() +} + +func (MirStorage) Type() protoreflect.EnumType { + return &file_mir_command_proto_enumTypes[5] +} + +func (x MirStorage) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use MirStorage.Descriptor instead. 
+func (MirStorage) EnumDescriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{5} +} + +type AnnoFormat int32 + +const ( + AnnoFormat_AF_NO_ANNOTATION AnnoFormat = 0 + AnnoFormat_AF_DET_PASCAL_VOC AnnoFormat = 1 + AnnoFormat_AF_DET_ARK_JSON AnnoFormat = 2 + AnnoFormat_AF_DET_LS_JSON AnnoFormat = 3 + AnnoFormat_AF_SEG_POLYGON AnnoFormat = 4 + AnnoFormat_AF_SEG_MASK AnnoFormat = 5 +) + +// Enum value maps for AnnoFormat. +var ( + AnnoFormat_name = map[int32]string{ + 0: "AF_NO_ANNOTATION", + 1: "AF_DET_PASCAL_VOC", + 2: "AF_DET_ARK_JSON", + 3: "AF_DET_LS_JSON", + 4: "AF_SEG_POLYGON", + 5: "AF_SEG_MASK", + } + AnnoFormat_value = map[string]int32{ + "AF_NO_ANNOTATION": 0, + "AF_DET_PASCAL_VOC": 1, + "AF_DET_ARK_JSON": 2, + "AF_DET_LS_JSON": 3, + "AF_SEG_POLYGON": 4, + "AF_SEG_MASK": 5, + } +) + +func (x AnnoFormat) Enum() *AnnoFormat { + p := new(AnnoFormat) + *p = x + return p +} + +func (x AnnoFormat) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AnnoFormat) Descriptor() protoreflect.EnumDescriptor { + return file_mir_command_proto_enumTypes[6].Descriptor() +} + +func (AnnoFormat) Type() protoreflect.EnumType { + return &file_mir_command_proto_enumTypes[6] +} + +func (x AnnoFormat) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AnnoFormat.Descriptor instead. +func (AnnoFormat) EnumDescriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{6} +} + +type AssetFormat int32 + +const ( + AssetFormat_AF_UNKNOWN AssetFormat = 0 + AssetFormat_AF_RAW AssetFormat = 1 + AssetFormat_AF_LMDB AssetFormat = 2 +) + +// Enum value maps for AssetFormat. +var ( + AssetFormat_name = map[int32]string{ + 0: "AF_UNKNOWN", + 1: "AF_RAW", + 2: "AF_LMDB", + } + AssetFormat_value = map[string]int32{ + "AF_UNKNOWN": 0, + "AF_RAW": 1, + "AF_LMDB": 2, + } +) + +func (x AssetFormat) Enum() *AssetFormat { + p := new(AssetFormat) + *p = x + return p +} + +func (x AssetFormat) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AssetFormat) Descriptor() protoreflect.EnumDescriptor { + return file_mir_command_proto_enumTypes[7].Descriptor() +} + +func (AssetFormat) Type() protoreflect.EnumType { + return &file_mir_command_proto_enumTypes[7] +} + +func (x AssetFormat) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AssetFormat.Descriptor instead. +func (AssetFormat) EnumDescriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{7} +} + +type AnnoType int32 + +const ( + AnnoType_AT_UNKNOWN AnnoType = 0 + AnnoType_AT_CLASS AnnoType = 1 // Classification with class id, not implemented. + AnnoType_AT_DET_BOX AnnoType = 2 // Detection w. bounding box. + AnnoType_AT_SEG_POLYGON AnnoType = 3 // Semantic Segmentation w. polygons. + AnnoType_AT_SEG_MASK AnnoType = 4 // Instance Segmentation w. mask. +) + +// Enum value maps for AnnoType. 
+var ( + AnnoType_name = map[int32]string{ + 0: "AT_UNKNOWN", + 1: "AT_CLASS", + 2: "AT_DET_BOX", + 3: "AT_SEG_POLYGON", + 4: "AT_SEG_MASK", + } + AnnoType_value = map[string]int32{ + "AT_UNKNOWN": 0, + "AT_CLASS": 1, + "AT_DET_BOX": 2, + "AT_SEG_POLYGON": 3, + "AT_SEG_MASK": 4, + } +) + +func (x AnnoType) Enum() *AnnoType { + p := new(AnnoType) + *p = x + return p +} + +func (x AnnoType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AnnoType) Descriptor() protoreflect.EnumDescriptor { + return file_mir_command_proto_enumTypes[8].Descriptor() +} + +func (AnnoType) Type() protoreflect.EnumType { + return &file_mir_command_proto_enumTypes[8] +} + +func (x AnnoType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AnnoType.Descriptor instead. +func (AnnoType) EnumDescriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{8} +} + +type ConfusionMatrixType int32 + +const ( + ConfusionMatrixType_NotSet ConfusionMatrixType = 0 + ConfusionMatrixType_TP ConfusionMatrixType = 1 + ConfusionMatrixType_FP ConfusionMatrixType = 2 + ConfusionMatrixType_FN ConfusionMatrixType = 3 + ConfusionMatrixType_TN ConfusionMatrixType = 4 + ConfusionMatrixType_Unknown ConfusionMatrixType = 5 + // Matched True Positive, only for gt. + ConfusionMatrixType_MTP ConfusionMatrixType = 11 + ConfusionMatrixType_IGNORED ConfusionMatrixType = 12 +) + +// Enum value maps for ConfusionMatrixType. +var ( + ConfusionMatrixType_name = map[int32]string{ + 0: "NotSet", + 1: "TP", + 2: "FP", + 3: "FN", + 4: "TN", + 5: "Unknown", + 11: "MTP", + 12: "IGNORED", + } + ConfusionMatrixType_value = map[string]int32{ + "NotSet": 0, + "TP": 1, + "FP": 2, + "FN": 3, + "TN": 4, + "Unknown": 5, + "MTP": 11, + "IGNORED": 12, + } +) + +func (x ConfusionMatrixType) Enum() *ConfusionMatrixType { + p := new(ConfusionMatrixType) + *p = x + return p +} + +func (x ConfusionMatrixType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ConfusionMatrixType) Descriptor() protoreflect.EnumDescriptor { + return file_mir_command_proto_enumTypes[9].Descriptor() +} + +func (ConfusionMatrixType) Type() protoreflect.EnumType { + return &file_mir_command_proto_enumTypes[9] +} + +func (x ConfusionMatrixType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ConfusionMatrixType.Descriptor instead. +func (ConfusionMatrixType) EnumDescriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{9} +} + +type EvaluationState int32 + +const ( + // evaluation not started + EvaluationState_ES_NOT_SET EvaluationState = 0 + // evaluation result ready to use + EvaluationState_ES_READY EvaluationState = 1 + // evaluation not finished because there's no gt or pred + EvaluationState_ES_NO_GT_OR_PRED EvaluationState = 2 + // evaluation not finished because there are too many images or too many class ids + EvaluationState_ES_EXCEEDS_LIMIT EvaluationState = 3 + // evaluation not finished because there are no class ids to evaluate + EvaluationState_ES_NO_CLASS_IDS EvaluationState = 4 +) + +// Enum value maps for EvaluationState. 
+var ( + EvaluationState_name = map[int32]string{ + 0: "ES_NOT_SET", + 1: "ES_READY", + 2: "ES_NO_GT_OR_PRED", + 3: "ES_EXCEEDS_LIMIT", + 4: "ES_NO_CLASS_IDS", + } + EvaluationState_value = map[string]int32{ + "ES_NOT_SET": 0, + "ES_READY": 1, + "ES_NO_GT_OR_PRED": 2, + "ES_EXCEEDS_LIMIT": 3, + "ES_NO_CLASS_IDS": 4, + } +) + +func (x EvaluationState) Enum() *EvaluationState { + p := new(EvaluationState) + *p = x + return p +} + +func (x EvaluationState) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (EvaluationState) Descriptor() protoreflect.EnumDescriptor { + return file_mir_command_proto_enumTypes[10].Descriptor() +} + +func (EvaluationState) Type() protoreflect.EnumType { + return &file_mir_command_proto_enumTypes[10] +} + +func (x EvaluationState) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use EvaluationState.Descriptor instead. +func (EvaluationState) EnumDescriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{10} +} + +// / ========== metadatas.mir ========== +type MirMetadatas struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // / key: asset hash, value: attributes + Attributes map[string]*MetadataAttributes `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *MirMetadatas) Reset() { + *x = MirMetadatas{} + if protoimpl.UnsafeEnabled { + mi := &file_mir_command_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MirMetadatas) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MirMetadatas) ProtoMessage() {} + +func (x *MirMetadatas) ProtoReflect() protoreflect.Message { + mi := &file_mir_command_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MirMetadatas.ProtoReflect.Descriptor instead. 
+func (*MirMetadatas) Descriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{0} +} + +func (x *MirMetadatas) GetAttributes() map[string]*MetadataAttributes { + if x != nil { + return x.Attributes + } + return nil +} + +type MetadataAttributes struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Timestamp *Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + TvtType TvtType `protobuf:"varint,3,opt,name=tvt_type,json=tvtType,proto3,enum=mir.command.TvtType" json:"tvt_type,omitempty"` + AssetType AssetType `protobuf:"varint,4,opt,name=asset_type,json=assetType,proto3,enum=mir.command.AssetType" json:"asset_type,omitempty"` + Width int32 `protobuf:"varint,5,opt,name=width,proto3" json:"width,omitempty"` /// column number + Height int32 `protobuf:"varint,6,opt,name=height,proto3" json:"height,omitempty"` /// row number + ImageChannels int32 `protobuf:"varint,7,opt,name=image_channels,json=imageChannels,proto3" json:"image_channels,omitempty"` /// (for images) channel count + ByteSize int32 `protobuf:"varint,8,opt,name=byte_size,json=byteSize,proto3" json:"byte_size,omitempty"` + OriginFilename string `protobuf:"bytes,9,opt,name=origin_filename,json=originFilename,proto3" json:"origin_filename,omitempty"` +} + +func (x *MetadataAttributes) Reset() { + *x = MetadataAttributes{} + if protoimpl.UnsafeEnabled { + mi := &file_mir_command_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MetadataAttributes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MetadataAttributes) ProtoMessage() {} + +func (x *MetadataAttributes) ProtoReflect() protoreflect.Message { + mi := &file_mir_command_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MetadataAttributes.ProtoReflect.Descriptor instead. +func (*MetadataAttributes) Descriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{1} +} + +func (x *MetadataAttributes) GetTimestamp() *Timestamp { + if x != nil { + return x.Timestamp + } + return nil +} + +func (x *MetadataAttributes) GetTvtType() TvtType { + if x != nil { + return x.TvtType + } + return TvtType_TvtTypeUnknown +} + +func (x *MetadataAttributes) GetAssetType() AssetType { + if x != nil { + return x.AssetType + } + return AssetType_AssetTypeUnknown +} + +func (x *MetadataAttributes) GetWidth() int32 { + if x != nil { + return x.Width + } + return 0 +} + +func (x *MetadataAttributes) GetHeight() int32 { + if x != nil { + return x.Height + } + return 0 +} + +func (x *MetadataAttributes) GetImageChannels() int32 { + if x != nil { + return x.ImageChannels + } + return 0 +} + +func (x *MetadataAttributes) GetByteSize() int32 { + if x != nil { + return x.ByteSize + } + return 0 +} + +func (x *MetadataAttributes) GetOriginFilename() string { + if x != nil { + return x.OriginFilename + } + return "" +} + +type Timestamp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // / start time stamp, use int32 as int64 is not correctly parsed. 
+ Start int32 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` + // / duration (in seconds), for images, it's always 0 + Duration float32 `protobuf:"fixed32,2,opt,name=duration,proto3" json:"duration,omitempty"` +} + +func (x *Timestamp) Reset() { + *x = Timestamp{} + if protoimpl.UnsafeEnabled { + mi := &file_mir_command_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Timestamp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Timestamp) ProtoMessage() {} + +func (x *Timestamp) ProtoReflect() protoreflect.Message { + mi := &file_mir_command_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Timestamp.ProtoReflect.Descriptor instead. +func (*Timestamp) Descriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{2} +} + +func (x *Timestamp) GetStart() int32 { + if x != nil { + return x.Start + } + return 0 +} + +func (x *Timestamp) GetDuration() float32 { + if x != nil { + return x.Duration + } + return 0 +} + +// / ========== annotations.mir ========== +type MirAnnotations struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + GroundTruth *SingleTaskAnnotations `protobuf:"bytes,3,opt,name=ground_truth,json=groundTruth,proto3" json:"ground_truth,omitempty"` + Prediction *SingleTaskAnnotations `protobuf:"bytes,4,opt,name=prediction,proto3" json:"prediction,omitempty"` + // key: asset id, value: cks and image quality, from pred and gt + ImageCks map[string]*SingleImageCks `protobuf:"bytes,5,rep,name=image_cks,json=imageCks,proto3" json:"image_cks,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *MirAnnotations) Reset() { + *x = MirAnnotations{} + if protoimpl.UnsafeEnabled { + mi := &file_mir_command_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MirAnnotations) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MirAnnotations) ProtoMessage() {} + +func (x *MirAnnotations) ProtoReflect() protoreflect.Message { + mi := &file_mir_command_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MirAnnotations.ProtoReflect.Descriptor instead. 
+func (*MirAnnotations) Descriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{3} +} + +func (x *MirAnnotations) GetGroundTruth() *SingleTaskAnnotations { + if x != nil { + return x.GroundTruth + } + return nil +} + +func (x *MirAnnotations) GetPrediction() *SingleTaskAnnotations { + if x != nil { + return x.Prediction + } + return nil +} + +func (x *MirAnnotations) GetImageCks() map[string]*SingleImageCks { + if x != nil { + return x.ImageCks + } + return nil +} + +type SingleTaskAnnotations struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // / key: image id, value: annotations of that single image + ImageAnnotations map[string]*SingleImageAnnotations `protobuf:"bytes,1,rep,name=image_annotations,json=imageAnnotations,proto3" json:"image_annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + TaskId string `protobuf:"bytes,2,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + Type AnnoType `protobuf:"varint,3,opt,name=type,proto3,enum=mir.command.AnnoType" json:"type,omitempty"` + // Set of all shown class ids. + TaskClassIds []int32 `protobuf:"varint,4,rep,packed,name=task_class_ids,json=taskClassIds,proto3" json:"task_class_ids,omitempty"` + MapIdColor map[int32]*IntPoint `protobuf:"bytes,5,rep,name=map_id_color,json=mapIdColor,proto3" json:"map_id_color,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // meta infos of this SingleTaskAnnotations + EvalClassIds []int32 `protobuf:"varint,10,rep,packed,name=eval_class_ids,json=evalClassIds,proto3" json:"eval_class_ids,omitempty"` + // model meta info associated with this single_task_annotations + Model *ModelMeta `protobuf:"bytes,11,opt,name=model,proto3" json:"model,omitempty"` + // executor config used to generate this single task annotations + ExecutorConfig string `protobuf:"bytes,12,opt,name=executor_config,json=executorConfig,proto3" json:"executor_config,omitempty"` +} + +func (x *SingleTaskAnnotations) Reset() { + *x = SingleTaskAnnotations{} + if protoimpl.UnsafeEnabled { + mi := &file_mir_command_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SingleTaskAnnotations) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SingleTaskAnnotations) ProtoMessage() {} + +func (x *SingleTaskAnnotations) ProtoReflect() protoreflect.Message { + mi := &file_mir_command_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SingleTaskAnnotations.ProtoReflect.Descriptor instead. 
+func (*SingleTaskAnnotations) Descriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{4} +} + +func (x *SingleTaskAnnotations) GetImageAnnotations() map[string]*SingleImageAnnotations { + if x != nil { + return x.ImageAnnotations + } + return nil +} + +func (x *SingleTaskAnnotations) GetTaskId() string { + if x != nil { + return x.TaskId + } + return "" +} + +func (x *SingleTaskAnnotations) GetType() AnnoType { + if x != nil { + return x.Type + } + return AnnoType_AT_UNKNOWN +} + +func (x *SingleTaskAnnotations) GetTaskClassIds() []int32 { + if x != nil { + return x.TaskClassIds + } + return nil +} + +func (x *SingleTaskAnnotations) GetMapIdColor() map[int32]*IntPoint { + if x != nil { + return x.MapIdColor + } + return nil +} + +func (x *SingleTaskAnnotations) GetEvalClassIds() []int32 { + if x != nil { + return x.EvalClassIds + } + return nil +} + +func (x *SingleTaskAnnotations) GetModel() *ModelMeta { + if x != nil { + return x.Model + } + return nil +} + +func (x *SingleTaskAnnotations) GetExecutorConfig() string { + if x != nil { + return x.ExecutorConfig + } + return "" +} + +type SingleImageAnnotations struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Boxes []*ObjectAnnotation `protobuf:"bytes,2,rep,name=boxes,proto3" json:"boxes,omitempty"` + Polygons []*ObjectAnnotation `protobuf:"bytes,3,rep,name=polygons,proto3" json:"polygons,omitempty"` + Mask *MaskAnnotation `protobuf:"bytes,4,opt,name=mask,proto3" json:"mask,omitempty"` + // Set of class ids shown in this image. + ImgClassIds []int32 `protobuf:"varint,5,rep,packed,name=img_class_ids,json=imgClassIds,proto3" json:"img_class_ids,omitempty"` +} + +func (x *SingleImageAnnotations) Reset() { + *x = SingleImageAnnotations{} + if protoimpl.UnsafeEnabled { + mi := &file_mir_command_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SingleImageAnnotations) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SingleImageAnnotations) ProtoMessage() {} + +func (x *SingleImageAnnotations) ProtoReflect() protoreflect.Message { + mi := &file_mir_command_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SingleImageAnnotations.ProtoReflect.Descriptor instead. 
+func (*SingleImageAnnotations) Descriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{5} +} + +func (x *SingleImageAnnotations) GetBoxes() []*ObjectAnnotation { + if x != nil { + return x.Boxes + } + return nil +} + +func (x *SingleImageAnnotations) GetPolygons() []*ObjectAnnotation { + if x != nil { + return x.Polygons + } + return nil +} + +func (x *SingleImageAnnotations) GetMask() *MaskAnnotation { + if x != nil { + return x.Mask + } + return nil +} + +func (x *SingleImageAnnotations) GetImgClassIds() []int32 { + if x != nil { + return x.ImgClassIds + } + return nil +} + +type SingleImageCks struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Cks map[string]string `protobuf:"bytes,1,rep,name=cks,proto3" json:"cks,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + ImageQuality float32 `protobuf:"fixed32,2,opt,name=image_quality,json=imageQuality,proto3" json:"image_quality,omitempty"` +} + +func (x *SingleImageCks) Reset() { + *x = SingleImageCks{} + if protoimpl.UnsafeEnabled { + mi := &file_mir_command_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SingleImageCks) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SingleImageCks) ProtoMessage() {} + +func (x *SingleImageCks) ProtoReflect() protoreflect.Message { + mi := &file_mir_command_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SingleImageCks.ProtoReflect.Descriptor instead. +func (*SingleImageCks) Descriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{6} +} + +func (x *SingleImageCks) GetCks() map[string]string { + if x != nil { + return x.Cks + } + return nil +} + +func (x *SingleImageCks) GetImageQuality() float32 { + if x != nil { + return x.ImageQuality + } + return 0 +} + +type MaskAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // PNG image with 3 channels where each pixel corresponds to a class_id. + SemanticMask []byte `protobuf:"bytes,1,opt,name=semantic_mask,json=semanticMask,proto3" json:"semantic_mask,omitempty"` + // PNG image with 3 channels where each pixel corresponds to an object_id. + InstanceMask []byte `protobuf:"bytes,2,opt,name=instance_mask,json=instanceMask,proto3" json:"instance_mask,omitempty"` + ObjectIds []int32 `protobuf:"varint,3,rep,packed,name=object_ids,json=objectIds,proto3" json:"object_ids,omitempty"` +} + +func (x *MaskAnnotation) Reset() { + *x = MaskAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_mir_command_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MaskAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MaskAnnotation) ProtoMessage() {} + +func (x *MaskAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_mir_command_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MaskAnnotation.ProtoReflect.Descriptor instead. 
+func (*MaskAnnotation) Descriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{7} +} + +func (x *MaskAnnotation) GetSemanticMask() []byte { + if x != nil { + return x.SemanticMask + } + return nil +} + +func (x *MaskAnnotation) GetInstanceMask() []byte { + if x != nil { + return x.InstanceMask + } + return nil +} + +func (x *MaskAnnotation) GetObjectIds() []int32 { + if x != nil { + return x.ObjectIds + } + return nil +} + +type ObjectAnnotation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Index of this annotation in current single image, may be different from the index in repeated field. + Index int32 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"` + Box *Rect `protobuf:"bytes,2,opt,name=box,proto3" json:"box,omitempty"` + ClassId int32 `protobuf:"varint,3,opt,name=class_id,json=classId,proto3" json:"class_id,omitempty"` + Score float64 `protobuf:"fixed64,4,opt,name=score,proto3" json:"score,omitempty"` + AnnoQuality float32 `protobuf:"fixed32,5,opt,name=anno_quality,json=annoQuality,proto3" json:"anno_quality,omitempty"` + Tags map[string]string `protobuf:"bytes,6,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Cm ConfusionMatrixType `protobuf:"varint,7,opt,name=cm,proto3,enum=mir.command.ConfusionMatrixType" json:"cm,omitempty"` + DetLinkId int32 `protobuf:"varint,8,opt,name=det_link_id,json=detLinkId,proto3" json:"det_link_id,omitempty"` + ClassName string `protobuf:"bytes,9,opt,name=class_name,json=className,proto3" json:"class_name,omitempty"` // for data parsed from outside, e.g. inference. + Polygon []*IntPoint `protobuf:"bytes,10,rep,name=polygon,proto3" json:"polygon,omitempty"` +} + +func (x *ObjectAnnotation) Reset() { + *x = ObjectAnnotation{} + if protoimpl.UnsafeEnabled { + mi := &file_mir_command_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ObjectAnnotation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ObjectAnnotation) ProtoMessage() {} + +func (x *ObjectAnnotation) ProtoReflect() protoreflect.Message { + mi := &file_mir_command_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ObjectAnnotation.ProtoReflect.Descriptor instead. 
+func (*ObjectAnnotation) Descriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{8} +} + +func (x *ObjectAnnotation) GetIndex() int32 { + if x != nil { + return x.Index + } + return 0 +} + +func (x *ObjectAnnotation) GetBox() *Rect { + if x != nil { + return x.Box + } + return nil +} + +func (x *ObjectAnnotation) GetClassId() int32 { + if x != nil { + return x.ClassId + } + return 0 +} + +func (x *ObjectAnnotation) GetScore() float64 { + if x != nil { + return x.Score + } + return 0 +} + +func (x *ObjectAnnotation) GetAnnoQuality() float32 { + if x != nil { + return x.AnnoQuality + } + return 0 +} + +func (x *ObjectAnnotation) GetTags() map[string]string { + if x != nil { + return x.Tags + } + return nil +} + +func (x *ObjectAnnotation) GetCm() ConfusionMatrixType { + if x != nil { + return x.Cm + } + return ConfusionMatrixType_NotSet +} + +func (x *ObjectAnnotation) GetDetLinkId() int32 { + if x != nil { + return x.DetLinkId + } + return 0 +} + +func (x *ObjectAnnotation) GetClassName() string { + if x != nil { + return x.ClassName + } + return "" +} + +func (x *ObjectAnnotation) GetPolygon() []*IntPoint { + if x != nil { + return x.Polygon + } + return nil +} + +type Rect struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + X int32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"` + Y int32 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"` + W int32 `protobuf:"varint,3,opt,name=w,proto3" json:"w,omitempty"` + H int32 `protobuf:"varint,4,opt,name=h,proto3" json:"h,omitempty"` + RotateAngle float32 `protobuf:"fixed32,5,opt,name=rotate_angle,json=rotateAngle,proto3" json:"rotate_angle,omitempty"` // unit in pi. +} + +func (x *Rect) Reset() { + *x = Rect{} + if protoimpl.UnsafeEnabled { + mi := &file_mir_command_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Rect) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Rect) ProtoMessage() {} + +func (x *Rect) ProtoReflect() protoreflect.Message { + mi := &file_mir_command_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Rect.ProtoReflect.Descriptor instead. 
+func (*Rect) Descriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{9} +} + +func (x *Rect) GetX() int32 { + if x != nil { + return x.X + } + return 0 +} + +func (x *Rect) GetY() int32 { + if x != nil { + return x.Y + } + return 0 +} + +func (x *Rect) GetW() int32 { + if x != nil { + return x.W + } + return 0 +} + +func (x *Rect) GetH() int32 { + if x != nil { + return x.H + } + return 0 +} + +func (x *Rect) GetRotateAngle() float32 { + if x != nil { + return x.RotateAngle + } + return 0 +} + +// / ========== keywords.mir ========== +type MirKeywords struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PredIdx *CiTagToIndex `protobuf:"bytes,7,opt,name=pred_idx,json=predIdx,proto3" json:"pred_idx,omitempty"` // ci to assets, generated from preds + GtIdx *CiTagToIndex `protobuf:"bytes,8,opt,name=gt_idx,json=gtIdx,proto3" json:"gt_idx,omitempty"` // ci to assets, generated from gt + // key: ck main key, value: assets and assets with sub keys, from (mir_annotations.image_cks) pred and gt + CkIdx map[string]*AssetAnnoIndex `protobuf:"bytes,9,rep,name=ck_idx,json=ckIdx,proto3" json:"ck_idx,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *MirKeywords) Reset() { + *x = MirKeywords{} + if protoimpl.UnsafeEnabled { + mi := &file_mir_command_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MirKeywords) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MirKeywords) ProtoMessage() {} + +func (x *MirKeywords) ProtoReflect() protoreflect.Message { + mi := &file_mir_command_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MirKeywords.ProtoReflect.Descriptor instead. 
+func (*MirKeywords) Descriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{10} +} + +func (x *MirKeywords) GetPredIdx() *CiTagToIndex { + if x != nil { + return x.PredIdx + } + return nil +} + +func (x *MirKeywords) GetGtIdx() *CiTagToIndex { + if x != nil { + return x.GtIdx + } + return nil +} + +func (x *MirKeywords) GetCkIdx() map[string]*AssetAnnoIndex { + if x != nil { + return x.CkIdx + } + return nil +} + +type CiTagToIndex struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // key: ci, value: annos + Cis map[int32]*MapStringToInt32List `protobuf:"bytes,1,rep,name=cis,proto3" json:"cis,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // key: ck main key, value: annos and annos with sub keys + Tags map[string]*AssetAnnoIndex `protobuf:"bytes,2,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *CiTagToIndex) Reset() { + *x = CiTagToIndex{} + if protoimpl.UnsafeEnabled { + mi := &file_mir_command_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CiTagToIndex) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CiTagToIndex) ProtoMessage() {} + +func (x *CiTagToIndex) ProtoReflect() protoreflect.Message { + mi := &file_mir_command_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CiTagToIndex.ProtoReflect.Descriptor instead. +func (*CiTagToIndex) Descriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{11} +} + +func (x *CiTagToIndex) GetCis() map[int32]*MapStringToInt32List { + if x != nil { + return x.Cis + } + return nil +} + +func (x *CiTagToIndex) GetTags() map[string]*AssetAnnoIndex { + if x != nil { + return x.Tags + } + return nil +} + +type StringList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AssetIds []string `protobuf:"bytes,1,rep,name=asset_ids,json=assetIds,proto3" json:"asset_ids,omitempty"` +} + +func (x *StringList) Reset() { + *x = StringList{} + if protoimpl.UnsafeEnabled { + mi := &file_mir_command_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StringList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StringList) ProtoMessage() {} + +func (x *StringList) ProtoReflect() protoreflect.Message { + mi := &file_mir_command_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StringList.ProtoReflect.Descriptor instead. 
+func (*StringList) Descriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{12} +} + +func (x *StringList) GetAssetIds() []string { + if x != nil { + return x.AssetIds + } + return nil +} + +type MapStringToInt32List struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + KeyIds map[string]*Int32List `protobuf:"bytes,1,rep,name=key_ids,json=keyIds,proto3" json:"key_ids,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *MapStringToInt32List) Reset() { + *x = MapStringToInt32List{} + if protoimpl.UnsafeEnabled { + mi := &file_mir_command_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MapStringToInt32List) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MapStringToInt32List) ProtoMessage() {} + +func (x *MapStringToInt32List) ProtoReflect() protoreflect.Message { + mi := &file_mir_command_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MapStringToInt32List.ProtoReflect.Descriptor instead. +func (*MapStringToInt32List) Descriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{13} +} + +func (x *MapStringToInt32List) GetKeyIds() map[string]*Int32List { + if x != nil { + return x.KeyIds + } + return nil +} + +type Int32List struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ids []int32 `protobuf:"varint,1,rep,packed,name=ids,proto3" json:"ids,omitempty"` +} + +func (x *Int32List) Reset() { + *x = Int32List{} + if protoimpl.UnsafeEnabled { + mi := &file_mir_command_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Int32List) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Int32List) ProtoMessage() {} + +func (x *Int32List) ProtoReflect() protoreflect.Message { + mi := &file_mir_command_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Int32List.ProtoReflect.Descriptor instead. 
+func (*Int32List) Descriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{14} +} + +func (x *Int32List) GetIds() []int32 { + if x != nil { + return x.Ids + } + return nil +} + +type AssetAnnoIndex struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AssetAnnos map[string]*Int32List `protobuf:"bytes,1,rep,name=asset_annos,json=assetAnnos,proto3" json:"asset_annos,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // key: asset id, value: annotation indexes + SubIndexes map[string]*MapStringToInt32List `protobuf:"bytes,2,rep,name=sub_indexes,json=subIndexes,proto3" json:"sub_indexes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // key: ck value, value: asset and its annotation indexes +} + +func (x *AssetAnnoIndex) Reset() { + *x = AssetAnnoIndex{} + if protoimpl.UnsafeEnabled { + mi := &file_mir_command_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AssetAnnoIndex) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AssetAnnoIndex) ProtoMessage() {} + +func (x *AssetAnnoIndex) ProtoReflect() protoreflect.Message { + mi := &file_mir_command_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AssetAnnoIndex.ProtoReflect.Descriptor instead. +func (*AssetAnnoIndex) Descriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{15} +} + +func (x *AssetAnnoIndex) GetAssetAnnos() map[string]*Int32List { + if x != nil { + return x.AssetAnnos + } + return nil +} + +func (x *AssetAnnoIndex) GetSubIndexes() map[string]*MapStringToInt32List { + if x != nil { + return x.SubIndexes + } + return nil +} + +// / ========== tasks.mir ========== +type MirTasks struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Tasks map[string]*Task `protobuf:"bytes,1,rep,name=tasks,proto3" json:"tasks,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + HeadTaskId string `protobuf:"bytes,2,opt,name=head_task_id,json=headTaskId,proto3" json:"head_task_id,omitempty"` +} + +func (x *MirTasks) Reset() { + *x = MirTasks{} + if protoimpl.UnsafeEnabled { + mi := &file_mir_command_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MirTasks) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MirTasks) ProtoMessage() {} + +func (x *MirTasks) ProtoReflect() protoreflect.Message { + mi := &file_mir_command_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MirTasks.ProtoReflect.Descriptor instead.
+func (*MirTasks) Descriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{16} +} + +func (x *MirTasks) GetTasks() map[string]*Task { + if x != nil { + return x.Tasks + } + return nil +} + +func (x *MirTasks) GetHeadTaskId() string { + if x != nil { + return x.HeadTaskId + } + return "" +} + +type Task struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type TaskType `protobuf:"varint,1,opt,name=type,proto3,enum=mir.command.TaskType" json:"type,omitempty"` + // / user defined task name + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // / auto generated unique id + TaskId string `protobuf:"bytes,3,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + // / execution time of this task + Timestamp int32 `protobuf:"varint,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // RFC 3339 date strings + // / (for training task): result model for cmd train + Model *ModelMeta `protobuf:"bytes,6,opt,name=model,proto3" json:"model,omitempty"` + ReturnCode int32 `protobuf:"varint,8,opt,name=return_code,json=returnCode,proto3" json:"return_code,omitempty"` + ReturnMsg string `protobuf:"bytes,9,opt,name=return_msg,json=returnMsg,proto3" json:"return_msg,omitempty"` + Evaluation *Evaluation `protobuf:"bytes,10,opt,name=evaluation,proto3" json:"evaluation,omitempty"` + // / (for import task): new types for cmd import, key: class name, value: asset count + NewTypes map[string]int32 `protobuf:"bytes,11,rep,name=new_types,json=newTypes,proto3" json:"new_types,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + // / (for import task): reason for new types, True: added, False: ignored + NewTypesAdded bool `protobuf:"varint,12,opt,name=new_types_added,json=newTypesAdded,proto3" json:"new_types_added,omitempty"` + SerializedTaskParameters string `protobuf:"bytes,102,opt,name=serialized_task_parameters,json=serializedTaskParameters,proto3" json:"serialized_task_parameters,omitempty"` + SerializedExecutorConfig string `protobuf:"bytes,103,opt,name=serialized_executor_config,json=serializedExecutorConfig,proto3" json:"serialized_executor_config,omitempty"` + SrcRevs string `protobuf:"bytes,104,opt,name=src_revs,json=srcRevs,proto3" json:"src_revs,omitempty"` + DstRev string `protobuf:"bytes,105,opt,name=dst_rev,json=dstRev,proto3" json:"dst_rev,omitempty"` + Executor string `protobuf:"bytes,106,opt,name=executor,proto3" json:"executor,omitempty"` +} + +func (x *Task) Reset() { + *x = Task{} + if protoimpl.UnsafeEnabled { + mi := &file_mir_command_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Task) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Task) ProtoMessage() {} + +func (x *Task) ProtoReflect() protoreflect.Message { + mi := &file_mir_command_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Task.ProtoReflect.Descriptor instead. 
+func (*Task) Descriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{17} +} + +func (x *Task) GetType() TaskType { + if x != nil { + return x.Type + } + return TaskType_TaskTypeUnknown +} + +func (x *Task) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Task) GetTaskId() string { + if x != nil { + return x.TaskId + } + return "" +} + +func (x *Task) GetTimestamp() int32 { + if x != nil { + return x.Timestamp + } + return 0 +} + +func (x *Task) GetModel() *ModelMeta { + if x != nil { + return x.Model + } + return nil +} + +func (x *Task) GetReturnCode() int32 { + if x != nil { + return x.ReturnCode + } + return 0 +} + +func (x *Task) GetReturnMsg() string { + if x != nil { + return x.ReturnMsg + } + return "" +} + +func (x *Task) GetEvaluation() *Evaluation { + if x != nil { + return x.Evaluation + } + return nil +} + +func (x *Task) GetNewTypes() map[string]int32 { + if x != nil { + return x.NewTypes + } + return nil +} + +func (x *Task) GetNewTypesAdded() bool { + if x != nil { + return x.NewTypesAdded + } + return false +} + +func (x *Task) GetSerializedTaskParameters() string { + if x != nil { + return x.SerializedTaskParameters + } + return "" +} + +func (x *Task) GetSerializedExecutorConfig() string { + if x != nil { + return x.SerializedExecutorConfig + } + return "" +} + +func (x *Task) GetSrcRevs() string { + if x != nil { + return x.SrcRevs + } + return "" +} + +func (x *Task) GetDstRev() string { + if x != nil { + return x.DstRev + } + return "" +} + +func (x *Task) GetExecutor() string { + if x != nil { + return x.Executor + } + return "" +} + +type ModelMeta struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // / hash for models.tar.gz + ModelHash string `protobuf:"bytes,1,opt,name=model_hash,json=modelHash,proto3" json:"model_hash,omitempty"` + // / model mAP + MeanAveragePrecision float32 `protobuf:"fixed32,2,opt,name=mean_average_precision,json=meanAveragePrecision,proto3" json:"mean_average_precision,omitempty"` + // / context generated by train command + Context string `protobuf:"bytes,3,opt,name=context,proto3" json:"context,omitempty"` + Stages map[string]*ModelStage `protobuf:"bytes,4,rep,name=stages,proto3" json:"stages,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + BestStageName string `protobuf:"bytes,5,opt,name=best_stage_name,json=bestStageName,proto3" json:"best_stage_name,omitempty"` + ClassNames []string `protobuf:"bytes,6,rep,name=class_names,json=classNames,proto3" json:"class_names,omitempty"` +} + +func (x *ModelMeta) Reset() { + *x = ModelMeta{} + if protoimpl.UnsafeEnabled { + mi := &file_mir_command_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ModelMeta) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ModelMeta) ProtoMessage() {} + +func (x *ModelMeta) ProtoReflect() protoreflect.Message { + mi := &file_mir_command_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ModelMeta.ProtoReflect.Descriptor instead. 
+func (*ModelMeta) Descriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{18} +} + +func (x *ModelMeta) GetModelHash() string { + if x != nil { + return x.ModelHash + } + return "" +} + +func (x *ModelMeta) GetMeanAveragePrecision() float32 { + if x != nil { + return x.MeanAveragePrecision + } + return 0 +} + +func (x *ModelMeta) GetContext() string { + if x != nil { + return x.Context + } + return "" +} + +func (x *ModelMeta) GetStages() map[string]*ModelStage { + if x != nil { + return x.Stages + } + return nil +} + +func (x *ModelMeta) GetBestStageName() string { + if x != nil { + return x.BestStageName + } + return "" +} + +func (x *ModelMeta) GetClassNames() []string { + if x != nil { + return x.ClassNames + } + return nil +} + +type ModelStage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + StageName string `protobuf:"bytes,1,opt,name=stage_name,json=stageName,proto3" json:"stage_name,omitempty"` + Files []string `protobuf:"bytes,2,rep,name=files,proto3" json:"files,omitempty"` + Timestamp int32 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + MAP float32 `protobuf:"fixed32,4,opt,name=mAP,proto3" json:"mAP,omitempty"` +} + +func (x *ModelStage) Reset() { + *x = ModelStage{} + if protoimpl.UnsafeEnabled { + mi := &file_mir_command_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ModelStage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ModelStage) ProtoMessage() {} + +func (x *ModelStage) ProtoReflect() protoreflect.Message { + mi := &file_mir_command_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ModelStage.ProtoReflect.Descriptor instead. 
+func (*ModelStage) Descriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{19} +} + +func (x *ModelStage) GetStageName() string { + if x != nil { + return x.StageName + } + return "" +} + +func (x *ModelStage) GetFiles() []string { + if x != nil { + return x.Files + } + return nil +} + +func (x *ModelStage) GetTimestamp() int32 { + if x != nil { + return x.Timestamp + } + return 0 +} + +func (x *ModelStage) GetMAP() float32 { + if x != nil { + return x.MAP + } + return 0 +} + +type Evaluation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Config *EvaluateConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + DatasetEvaluation *SingleDatasetEvaluation `protobuf:"bytes,3,opt,name=dataset_evaluation,json=datasetEvaluation,proto3" json:"dataset_evaluation,omitempty"` + MainCk *SingleDatasetEvaluation `protobuf:"bytes,4,opt,name=main_ck,json=mainCk,proto3" json:"main_ck,omitempty"` + SubCks map[string]*SingleDatasetEvaluation `protobuf:"bytes,5,rep,name=sub_cks,json=subCks,proto3" json:"sub_cks,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + State EvaluationState `protobuf:"varint,6,opt,name=state,proto3,enum=mir.command.EvaluationState" json:"state,omitempty"` +} + +func (x *Evaluation) Reset() { + *x = Evaluation{} + if protoimpl.UnsafeEnabled { + mi := &file_mir_command_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Evaluation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Evaluation) ProtoMessage() {} + +func (x *Evaluation) ProtoReflect() protoreflect.Message { + mi := &file_mir_command_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Evaluation.ProtoReflect.Descriptor instead. 
+func (*Evaluation) Descriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{20} +} + +func (x *Evaluation) GetConfig() *EvaluateConfig { + if x != nil { + return x.Config + } + return nil +} + +func (x *Evaluation) GetDatasetEvaluation() *SingleDatasetEvaluation { + if x != nil { + return x.DatasetEvaluation + } + return nil +} + +func (x *Evaluation) GetMainCk() *SingleDatasetEvaluation { + if x != nil { + return x.MainCk + } + return nil +} + +func (x *Evaluation) GetSubCks() map[string]*SingleDatasetEvaluation { + if x != nil { + return x.SubCks + } + return nil +} + +func (x *Evaluation) GetState() EvaluationState { + if x != nil { + return x.State + } + return EvaluationState_ES_NOT_SET +} + +type EvaluateConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ConfThr float32 `protobuf:"fixed32,3,opt,name=conf_thr,json=confThr,proto3" json:"conf_thr,omitempty"` + IouThrsInterval string `protobuf:"bytes,4,opt,name=iou_thrs_interval,json=iouThrsInterval,proto3" json:"iou_thrs_interval,omitempty"` + NeedPrCurve bool `protobuf:"varint,5,opt,name=need_pr_curve,json=needPrCurve,proto3" json:"need_pr_curve,omitempty"` + ClassIds []int32 `protobuf:"varint,7,rep,packed,name=class_ids,json=classIds,proto3" json:"class_ids,omitempty"` + MainCk string `protobuf:"bytes,8,opt,name=main_ck,json=mainCk,proto3" json:"main_ck,omitempty"` +} + +func (x *EvaluateConfig) Reset() { + *x = EvaluateConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_mir_command_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EvaluateConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EvaluateConfig) ProtoMessage() {} + +func (x *EvaluateConfig) ProtoReflect() protoreflect.Message { + mi := &file_mir_command_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EvaluateConfig.ProtoReflect.Descriptor instead. 
+func (*EvaluateConfig) Descriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{21} +} + +func (x *EvaluateConfig) GetConfThr() float32 { + if x != nil { + return x.ConfThr + } + return 0 +} + +func (x *EvaluateConfig) GetIouThrsInterval() string { + if x != nil { + return x.IouThrsInterval + } + return "" +} + +func (x *EvaluateConfig) GetNeedPrCurve() bool { + if x != nil { + return x.NeedPrCurve + } + return false +} + +func (x *EvaluateConfig) GetClassIds() []int32 { + if x != nil { + return x.ClassIds + } + return nil +} + +func (x *EvaluateConfig) GetMainCk() string { + if x != nil { + return x.MainCk + } + return "" +} + +type SingleDatasetEvaluation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ConfThr float32 `protobuf:"fixed32,1,opt,name=conf_thr,json=confThr,proto3" json:"conf_thr,omitempty"` + IouEvaluations map[string]*SingleIouEvaluation `protobuf:"bytes,4,rep,name=iou_evaluations,json=iouEvaluations,proto3" json:"iou_evaluations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // key: string of iou threshold + IouAveragedEvaluation *SingleIouEvaluation `protobuf:"bytes,5,opt,name=iou_averaged_evaluation,json=iouAveragedEvaluation,proto3" json:"iou_averaged_evaluation,omitempty"` // average for all ious +} + +func (x *SingleDatasetEvaluation) Reset() { + *x = SingleDatasetEvaluation{} + if protoimpl.UnsafeEnabled { + mi := &file_mir_command_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SingleDatasetEvaluation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SingleDatasetEvaluation) ProtoMessage() {} + +func (x *SingleDatasetEvaluation) ProtoReflect() protoreflect.Message { + mi := &file_mir_command_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SingleDatasetEvaluation.ProtoReflect.Descriptor instead. 
+func (*SingleDatasetEvaluation) Descriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{22} +} + +func (x *SingleDatasetEvaluation) GetConfThr() float32 { + if x != nil { + return x.ConfThr + } + return 0 +} + +func (x *SingleDatasetEvaluation) GetIouEvaluations() map[string]*SingleIouEvaluation { + if x != nil { + return x.IouEvaluations + } + return nil +} + +func (x *SingleDatasetEvaluation) GetIouAveragedEvaluation() *SingleIouEvaluation { + if x != nil { + return x.IouAveragedEvaluation + } + return nil +} + +type SingleIouEvaluation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CiEvaluations map[int32]*SingleEvaluationElement `protobuf:"bytes,1,rep,name=ci_evaluations,json=ciEvaluations,proto3" json:"ci_evaluations,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // key: class ids + CiAveragedEvaluation *SingleEvaluationElement `protobuf:"bytes,2,opt,name=ci_averaged_evaluation,json=ciAveragedEvaluation,proto3" json:"ci_averaged_evaluation,omitempty"` // evaluations averaged by class ids +} + +func (x *SingleIouEvaluation) Reset() { + *x = SingleIouEvaluation{} + if protoimpl.UnsafeEnabled { + mi := &file_mir_command_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SingleIouEvaluation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SingleIouEvaluation) ProtoMessage() {} + +func (x *SingleIouEvaluation) ProtoReflect() protoreflect.Message { + mi := &file_mir_command_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SingleIouEvaluation.ProtoReflect.Descriptor instead. 
+func (*SingleIouEvaluation) Descriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{23} +} + +func (x *SingleIouEvaluation) GetCiEvaluations() map[int32]*SingleEvaluationElement { + if x != nil { + return x.CiEvaluations + } + return nil +} + +func (x *SingleIouEvaluation) GetCiAveragedEvaluation() *SingleEvaluationElement { + if x != nil { + return x.CiAveragedEvaluation + } + return nil +} + +type SingleEvaluationElement struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ap float32 `protobuf:"fixed32,1,opt,name=ap,proto3" json:"ap,omitempty"` + Ar float32 `protobuf:"fixed32,2,opt,name=ar,proto3" json:"ar,omitempty"` + Tp int32 `protobuf:"varint,3,opt,name=tp,proto3" json:"tp,omitempty"` + Fp int32 `protobuf:"varint,4,opt,name=fp,proto3" json:"fp,omitempty"` + Fn int32 `protobuf:"varint,5,opt,name=fn,proto3" json:"fn,omitempty"` + PrCurve []*FloatPoint `protobuf:"bytes,6,rep,name=pr_curve,json=prCurve,proto3" json:"pr_curve,omitempty"` +} + +func (x *SingleEvaluationElement) Reset() { + *x = SingleEvaluationElement{} + if protoimpl.UnsafeEnabled { + mi := &file_mir_command_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SingleEvaluationElement) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SingleEvaluationElement) ProtoMessage() {} + +func (x *SingleEvaluationElement) ProtoReflect() protoreflect.Message { + mi := &file_mir_command_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SingleEvaluationElement.ProtoReflect.Descriptor instead. 
+func (*SingleEvaluationElement) Descriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{24} +} + +func (x *SingleEvaluationElement) GetAp() float32 { + if x != nil { + return x.Ap + } + return 0 +} + +func (x *SingleEvaluationElement) GetAr() float32 { + if x != nil { + return x.Ar + } + return 0 +} + +func (x *SingleEvaluationElement) GetTp() int32 { + if x != nil { + return x.Tp + } + return 0 +} + +func (x *SingleEvaluationElement) GetFp() int32 { + if x != nil { + return x.Fp + } + return 0 +} + +func (x *SingleEvaluationElement) GetFn() int32 { + if x != nil { + return x.Fn + } + return 0 +} + +func (x *SingleEvaluationElement) GetPrCurve() []*FloatPoint { + if x != nil { + return x.PrCurve + } + return nil +} + +type IntPoint struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + X int32 `protobuf:"varint,1,opt,name=x,proto3" json:"x,omitempty"` + Y int32 `protobuf:"varint,2,opt,name=y,proto3" json:"y,omitempty"` + Z int32 `protobuf:"varint,3,opt,name=z,proto3" json:"z,omitempty"` +} + +func (x *IntPoint) Reset() { + *x = IntPoint{} + if protoimpl.UnsafeEnabled { + mi := &file_mir_command_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *IntPoint) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*IntPoint) ProtoMessage() {} + +func (x *IntPoint) ProtoReflect() protoreflect.Message { + mi := &file_mir_command_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use IntPoint.ProtoReflect.Descriptor instead. +func (*IntPoint) Descriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{25} +} + +func (x *IntPoint) GetX() int32 { + if x != nil { + return x.X + } + return 0 +} + +func (x *IntPoint) GetY() int32 { + if x != nil { + return x.Y + } + return 0 +} + +func (x *IntPoint) GetZ() int32 { + if x != nil { + return x.Z + } + return 0 +} + +type FloatPoint struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + X float32 `protobuf:"fixed32,1,opt,name=x,proto3" json:"x,omitempty"` + Y float32 `protobuf:"fixed32,2,opt,name=y,proto3" json:"y,omitempty"` + Z float32 `protobuf:"fixed32,3,opt,name=z,proto3" json:"z,omitempty"` +} + +func (x *FloatPoint) Reset() { + *x = FloatPoint{} + if protoimpl.UnsafeEnabled { + mi := &file_mir_command_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FloatPoint) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FloatPoint) ProtoMessage() {} + +func (x *FloatPoint) ProtoReflect() protoreflect.Message { + mi := &file_mir_command_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FloatPoint.ProtoReflect.Descriptor instead. 
+func (*FloatPoint) Descriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{26} +} + +func (x *FloatPoint) GetX() float32 { + if x != nil { + return x.X + } + return 0 +} + +func (x *FloatPoint) GetY() float32 { + if x != nil { + return x.Y + } + return 0 +} + +func (x *FloatPoint) GetZ() float32 { + if x != nil { + return x.Z + } + return 0 +} + +// / ========== context.mir ========== +type MirContext struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // / total images count + ImagesCnt int32 `protobuf:"varint,1,opt,name=images_cnt,json=imagesCnt,proto3" json:"images_cnt,omitempty"` + // / from pred and gt + CksCnt map[string]*SingleMapCount `protobuf:"bytes,6,rep,name=cks_cnt,json=cksCnt,proto3" json:"cks_cnt,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + TotalAssetMbytes int32 `protobuf:"varint,11,opt,name=total_asset_mbytes,json=totalAssetMbytes,proto3" json:"total_asset_mbytes,omitempty"` + PredStats *AnnoStats `protobuf:"bytes,100,opt,name=pred_stats,json=predStats,proto3" json:"pred_stats,omitempty"` + GtStats *AnnoStats `protobuf:"bytes,101,opt,name=gt_stats,json=gtStats,proto3" json:"gt_stats,omitempty"` +} + +func (x *MirContext) Reset() { + *x = MirContext{} + if protoimpl.UnsafeEnabled { + mi := &file_mir_command_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MirContext) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MirContext) ProtoMessage() {} + +func (x *MirContext) ProtoReflect() protoreflect.Message { + mi := &file_mir_command_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MirContext.ProtoReflect.Descriptor instead. 
+func (*MirContext) Descriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{27} +} + +func (x *MirContext) GetImagesCnt() int32 { + if x != nil { + return x.ImagesCnt + } + return 0 +} + +func (x *MirContext) GetCksCnt() map[string]*SingleMapCount { + if x != nil { + return x.CksCnt + } + return nil +} + +func (x *MirContext) GetTotalAssetMbytes() int32 { + if x != nil { + return x.TotalAssetMbytes + } + return 0 +} + +func (x *MirContext) GetPredStats() *AnnoStats { + if x != nil { + return x.PredStats + } + return nil +} + +func (x *MirContext) GetGtStats() *AnnoStats { + if x != nil { + return x.GtStats + } + return nil +} + +type SingleMapCount struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Cnt int32 `protobuf:"varint,1,opt,name=cnt,proto3" json:"cnt,omitempty"` + SubCnt map[string]int32 `protobuf:"bytes,2,rep,name=sub_cnt,json=subCnt,proto3" json:"sub_cnt,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` +} + +func (x *SingleMapCount) Reset() { + *x = SingleMapCount{} + if protoimpl.UnsafeEnabled { + mi := &file_mir_command_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SingleMapCount) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SingleMapCount) ProtoMessage() {} + +func (x *SingleMapCount) ProtoReflect() protoreflect.Message { + mi := &file_mir_command_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SingleMapCount.ProtoReflect.Descriptor instead. +func (*SingleMapCount) Descriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{28} +} + +func (x *SingleMapCount) GetCnt() int32 { + if x != nil { + return x.Cnt + } + return 0 +} + +func (x *SingleMapCount) GetSubCnt() map[string]int32 { + if x != nil { + return x.SubCnt + } + return nil +} + +type AnnoStats struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TotalCnt int32 `protobuf:"varint,1,opt,name=total_cnt,json=totalCnt,proto3" json:"total_cnt,omitempty"` + PositiveAssetCnt int32 `protobuf:"varint,2,opt,name=positive_asset_cnt,json=positiveAssetCnt,proto3" json:"positive_asset_cnt,omitempty"` + NegativeAssetCnt int32 `protobuf:"varint,3,opt,name=negative_asset_cnt,json=negativeAssetCnt,proto3" json:"negative_asset_cnt,omitempty"` + TagsCnt map[string]*SingleMapCount `protobuf:"bytes,7,rep,name=tags_cnt,json=tagsCnt,proto3" json:"tags_cnt,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // key: main tag name, value: main tag count and sub tag names and counts + ClassIdsCnt map[int32]int32 `protobuf:"bytes,8,rep,name=class_ids_cnt,json=classIdsCnt,proto3" json:"class_ids_cnt,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` // key: class ids, value: asset count for this class id + // Shortcut of class_ids for evaluation (dup. field as in SingleTaskAnnotations). 
+ EvalClassIds []int32 `protobuf:"varint,9,rep,packed,name=eval_class_ids,json=evalClassIds,proto3" json:"eval_class_ids,omitempty"` +} + +func (x *AnnoStats) Reset() { + *x = AnnoStats{} + if protoimpl.UnsafeEnabled { + mi := &file_mir_command_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AnnoStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AnnoStats) ProtoMessage() {} + +func (x *AnnoStats) ProtoReflect() protoreflect.Message { + mi := &file_mir_command_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AnnoStats.ProtoReflect.Descriptor instead. +func (*AnnoStats) Descriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{29} +} + +func (x *AnnoStats) GetTotalCnt() int32 { + if x != nil { + return x.TotalCnt + } + return 0 +} + +func (x *AnnoStats) GetPositiveAssetCnt() int32 { + if x != nil { + return x.PositiveAssetCnt + } + return 0 +} + +func (x *AnnoStats) GetNegativeAssetCnt() int32 { + if x != nil { + return x.NegativeAssetCnt + } + return 0 +} + +func (x *AnnoStats) GetTagsCnt() map[string]*SingleMapCount { + if x != nil { + return x.TagsCnt + } + return nil +} + +func (x *AnnoStats) GetClassIdsCnt() map[int32]int32 { + if x != nil { + return x.ClassIdsCnt + } + return nil +} + +func (x *AnnoStats) GetEvalClassIds() []int32 { + if x != nil { + return x.EvalClassIds + } + return nil +} + +type ExportConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Asset config. + AssetFormat AssetFormat `protobuf:"varint,1,opt,name=asset_format,json=assetFormat,proto3,enum=mir.command.AssetFormat" json:"asset_format,omitempty"` + AssetDir string `protobuf:"bytes,2,opt,name=asset_dir,json=assetDir,proto3" json:"asset_dir,omitempty"` + AssetIndexFile string `protobuf:"bytes,3,opt,name=asset_index_file,json=assetIndexFile,proto3" json:"asset_index_file,omitempty"` + // Index file writes abs path. In TMI case, path should be converted, e.g. /in/assets. + AssetIndexPrefix string `protobuf:"bytes,4,opt,name=asset_index_prefix,json=assetIndexPrefix,proto3" json:"asset_index_prefix,omitempty"` + MediaLocation string `protobuf:"bytes,5,opt,name=media_location,json=mediaLocation,proto3" json:"media_location,omitempty"` + NeedSubFolder bool `protobuf:"varint,6,opt,name=need_sub_folder,json=needSubFolder,proto3" json:"need_sub_folder,omitempty"` + // Annotation config. 
+ AnnoFormat AnnoFormat `protobuf:"varint,50,opt,name=anno_format,json=annoFormat,proto3,enum=mir.command.AnnoFormat" json:"anno_format,omitempty"` + GtDir string `protobuf:"bytes,51,opt,name=gt_dir,json=gtDir,proto3" json:"gt_dir,omitempty"` + GtIndexFile string `protobuf:"bytes,52,opt,name=gt_index_file,json=gtIndexFile,proto3" json:"gt_index_file,omitempty"` + GtIndexPrefix string `protobuf:"bytes,53,opt,name=gt_index_prefix,json=gtIndexPrefix,proto3" json:"gt_index_prefix,omitempty"` + PredDir string `protobuf:"bytes,54,opt,name=pred_dir,json=predDir,proto3" json:"pred_dir,omitempty"` + PredIndexFile string `protobuf:"bytes,55,opt,name=pred_index_file,json=predIndexFile,proto3" json:"pred_index_file,omitempty"` + PredIndexPrefix string `protobuf:"bytes,56,opt,name=pred_index_prefix,json=predIndexPrefix,proto3" json:"pred_index_prefix,omitempty"` + TvtIndexDir string `protobuf:"bytes,57,opt,name=tvt_index_dir,json=tvtIndexDir,proto3" json:"tvt_index_dir,omitempty"` +} + +func (x *ExportConfig) Reset() { + *x = ExportConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_mir_command_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExportConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExportConfig) ProtoMessage() {} + +func (x *ExportConfig) ProtoReflect() protoreflect.Message { + mi := &file_mir_command_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExportConfig.ProtoReflect.Descriptor instead. +func (*ExportConfig) Descriptor() ([]byte, []int) { + return file_mir_command_proto_rawDescGZIP(), []int{30} +} + +func (x *ExportConfig) GetAssetFormat() AssetFormat { + if x != nil { + return x.AssetFormat + } + return AssetFormat_AF_UNKNOWN +} + +func (x *ExportConfig) GetAssetDir() string { + if x != nil { + return x.AssetDir + } + return "" +} + +func (x *ExportConfig) GetAssetIndexFile() string { + if x != nil { + return x.AssetIndexFile + } + return "" +} + +func (x *ExportConfig) GetAssetIndexPrefix() string { + if x != nil { + return x.AssetIndexPrefix + } + return "" +} + +func (x *ExportConfig) GetMediaLocation() string { + if x != nil { + return x.MediaLocation + } + return "" +} + +func (x *ExportConfig) GetNeedSubFolder() bool { + if x != nil { + return x.NeedSubFolder + } + return false +} + +func (x *ExportConfig) GetAnnoFormat() AnnoFormat { + if x != nil { + return x.AnnoFormat + } + return AnnoFormat_AF_NO_ANNOTATION +} + +func (x *ExportConfig) GetGtDir() string { + if x != nil { + return x.GtDir + } + return "" +} + +func (x *ExportConfig) GetGtIndexFile() string { + if x != nil { + return x.GtIndexFile + } + return "" +} + +func (x *ExportConfig) GetGtIndexPrefix() string { + if x != nil { + return x.GtIndexPrefix + } + return "" +} + +func (x *ExportConfig) GetPredDir() string { + if x != nil { + return x.PredDir + } + return "" +} + +func (x *ExportConfig) GetPredIndexFile() string { + if x != nil { + return x.PredIndexFile + } + return "" +} + +func (x *ExportConfig) GetPredIndexPrefix() string { + if x != nil { + return x.PredIndexPrefix + } + return "" +} + +func (x *ExportConfig) GetTvtIndexDir() string { + if x != nil { + return x.TvtIndexDir + } + return "" +} + +var File_mir_command_proto protoreflect.FileDescriptor + +var file_mir_command_proto_rawDesc = []byte{ + 
0x0a, 0x11, 0x6d, 0x69, 0x72, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, + 0x22, 0xb9, 0x01, 0x0a, 0x0c, 0x4d, 0x69, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x73, 0x12, 0x49, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x61, 0x6e, 0x64, 0x2e, 0x4d, 0x69, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x73, + 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x1a, 0x5e, 0x0a, 0x0f, + 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x35, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1f, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd3, 0x02, 0x0a, + 0x12, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2f, 0x0a, 0x08, 0x74, 0x76, 0x74, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x6d, 0x69, + 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x54, 0x76, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x52, 0x07, 0x74, 0x76, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x35, 0x0a, 0x0a, 0x61, 0x73, + 0x73, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, + 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x41, 0x73, 0x73, + 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x61, 0x73, 0x73, 0x65, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x77, 0x69, 0x64, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x05, 0x77, 0x69, 0x64, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, + 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, + 0x25, 0x0a, 0x0e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, + 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x43, 0x68, + 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x79, 0x74, 0x65, 0x5f, 0x73, + 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x62, 0x79, 0x74, 0x65, 0x53, + 0x69, 0x7a, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x66, 0x69, + 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6f, 0x72, + 0x69, 0x67, 0x69, 0x6e, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x4a, 0x04, 0x08, 0x01, + 0x10, 0x02, 0x22, 0x3d, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, + 0x14, 0x0a, 0x05, 0x73, 
0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x22, 0xc9, 0x02, 0x0a, 0x0e, 0x4d, 0x69, 0x72, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x0c, 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x74, + 0x72, 0x75, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6d, 0x69, 0x72, + 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x54, + 0x61, 0x73, 0x6b, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0b, + 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x54, 0x72, 0x75, 0x74, 0x68, 0x12, 0x42, 0x0a, 0x0a, 0x70, + 0x72, 0x65, 0x64, 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x22, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x53, 0x69, + 0x6e, 0x67, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x46, 0x0a, 0x09, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6b, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, + 0x2e, 0x4d, 0x69, 0x72, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x49, 0x6d, 0x61, 0x67, 0x65, 0x43, 0x6b, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x69, + 0x6d, 0x61, 0x67, 0x65, 0x43, 0x6b, 0x73, 0x1a, 0x58, 0x0a, 0x0d, 0x49, 0x6d, 0x61, 0x67, 0x65, + 0x43, 0x6b, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x31, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x69, 0x72, 0x2e, + 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x49, 0x6d, + 0x61, 0x67, 0x65, 0x43, 0x6b, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0xfb, 0x04, + 0x0a, 0x15, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x41, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x65, 0x0a, 0x11, 0x69, 0x6d, 0x61, 0x67, 0x65, + 0x5f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, + 0x2e, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x41, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x41, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x69, 0x6d, + 0x61, 0x67, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x17, + 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x29, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x61, 0x6e, 0x64, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, + 0x70, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x61, 
0x73, 0x6b, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, + 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0c, 0x74, 0x61, 0x73, 0x6b, + 0x43, 0x6c, 0x61, 0x73, 0x73, 0x49, 0x64, 0x73, 0x12, 0x54, 0x0a, 0x0c, 0x6d, 0x61, 0x70, 0x5f, + 0x69, 0x64, 0x5f, 0x63, 0x6f, 0x6c, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, + 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x53, 0x69, 0x6e, + 0x67, 0x6c, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x4d, 0x61, 0x70, 0x49, 0x64, 0x43, 0x6f, 0x6c, 0x6f, 0x72, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x0a, 0x6d, 0x61, 0x70, 0x49, 0x64, 0x43, 0x6f, 0x6c, 0x6f, 0x72, 0x12, 0x24, + 0x0a, 0x0e, 0x65, 0x76, 0x61, 0x6c, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x73, + 0x18, 0x0a, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0c, 0x65, 0x76, 0x61, 0x6c, 0x43, 0x6c, 0x61, 0x73, + 0x73, 0x49, 0x64, 0x73, 0x12, 0x2c, 0x0a, 0x05, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, + 0x64, 0x2e, 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x05, 0x6d, 0x6f, 0x64, + 0x65, 0x6c, 0x12, 0x27, 0x0a, 0x0f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x6f, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x68, 0x0a, 0x15, 0x49, + 0x6d, 0x61, 0x67, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x61, 0x6e, 0x64, 0x2e, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x41, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x54, 0x0a, 0x0f, 0x4d, 0x61, 0x70, 0x49, 0x64, 0x43, 0x6f, + 0x6c, 0x6f, 0x72, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2b, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, 0x69, 0x72, 0x2e, + 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x49, 0x6e, 0x74, 0x50, 0x6f, 0x69, 0x6e, 0x74, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe3, 0x01, 0x0a, 0x16, + 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x05, 0x62, 0x6f, 0x78, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x61, 0x6e, 0x64, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x62, 0x6f, 0x78, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x08, 0x70, + 0x6f, 0x6c, 0x79, 0x67, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x70, 0x6f, + 0x6c, 0x79, 0x67, 0x6f, 0x6e, 0x73, 0x12, 0x2f, 0x0a, 0x04, 0x6d, 0x61, 
0x73, 0x6b, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, + 0x6e, 0x64, 0x2e, 0x4d, 0x61, 0x73, 0x6b, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x04, 0x6d, 0x61, 0x73, 0x6b, 0x12, 0x22, 0x0a, 0x0d, 0x69, 0x6d, 0x67, 0x5f, 0x63, + 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0b, + 0x69, 0x6d, 0x67, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x49, 0x64, 0x73, 0x4a, 0x04, 0x08, 0x01, 0x10, + 0x02, 0x22, 0xa5, 0x01, 0x0a, 0x0e, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x49, 0x6d, 0x61, 0x67, + 0x65, 0x43, 0x6b, 0x73, 0x12, 0x36, 0x0a, 0x03, 0x63, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, + 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x43, 0x6b, 0x73, 0x2e, 0x43, + 0x6b, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x03, 0x63, 0x6b, 0x73, 0x12, 0x23, 0x0a, 0x0d, + 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x71, 0x75, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x02, 0x52, 0x0c, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x51, 0x75, 0x61, 0x6c, 0x69, 0x74, + 0x79, 0x1a, 0x36, 0x0a, 0x08, 0x43, 0x6b, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x79, 0x0a, 0x0e, 0x4d, 0x61, 0x73, + 0x6b, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, + 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x0c, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x4d, 0x61, 0x73, 0x6b, + 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6d, 0x61, 0x73, + 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, + 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x05, 0x52, 0x09, 0x6f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x49, 0x64, 0x73, 0x22, 0xb9, 0x03, 0x0a, 0x10, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, + 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, + 0x23, 0x0a, 0x03, 0x62, 0x6f, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6d, + 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x52, 0x65, 0x63, 0x74, 0x52, + 0x03, 0x62, 0x6f, 0x78, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x69, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x49, 0x64, 0x12, + 0x14, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, + 0x73, 0x63, 0x6f, 0x72, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x6e, 0x6e, 0x6f, 0x5f, 0x71, 0x75, + 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0b, 0x61, 0x6e, 0x6e, + 0x6f, 0x51, 0x75, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x3b, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6e, 0x6e, 0x6f, 0x74, + 
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x30, 0x0a, 0x02, 0x63, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x20, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, + 0x43, 0x6f, 0x6e, 0x66, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x72, 0x69, 0x78, 0x54, + 0x79, 0x70, 0x65, 0x52, 0x02, 0x63, 0x6d, 0x12, 0x1e, 0x0a, 0x0b, 0x64, 0x65, 0x74, 0x5f, 0x6c, + 0x69, 0x6e, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x64, 0x65, + 0x74, 0x4c, 0x69, 0x6e, 0x6b, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x61, 0x73, 0x73, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x61, + 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x70, 0x6f, 0x6c, 0x79, 0x67, 0x6f, + 0x6e, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, + 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x49, 0x6e, 0x74, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x07, + 0x70, 0x6f, 0x6c, 0x79, 0x67, 0x6f, 0x6e, 0x1a, 0x37, 0x0a, 0x09, 0x54, 0x61, 0x67, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0x61, 0x0a, 0x04, 0x52, 0x65, 0x63, 0x74, 0x12, 0x0c, 0x0a, 0x01, 0x78, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x01, 0x78, 0x12, 0x0c, 0x0a, 0x01, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x01, 0x79, 0x12, 0x0c, 0x0a, 0x01, 0x77, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x01, 0x77, 0x12, 0x0c, 0x0a, 0x01, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x01, 0x68, + 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x6f, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x61, 0x6e, 0x67, 0x6c, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0b, 0x72, 0x6f, 0x74, 0x61, 0x74, 0x65, 0x41, 0x6e, + 0x67, 0x6c, 0x65, 0x22, 0xac, 0x02, 0x0a, 0x0b, 0x4d, 0x69, 0x72, 0x4b, 0x65, 0x79, 0x77, 0x6f, + 0x72, 0x64, 0x73, 0x12, 0x34, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x78, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x61, 0x6e, 0x64, 0x2e, 0x43, 0x69, 0x54, 0x61, 0x67, 0x54, 0x6f, 0x49, 0x6e, 0x64, 0x65, 0x78, + 0x52, 0x07, 0x70, 0x72, 0x65, 0x64, 0x49, 0x64, 0x78, 0x12, 0x30, 0x0a, 0x06, 0x67, 0x74, 0x5f, + 0x69, 0x64, 0x78, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6d, 0x69, 0x72, 0x2e, + 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x43, 0x69, 0x54, 0x61, 0x67, 0x54, 0x6f, 0x49, + 0x6e, 0x64, 0x65, 0x78, 0x52, 0x05, 0x67, 0x74, 0x49, 0x64, 0x78, 0x12, 0x3a, 0x0a, 0x06, 0x63, + 0x6b, 0x5f, 0x69, 0x64, 0x78, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6d, 0x69, + 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x4d, 0x69, 0x72, 0x4b, 0x65, 0x79, + 0x77, 0x6f, 0x72, 0x64, 0x73, 0x2e, 0x43, 0x6b, 0x49, 0x64, 0x78, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x05, 0x63, 0x6b, 0x49, 0x64, 0x78, 0x1a, 0x55, 0x0a, 0x0a, 0x43, 0x6b, 0x49, 0x64, 0x78, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x31, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x61, 0x6e, 0x64, 
0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x41, 0x6e, 0x6e, 0x6f, 0x49, 0x6e, + 0x64, 0x65, 0x78, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x4a, 0x04, + 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, + 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, + 0x10, 0x07, 0x22, 0xae, 0x02, 0x0a, 0x0c, 0x43, 0x69, 0x54, 0x61, 0x67, 0x54, 0x6f, 0x49, 0x6e, + 0x64, 0x65, 0x78, 0x12, 0x34, 0x0a, 0x03, 0x63, 0x69, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x43, + 0x69, 0x54, 0x61, 0x67, 0x54, 0x6f, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x43, 0x69, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x03, 0x63, 0x69, 0x73, 0x12, 0x37, 0x0a, 0x04, 0x74, 0x61, 0x67, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, + 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x43, 0x69, 0x54, 0x61, 0x67, 0x54, 0x6f, 0x49, 0x6e, 0x64, + 0x65, 0x78, 0x2e, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x74, 0x61, + 0x67, 0x73, 0x1a, 0x59, 0x0a, 0x08, 0x43, 0x69, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x37, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x21, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x4d, 0x61, + 0x70, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x6f, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x4c, 0x69, + 0x73, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x54, 0x0a, + 0x09, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x31, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x69, + 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x41, + 0x6e, 0x6e, 0x6f, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x22, 0x29, 0x0a, 0x0a, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4c, 0x69, 0x73, + 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x73, 0x73, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x61, 0x73, 0x73, 0x65, 0x74, 0x49, 0x64, 0x73, 0x22, 0xb1, + 0x01, 0x0a, 0x14, 0x4d, 0x61, 0x70, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x54, 0x6f, 0x49, 0x6e, + 0x74, 0x33, 0x32, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x46, 0x0a, 0x07, 0x6b, 0x65, 0x79, 0x5f, 0x69, + 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, + 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x4d, 0x61, 0x70, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x54, 0x6f, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x4b, 0x65, 0x79, 0x49, + 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6b, 0x65, 0x79, 0x49, 0x64, 0x73, 0x1a, + 0x51, 0x0a, 0x0b, 0x4b, 0x65, 0x79, 0x49, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x16, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x49, 0x6e, + 0x74, 0x33, 0x32, 0x4c, 0x69, 0x73, 0x74, 0x52, 
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0x1d, 0x0a, 0x09, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x4c, 0x69, 0x73, 0x74, 0x12, + 0x10, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x52, 0x03, 0x69, 0x64, + 0x73, 0x22, 0xe5, 0x02, 0x0a, 0x0e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x41, 0x6e, 0x6e, 0x6f, 0x49, + 0x6e, 0x64, 0x65, 0x78, 0x12, 0x4c, 0x0a, 0x0b, 0x61, 0x73, 0x73, 0x65, 0x74, 0x5f, 0x61, 0x6e, + 0x6e, 0x6f, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6d, 0x69, 0x72, 0x2e, + 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x41, 0x6e, 0x6e, + 0x6f, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x41, 0x6e, 0x6e, 0x6f, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x73, 0x73, 0x65, 0x74, 0x41, 0x6e, 0x6e, + 0x6f, 0x73, 0x12, 0x4c, 0x0a, 0x0b, 0x73, 0x75, 0x62, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, + 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x41, 0x6e, 0x6e, 0x6f, 0x49, + 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x53, 0x75, 0x62, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x73, 0x75, 0x62, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x73, + 0x1a, 0x55, 0x0a, 0x0f, 0x41, 0x73, 0x73, 0x65, 0x74, 0x41, 0x6e, 0x6e, 0x6f, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, + 0x6e, 0x64, 0x2e, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x60, 0x0a, 0x0f, 0x53, 0x75, 0x62, 0x49, 0x6e, + 0x64, 0x65, 0x78, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x37, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x6d, 0x69, + 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x4d, 0x61, 0x70, 0x53, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x54, 0x6f, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb1, 0x01, 0x0a, 0x08, 0x4d, 0x69, + 0x72, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x36, 0x0a, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x61, 0x6e, 0x64, 0x2e, 0x4d, 0x69, 0x72, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x54, 0x61, 0x73, + 0x6b, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x20, + 0x0a, 0x0c, 0x68, 0x65, 0x61, 0x64, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x64, + 0x1a, 0x4b, 0x0a, 0x0a, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x11, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x54, 0x61, + 0x73, 0x6b, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 
0x01, 0x22, 0xaa, 0x05, + 0x0a, 0x04, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x29, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, + 0x6e, 0x64, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x1c, + 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2c, 0x0a, 0x05, + 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x69, + 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x4d, + 0x65, 0x74, 0x61, 0x52, 0x05, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, + 0x74, 0x75, 0x72, 0x6e, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x0a, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x72, + 0x65, 0x74, 0x75, 0x72, 0x6e, 0x5f, 0x6d, 0x73, 0x67, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x4d, 0x73, 0x67, 0x12, 0x37, 0x0a, 0x0a, 0x65, 0x76, + 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x45, 0x76, 0x61, + 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x3c, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x2e, 0x4e, 0x65, 0x77, 0x54, 0x79, 0x70, + 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x54, 0x79, 0x70, 0x65, + 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x77, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x5f, 0x61, + 0x64, 0x64, 0x65, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x6e, 0x65, 0x77, 0x54, + 0x79, 0x70, 0x65, 0x73, 0x41, 0x64, 0x64, 0x65, 0x64, 0x12, 0x3c, 0x0a, 0x1a, 0x73, 0x65, 0x72, + 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x66, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x73, + 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x3c, 0x0a, 0x1a, 0x73, 0x65, 0x72, 0x69, 0x61, + 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x67, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x73, 0x65, 0x72, + 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x72, 0x63, 0x5f, 0x72, 0x65, 0x76, + 0x73, 0x18, 0x68, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x72, 0x63, 0x52, 0x65, 0x76, 0x73, + 0x12, 0x17, 0x0a, 0x07, 0x64, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x76, 0x18, 0x69, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x64, 0x73, 0x74, 0x52, 0x65, 0x76, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x65, + 
0x63, 0x75, 0x74, 0x6f, 0x72, 0x18, 0x6a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x6f, 0x72, 0x1a, 0x3b, 0x0a, 0x0d, 0x4e, 0x65, 0x77, 0x54, 0x79, 0x70, 0x65, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x4a, 0x04, + 0x08, 0x64, 0x10, 0x65, 0x4a, 0x04, 0x08, 0x65, 0x10, 0x66, 0x22, 0xd3, 0x02, 0x0a, 0x09, 0x4d, + 0x6f, 0x64, 0x65, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x6f, 0x64, 0x65, + 0x6c, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x6f, + 0x64, 0x65, 0x6c, 0x48, 0x61, 0x73, 0x68, 0x12, 0x34, 0x0a, 0x16, 0x6d, 0x65, 0x61, 0x6e, 0x5f, + 0x61, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x63, 0x69, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x14, 0x6d, 0x65, 0x61, 0x6e, 0x41, 0x76, 0x65, + 0x72, 0x61, 0x67, 0x65, 0x50, 0x72, 0x65, 0x63, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, + 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x3a, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x67, 0x65, + 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, + 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x4d, 0x65, 0x74, 0x61, 0x2e, + 0x53, 0x74, 0x61, 0x67, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x73, 0x74, 0x61, + 0x67, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x62, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x67, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x62, 0x65, + 0x73, 0x74, 0x53, 0x74, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x63, + 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0a, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x1a, 0x52, 0x0a, 0x0b, + 0x53, 0x74, 0x61, 0x67, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2d, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, + 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x4d, 0x6f, 0x64, 0x65, 0x6c, + 0x53, 0x74, 0x61, 0x67, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0x71, 0x0a, 0x0a, 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x1d, + 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x73, 0x74, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, + 0x05, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, + 0x6c, 0x65, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x41, 0x50, 0x18, 0x04, 0x20, 0x01, 0x28, 0x02, 0x52, 0x03, + 0x6d, 0x41, 0x50, 0x22, 0xae, 0x03, 0x0a, 0x0a, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x33, 
0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, + 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x53, 0x0a, 0x12, 0x64, 0x61, 0x74, 0x61, 0x73, + 0x65, 0x74, 0x5f, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, + 0x64, 0x2e, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x45, + 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x64, 0x61, 0x74, 0x61, 0x73, + 0x65, 0x74, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x07, + 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x63, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x53, 0x69, 0x6e, 0x67, + 0x6c, 0x65, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x6d, 0x61, 0x69, 0x6e, 0x43, 0x6b, 0x12, 0x3c, 0x0a, 0x07, 0x73, + 0x75, 0x62, 0x5f, 0x63, 0x6b, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6d, + 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x75, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x75, 0x62, 0x43, 0x6b, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x06, 0x73, 0x75, 0x62, 0x43, 0x6b, 0x73, 0x12, 0x32, 0x0a, 0x05, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, + 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x5f, 0x0a, + 0x0b, 0x53, 0x75, 0x62, 0x43, 0x6b, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3a, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x53, 0x69, 0x6e, 0x67, + 0x6c, 0x65, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x4a, 0x04, + 0x08, 0x02, 0x10, 0x03, 0x22, 0xc3, 0x01, 0x0a, 0x0e, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, + 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x66, 0x5f, + 0x74, 0x68, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x02, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x66, 0x54, + 0x68, 0x72, 0x12, 0x2a, 0x0a, 0x11, 0x69, 0x6f, 0x75, 0x5f, 0x74, 0x68, 0x72, 0x73, 0x5f, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, + 0x6f, 0x75, 0x54, 0x68, 0x72, 0x73, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x22, + 0x0a, 0x0d, 0x6e, 0x65, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x5f, 0x63, 0x75, 0x72, 0x76, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6e, 0x65, 0x65, 0x64, 0x50, 0x72, 0x43, 0x75, 0x72, + 0x76, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x73, 0x18, + 0x07, 0x20, 0x03, 0x28, 0x05, 0x52, 0x08, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x49, 0x64, 0x73, 0x12, + 0x17, 0x0a, 0x07, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 
0x63, 0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x6d, 0x61, 0x69, 0x6e, 0x43, 0x6b, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, + 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x22, 0xe2, 0x02, 0x0a, 0x17, 0x53, + 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x45, 0x76, 0x61, 0x6c, + 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x66, 0x5f, 0x74, + 0x68, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x66, 0x54, 0x68, + 0x72, 0x12, 0x61, 0x0a, 0x0f, 0x69, 0x6f, 0x75, 0x5f, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x6d, 0x69, 0x72, + 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x44, + 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x49, 0x6f, 0x75, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x69, 0x6f, 0x75, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x17, 0x69, 0x6f, 0x75, 0x5f, 0x61, 0x76, 0x65, 0x72, + 0x61, 0x67, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x61, 0x6e, 0x64, 0x2e, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x49, 0x6f, 0x75, 0x45, 0x76, 0x61, + 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x15, 0x69, 0x6f, 0x75, 0x41, 0x76, 0x65, 0x72, + 0x61, 0x67, 0x65, 0x64, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x63, + 0x0a, 0x13, 0x49, 0x6f, 0x75, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x49, 0x6f, 0x75, 0x45, 0x76, + 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, + 0xbb, 0x02, 0x0a, 0x13, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x49, 0x6f, 0x75, 0x45, 0x76, 0x61, + 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x0e, 0x63, 0x69, 0x5f, 0x65, 0x76, + 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x33, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x53, 0x69, + 0x6e, 0x67, 0x6c, 0x65, 0x49, 0x6f, 0x75, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x43, 0x69, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x63, 0x69, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x5a, 0x0a, 0x16, 0x63, 0x69, 0x5f, 0x61, 0x76, 0x65, 0x72, 0x61, 0x67, + 0x65, 0x64, 0x5f, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, + 0x64, 0x2e, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x14, 0x63, 
0x69, 0x41, 0x76, 0x65, + 0x72, 0x61, 0x67, 0x65, 0x64, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, + 0x66, 0x0a, 0x12, 0x43, 0x69, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x45, 0x76, 0x61, 0x6c, 0x75, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, 0x9d, 0x01, + 0x0a, 0x17, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x61, 0x70, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x02, 0x61, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x61, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x02, 0x61, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x74, 0x70, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x74, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x66, 0x70, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x66, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x66, 0x6e, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x66, 0x6e, 0x12, 0x32, 0x0a, 0x08, 0x70, 0x72, 0x5f, + 0x63, 0x75, 0x72, 0x76, 0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6d, 0x69, + 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x50, + 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x07, 0x70, 0x72, 0x43, 0x75, 0x72, 0x76, 0x65, 0x22, 0x34, 0x0a, + 0x08, 0x49, 0x6e, 0x74, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x0c, 0x0a, 0x01, 0x78, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x01, 0x78, 0x12, 0x0c, 0x0a, 0x01, 0x79, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x01, 0x79, 0x12, 0x0c, 0x0a, 0x01, 0x7a, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x01, 0x7a, 0x22, 0x36, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x50, 0x6f, 0x69, 0x6e, + 0x74, 0x12, 0x0c, 0x0a, 0x01, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x02, 0x52, 0x01, 0x78, 0x12, + 0x0c, 0x0a, 0x01, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x01, 0x79, 0x12, 0x0c, 0x0a, + 0x01, 0x7a, 0x18, 0x03, 0x20, 0x01, 0x28, 0x02, 0x52, 0x01, 0x7a, 0x22, 0x8f, 0x03, 0x0a, 0x0a, + 0x4d, 0x69, 0x72, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6d, + 0x61, 0x67, 0x65, 0x73, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, + 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x43, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x07, 0x63, 0x6b, 0x73, + 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6d, 0x69, 0x72, + 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x4d, 0x69, 0x72, 0x43, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x2e, 0x43, 0x6b, 0x73, 0x43, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x06, 0x63, 0x6b, 0x73, 0x43, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, + 0x5f, 0x61, 0x73, 0x73, 0x65, 0x74, 0x5f, 0x6d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x41, 0x73, 0x73, 0x65, 0x74, 0x4d, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x0a, 0x70, 0x72, 0x65, 0x64, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x73, 0x18, 0x64, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x69, 0x72, 0x2e, + 
0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x74, + 0x73, 0x52, 0x09, 0x70, 0x72, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x31, 0x0a, 0x08, + 0x67, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x65, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x41, 0x6e, 0x6e, + 0x6f, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x07, 0x67, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x1a, + 0x56, 0x0a, 0x0b, 0x43, 0x6b, 0x73, 0x43, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x31, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1b, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x53, 0x69, + 0x6e, 0x67, 0x6c, 0x65, 0x4d, 0x61, 0x70, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, + 0x03, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, + 0x04, 0x08, 0x07, 0x10, 0x08, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, + 0x0a, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x22, 0x9f, 0x01, + 0x0a, 0x0e, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x4d, 0x61, 0x70, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x12, 0x10, 0x0a, 0x03, 0x63, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x63, + 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, + 0x64, 0x2e, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x4d, 0x61, 0x70, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x2e, 0x53, 0x75, 0x62, 0x43, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x73, 0x75, + 0x62, 0x43, 0x6e, 0x74, 0x1a, 0x39, 0x0a, 0x0b, 0x53, 0x75, 0x62, 0x43, 0x6e, 0x74, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0xe2, 0x03, 0x0a, 0x09, 0x41, 0x6e, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x1b, 0x0a, + 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x08, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x43, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, + 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x61, 0x73, 0x73, 0x65, 0x74, 0x5f, 0x63, 0x6e, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, + 0x41, 0x73, 0x73, 0x65, 0x74, 0x43, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, + 0x74, 0x69, 0x76, 0x65, 0x5f, 0x61, 0x73, 0x73, 0x65, 0x74, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x41, 0x73, + 0x73, 0x65, 0x74, 0x43, 0x6e, 0x74, 0x12, 0x3e, 0x0a, 0x08, 0x74, 0x61, 0x67, 0x73, 0x5f, 0x63, + 0x6e, 0x74, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6d, 0x69, 0x72, 0x2e, 0x63, + 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x74, 0x73, + 0x2e, 0x54, 0x61, 0x67, 0x73, 0x43, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x74, + 0x61, 0x67, 0x73, 0x43, 
0x6e, 0x74, 0x12, 0x4b, 0x0a, 0x0d, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, + 0x69, 0x64, 0x73, 0x5f, 0x63, 0x6e, 0x74, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, + 0x6d, 0x69, 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, + 0x53, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x49, 0x64, 0x73, 0x43, 0x6e, + 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x49, 0x64, 0x73, + 0x43, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x65, 0x76, 0x61, 0x6c, 0x5f, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0c, 0x65, 0x76, 0x61, + 0x6c, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x49, 0x64, 0x73, 0x1a, 0x57, 0x0a, 0x0c, 0x54, 0x61, 0x67, + 0x73, 0x43, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x31, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x69, 0x72, + 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x4d, + 0x61, 0x70, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x1a, 0x3e, 0x0a, 0x10, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x49, 0x64, 0x73, 0x43, 0x6e, + 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, + 0x08, 0x06, 0x10, 0x07, 0x22, 0xbf, 0x04, 0x0a, 0x0c, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3b, 0x0a, 0x0c, 0x61, 0x73, 0x73, 0x65, 0x74, 0x5f, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x6d, 0x69, + 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x41, 0x73, 0x73, 0x65, 0x74, 0x46, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x0b, 0x61, 0x73, 0x73, 0x65, 0x74, 0x46, 0x6f, 0x72, 0x6d, + 0x61, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x73, 0x73, 0x65, 0x74, 0x5f, 0x64, 0x69, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x73, 0x73, 0x65, 0x74, 0x44, 0x69, 0x72, 0x12, + 0x28, 0x0a, 0x10, 0x61, 0x73, 0x73, 0x65, 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x5f, 0x66, + 0x69, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x73, 0x73, 0x65, 0x74, + 0x49, 0x6e, 0x64, 0x65, 0x78, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x61, 0x73, 0x73, + 0x65, 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x73, 0x73, 0x65, 0x74, 0x49, 0x6e, 0x64, 0x65, + 0x78, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x65, 0x64, 0x69, 0x61, + 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, + 0x0a, 0x0f, 0x6e, 0x65, 0x65, 0x64, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x66, 0x6f, 0x6c, 0x64, 0x65, + 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x6e, 0x65, 0x65, 0x64, 0x53, 0x75, 0x62, + 0x46, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x12, 0x38, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x5f, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x32, 0x20, 
0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x6d, 0x69, + 0x72, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x46, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, + 0x12, 0x15, 0x0a, 0x06, 0x67, 0x74, 0x5f, 0x64, 0x69, 0x72, 0x18, 0x33, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x67, 0x74, 0x44, 0x69, 0x72, 0x12, 0x22, 0x0a, 0x0d, 0x67, 0x74, 0x5f, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x34, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x67, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x67, + 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x35, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x67, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x50, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x64, 0x5f, 0x64, 0x69, 0x72, 0x18, + 0x36, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x72, 0x65, 0x64, 0x44, 0x69, 0x72, 0x12, 0x26, + 0x0a, 0x0f, 0x70, 0x72, 0x65, 0x64, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x5f, 0x66, 0x69, 0x6c, + 0x65, 0x18, 0x37, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x49, 0x6e, 0x64, + 0x65, 0x78, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x70, 0x72, 0x65, 0x64, 0x5f, 0x69, + 0x6e, 0x64, 0x65, 0x78, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x38, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0f, 0x70, 0x72, 0x65, 0x64, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x50, 0x72, 0x65, 0x66, + 0x69, 0x78, 0x12, 0x22, 0x0a, 0x0d, 0x74, 0x76, 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x5f, + 0x64, 0x69, 0x72, 0x18, 0x39, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x74, 0x76, 0x74, 0x49, 0x6e, + 0x64, 0x65, 0x78, 0x44, 0x69, 0x72, 0x2a, 0x5a, 0x0a, 0x07, 0x54, 0x76, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x12, 0x0a, 0x0e, 0x54, 0x76, 0x74, 0x54, 0x79, 0x70, 0x65, 0x55, 0x6e, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x76, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x54, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x76, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x10, + 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x76, 0x74, 0x54, 0x79, 0x70, 0x65, 0x54, 0x65, 0x73, 0x74, + 0x10, 0x03, 0x2a, 0x88, 0x02, 0x0a, 0x09, 0x41, 0x73, 0x73, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x14, 0x0a, 0x10, 0x41, 0x73, 0x73, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x55, 0x6e, 0x6b, + 0x6e, 0x6f, 0x77, 0x6e, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x73, 0x73, 0x65, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x4a, 0x70, 0x65, 0x67, 0x10, 0x01, 0x12, 0x15, + 0x0a, 0x11, 0x41, 0x73, 0x73, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, + 0x50, 0x6e, 0x67, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, 0x41, 0x73, 0x73, 0x65, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x50, 0x69, 0x78, 0x65, 0x6c, 0x4d, 0x61, 0x74, 0x10, + 0x03, 0x12, 0x19, 0x0a, 0x15, 0x41, 0x73, 0x73, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x49, 0x6d, + 0x61, 0x67, 0x65, 0x59, 0x75, 0x76, 0x34, 0x32, 0x30, 0x70, 0x10, 0x04, 0x12, 0x1a, 0x0a, 0x16, + 0x41, 0x73, 0x73, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x59, 0x75, + 0x76, 0x34, 0x32, 0x30, 0x73, 0x70, 0x10, 0x05, 0x12, 0x19, 0x0a, 0x15, 0x41, 0x73, 0x73, 0x65, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x59, 0x75, 0x76, 0x34, 0x32, 0x32, + 0x70, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x41, 0x73, 0x73, 0x65, 0x74, 
0x54, 0x79, 0x70, 0x65, + 0x49, 0x6d, 0x61, 0x67, 0x65, 0x59, 0x75, 0x76, 0x34, 0x32, 0x32, 0x73, 0x70, 0x10, 0x07, 0x12, + 0x15, 0x0a, 0x11, 0x41, 0x73, 0x73, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x49, 0x6d, 0x61, 0x67, + 0x65, 0x42, 0x6d, 0x70, 0x10, 0x08, 0x12, 0x15, 0x0a, 0x11, 0x41, 0x73, 0x73, 0x65, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x56, 0x69, 0x64, 0x65, 0x6f, 0x4d, 0x70, 0x34, 0x10, 0x65, 0x2a, 0xf8, 0x02, + 0x0a, 0x08, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x13, 0x0a, 0x0f, 0x54, 0x61, + 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x10, 0x00, 0x12, + 0x14, 0x0a, 0x10, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x54, 0x72, 0x61, 0x69, 0x6e, + 0x69, 0x6e, 0x67, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, + 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x61, 0x73, + 0x6b, 0x54, 0x79, 0x70, 0x65, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, + 0x54, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x10, 0x04, + 0x12, 0x16, 0x0a, 0x12, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x49, 0x6d, 0x70, 0x6f, + 0x72, 0x74, 0x44, 0x61, 0x74, 0x61, 0x10, 0x05, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x61, 0x73, 0x6b, + 0x54, 0x79, 0x70, 0x65, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x44, 0x61, 0x74, 0x61, 0x10, 0x06, + 0x12, 0x14, 0x0a, 0x10, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x43, 0x6f, 0x70, 0x79, + 0x44, 0x61, 0x74, 0x61, 0x10, 0x07, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x79, + 0x70, 0x65, 0x4d, 0x65, 0x72, 0x67, 0x65, 0x10, 0x08, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x61, 0x73, + 0x6b, 0x54, 0x79, 0x70, 0x65, 0x49, 0x6e, 0x66, 0x65, 0x72, 0x10, 0x09, 0x12, 0x14, 0x0a, 0x10, + 0x54, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, + 0x10, 0x0a, 0x12, 0x12, 0x0a, 0x0e, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x46, 0x75, + 0x73, 0x69, 0x6f, 0x6e, 0x10, 0x0b, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x79, + 0x70, 0x65, 0x49, 0x6e, 0x69, 0x74, 0x10, 0x0c, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x61, 0x73, 0x6b, + 0x54, 0x79, 0x70, 0x65, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x10, + 0x0d, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x43, 0x6f, 0x70, + 0x79, 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x10, 0x0e, 0x12, 0x18, 0x0a, 0x14, 0x54, 0x61, 0x73, 0x6b, + 0x54, 0x79, 0x70, 0x65, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x49, 0x6e, 0x66, 0x65, 0x72, + 0x10, 0x0f, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x45, 0x76, + 0x61, 0x6c, 0x75, 0x61, 0x74, 0x65, 0x10, 0x10, 0x2a, 0x87, 0x01, 0x0a, 0x09, 0x54, 0x61, 0x73, + 0x6b, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, + 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x65, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, + 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x61, 0x73, 0x6b, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x44, 0x6f, 0x6e, 0x65, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x54, + 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x10, 0x04, 0x12, + 0x11, 0x0a, 0x0d, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x65, 0x4d, 0x69, 0x73, 0x73, + 
0x10, 0x05, 0x2a, 0x4c, 0x0a, 0x08, 0x53, 0x68, 0x61, 0x31, 0x54, 0x79, 0x70, 0x65, 0x12, 0x15, + 0x0a, 0x11, 0x53, 0x48, 0x41, 0x31, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x48, 0x41, 0x31, 0x5f, 0x54, 0x59, + 0x50, 0x45, 0x5f, 0x41, 0x53, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x48, + 0x41, 0x31, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x02, + 0x2a, 0x66, 0x0a, 0x0a, 0x4d, 0x69, 0x72, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x11, + 0x0a, 0x0d, 0x4d, 0x49, 0x52, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x53, 0x10, + 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4d, 0x49, 0x52, 0x5f, 0x41, 0x4e, 0x4e, 0x4f, 0x54, 0x41, 0x54, + 0x49, 0x4f, 0x4e, 0x53, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x4d, 0x49, 0x52, 0x5f, 0x4b, 0x45, + 0x59, 0x57, 0x4f, 0x52, 0x44, 0x53, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x4d, 0x49, 0x52, 0x5f, + 0x54, 0x41, 0x53, 0x4b, 0x53, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 0x49, 0x52, 0x5f, 0x43, + 0x4f, 0x4e, 0x54, 0x45, 0x58, 0x54, 0x10, 0x04, 0x2a, 0x87, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, + 0x6f, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x14, 0x0a, 0x10, 0x41, 0x46, 0x5f, 0x4e, 0x4f, + 0x5f, 0x41, 0x4e, 0x4e, 0x4f, 0x54, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, + 0x11, 0x41, 0x46, 0x5f, 0x44, 0x45, 0x54, 0x5f, 0x50, 0x41, 0x53, 0x43, 0x41, 0x4c, 0x5f, 0x56, + 0x4f, 0x43, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x41, 0x46, 0x5f, 0x44, 0x45, 0x54, 0x5f, 0x41, + 0x52, 0x4b, 0x5f, 0x4a, 0x53, 0x4f, 0x4e, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x41, 0x46, 0x5f, + 0x44, 0x45, 0x54, 0x5f, 0x4c, 0x53, 0x5f, 0x4a, 0x53, 0x4f, 0x4e, 0x10, 0x03, 0x12, 0x12, 0x0a, + 0x0e, 0x41, 0x46, 0x5f, 0x53, 0x45, 0x47, 0x5f, 0x50, 0x4f, 0x4c, 0x59, 0x47, 0x4f, 0x4e, 0x10, + 0x04, 0x12, 0x0f, 0x0a, 0x0b, 0x41, 0x46, 0x5f, 0x53, 0x45, 0x47, 0x5f, 0x4d, 0x41, 0x53, 0x4b, + 0x10, 0x05, 0x2a, 0x36, 0x0a, 0x0b, 0x41, 0x73, 0x73, 0x65, 0x74, 0x46, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x46, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x46, 0x5f, 0x52, 0x41, 0x57, 0x10, 0x01, 0x12, 0x0b, 0x0a, + 0x07, 0x41, 0x46, 0x5f, 0x4c, 0x4d, 0x44, 0x42, 0x10, 0x02, 0x2a, 0x5d, 0x0a, 0x08, 0x41, 0x6e, + 0x6e, 0x6f, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x41, 0x54, 0x5f, 0x43, 0x4c, 0x41, + 0x53, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x54, 0x5f, 0x44, 0x45, 0x54, 0x5f, 0x42, + 0x4f, 0x58, 0x10, 0x02, 0x12, 0x12, 0x0a, 0x0e, 0x41, 0x54, 0x5f, 0x53, 0x45, 0x47, 0x5f, 0x50, + 0x4f, 0x4c, 0x59, 0x47, 0x4f, 0x4e, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x41, 0x54, 0x5f, 0x53, + 0x45, 0x47, 0x5f, 0x4d, 0x41, 0x53, 0x4b, 0x10, 0x04, 0x2a, 0x64, 0x0a, 0x13, 0x43, 0x6f, 0x6e, + 0x66, 0x75, 0x73, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x72, 0x69, 0x78, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x6f, 0x74, 0x53, 0x65, 0x74, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, + 0x54, 0x50, 0x10, 0x01, 0x12, 0x06, 0x0a, 0x02, 0x46, 0x50, 0x10, 0x02, 0x12, 0x06, 0x0a, 0x02, + 0x46, 0x4e, 0x10, 0x03, 0x12, 0x06, 0x0a, 0x02, 0x54, 0x4e, 0x10, 0x04, 0x12, 0x0b, 0x0a, 0x07, + 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x10, 0x05, 0x12, 0x07, 0x0a, 0x03, 0x4d, 0x54, 0x50, + 0x10, 0x0b, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x47, 0x4e, 0x4f, 0x52, 0x45, 0x44, 0x10, 0x0c, 0x2a, + 0x70, 0x0a, 0x0f, 0x45, 
0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x53, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x53, 0x45, 0x54, + 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x53, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x59, 0x10, 0x01, + 0x12, 0x14, 0x0a, 0x10, 0x45, 0x53, 0x5f, 0x4e, 0x4f, 0x5f, 0x47, 0x54, 0x5f, 0x4f, 0x52, 0x5f, + 0x50, 0x52, 0x45, 0x44, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x45, 0x53, 0x5f, 0x45, 0x58, 0x43, + 0x45, 0x45, 0x44, 0x53, 0x5f, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x10, 0x03, 0x12, 0x13, 0x0a, 0x0f, + 0x45, 0x53, 0x5f, 0x4e, 0x4f, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x49, 0x44, 0x53, 0x10, + 0x04, 0x42, 0x09, 0x5a, 0x07, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_mir_command_proto_rawDescOnce sync.Once + file_mir_command_proto_rawDescData = file_mir_command_proto_rawDesc +) + +func file_mir_command_proto_rawDescGZIP() []byte { + file_mir_command_proto_rawDescOnce.Do(func() { + file_mir_command_proto_rawDescData = protoimpl.X.CompressGZIP(file_mir_command_proto_rawDescData) + }) + return file_mir_command_proto_rawDescData +} + +var file_mir_command_proto_enumTypes = make([]protoimpl.EnumInfo, 11) +var file_mir_command_proto_msgTypes = make([]protoimpl.MessageInfo, 53) +var file_mir_command_proto_goTypes = []interface{}{ + (TvtType)(0), // 0: mir.command.TvtType + (AssetType)(0), // 1: mir.command.AssetType + (TaskType)(0), // 2: mir.command.TaskType + (TaskState)(0), // 3: mir.command.TaskState + (Sha1Type)(0), // 4: mir.command.Sha1Type + (MirStorage)(0), // 5: mir.command.MirStorage + (AnnoFormat)(0), // 6: mir.command.AnnoFormat + (AssetFormat)(0), // 7: mir.command.AssetFormat + (AnnoType)(0), // 8: mir.command.AnnoType + (ConfusionMatrixType)(0), // 9: mir.command.ConfusionMatrixType + (EvaluationState)(0), // 10: mir.command.EvaluationState + (*MirMetadatas)(nil), // 11: mir.command.MirMetadatas + (*MetadataAttributes)(nil), // 12: mir.command.MetadataAttributes + (*Timestamp)(nil), // 13: mir.command.Timestamp + (*MirAnnotations)(nil), // 14: mir.command.MirAnnotations + (*SingleTaskAnnotations)(nil), // 15: mir.command.SingleTaskAnnotations + (*SingleImageAnnotations)(nil), // 16: mir.command.SingleImageAnnotations + (*SingleImageCks)(nil), // 17: mir.command.SingleImageCks + (*MaskAnnotation)(nil), // 18: mir.command.MaskAnnotation + (*ObjectAnnotation)(nil), // 19: mir.command.ObjectAnnotation + (*Rect)(nil), // 20: mir.command.Rect + (*MirKeywords)(nil), // 21: mir.command.MirKeywords + (*CiTagToIndex)(nil), // 22: mir.command.CiTagToIndex + (*StringList)(nil), // 23: mir.command.StringList + (*MapStringToInt32List)(nil), // 24: mir.command.MapStringToInt32List + (*Int32List)(nil), // 25: mir.command.Int32List + (*AssetAnnoIndex)(nil), // 26: mir.command.AssetAnnoIndex + (*MirTasks)(nil), // 27: mir.command.MirTasks + (*Task)(nil), // 28: mir.command.Task + (*ModelMeta)(nil), // 29: mir.command.ModelMeta + (*ModelStage)(nil), // 30: mir.command.ModelStage + (*Evaluation)(nil), // 31: mir.command.Evaluation + (*EvaluateConfig)(nil), // 32: mir.command.EvaluateConfig + (*SingleDatasetEvaluation)(nil), // 33: mir.command.SingleDatasetEvaluation + (*SingleIouEvaluation)(nil), // 34: mir.command.SingleIouEvaluation + (*SingleEvaluationElement)(nil), // 35: mir.command.SingleEvaluationElement + (*IntPoint)(nil), // 36: mir.command.IntPoint + (*FloatPoint)(nil), // 37: mir.command.FloatPoint + (*MirContext)(nil), // 38: mir.command.MirContext + 
(*SingleMapCount)(nil), // 39: mir.command.SingleMapCount + (*AnnoStats)(nil), // 40: mir.command.AnnoStats + (*ExportConfig)(nil), // 41: mir.command.ExportConfig + nil, // 42: mir.command.MirMetadatas.AttributesEntry + nil, // 43: mir.command.MirAnnotations.ImageCksEntry + nil, // 44: mir.command.SingleTaskAnnotations.ImageAnnotationsEntry + nil, // 45: mir.command.SingleTaskAnnotations.MapIdColorEntry + nil, // 46: mir.command.SingleImageCks.CksEntry + nil, // 47: mir.command.ObjectAnnotation.TagsEntry + nil, // 48: mir.command.MirKeywords.CkIdxEntry + nil, // 49: mir.command.CiTagToIndex.CisEntry + nil, // 50: mir.command.CiTagToIndex.TagsEntry + nil, // 51: mir.command.MapStringToInt32List.KeyIdsEntry + nil, // 52: mir.command.AssetAnnoIndex.AssetAnnosEntry + nil, // 53: mir.command.AssetAnnoIndex.SubIndexesEntry + nil, // 54: mir.command.MirTasks.TasksEntry + nil, // 55: mir.command.Task.NewTypesEntry + nil, // 56: mir.command.ModelMeta.StagesEntry + nil, // 57: mir.command.Evaluation.SubCksEntry + nil, // 58: mir.command.SingleDatasetEvaluation.IouEvaluationsEntry + nil, // 59: mir.command.SingleIouEvaluation.CiEvaluationsEntry + nil, // 60: mir.command.MirContext.CksCntEntry + nil, // 61: mir.command.SingleMapCount.SubCntEntry + nil, // 62: mir.command.AnnoStats.TagsCntEntry + nil, // 63: mir.command.AnnoStats.ClassIdsCntEntry +} +var file_mir_command_proto_depIdxs = []int32{ + 42, // 0: mir.command.MirMetadatas.attributes:type_name -> mir.command.MirMetadatas.AttributesEntry + 13, // 1: mir.command.MetadataAttributes.timestamp:type_name -> mir.command.Timestamp + 0, // 2: mir.command.MetadataAttributes.tvt_type:type_name -> mir.command.TvtType + 1, // 3: mir.command.MetadataAttributes.asset_type:type_name -> mir.command.AssetType + 15, // 4: mir.command.MirAnnotations.ground_truth:type_name -> mir.command.SingleTaskAnnotations + 15, // 5: mir.command.MirAnnotations.prediction:type_name -> mir.command.SingleTaskAnnotations + 43, // 6: mir.command.MirAnnotations.image_cks:type_name -> mir.command.MirAnnotations.ImageCksEntry + 44, // 7: mir.command.SingleTaskAnnotations.image_annotations:type_name -> mir.command.SingleTaskAnnotations.ImageAnnotationsEntry + 8, // 8: mir.command.SingleTaskAnnotations.type:type_name -> mir.command.AnnoType + 45, // 9: mir.command.SingleTaskAnnotations.map_id_color:type_name -> mir.command.SingleTaskAnnotations.MapIdColorEntry + 29, // 10: mir.command.SingleTaskAnnotations.model:type_name -> mir.command.ModelMeta + 19, // 11: mir.command.SingleImageAnnotations.boxes:type_name -> mir.command.ObjectAnnotation + 19, // 12: mir.command.SingleImageAnnotations.polygons:type_name -> mir.command.ObjectAnnotation + 18, // 13: mir.command.SingleImageAnnotations.mask:type_name -> mir.command.MaskAnnotation + 46, // 14: mir.command.SingleImageCks.cks:type_name -> mir.command.SingleImageCks.CksEntry + 20, // 15: mir.command.ObjectAnnotation.box:type_name -> mir.command.Rect + 47, // 16: mir.command.ObjectAnnotation.tags:type_name -> mir.command.ObjectAnnotation.TagsEntry + 9, // 17: mir.command.ObjectAnnotation.cm:type_name -> mir.command.ConfusionMatrixType + 36, // 18: mir.command.ObjectAnnotation.polygon:type_name -> mir.command.IntPoint + 22, // 19: mir.command.MirKeywords.pred_idx:type_name -> mir.command.CiTagToIndex + 22, // 20: mir.command.MirKeywords.gt_idx:type_name -> mir.command.CiTagToIndex + 48, // 21: mir.command.MirKeywords.ck_idx:type_name -> mir.command.MirKeywords.CkIdxEntry + 49, // 22: mir.command.CiTagToIndex.cis:type_name -> 
mir.command.CiTagToIndex.CisEntry + 50, // 23: mir.command.CiTagToIndex.tags:type_name -> mir.command.CiTagToIndex.TagsEntry + 51, // 24: mir.command.MapStringToInt32List.key_ids:type_name -> mir.command.MapStringToInt32List.KeyIdsEntry + 52, // 25: mir.command.AssetAnnoIndex.asset_annos:type_name -> mir.command.AssetAnnoIndex.AssetAnnosEntry + 53, // 26: mir.command.AssetAnnoIndex.sub_indexes:type_name -> mir.command.AssetAnnoIndex.SubIndexesEntry + 54, // 27: mir.command.MirTasks.tasks:type_name -> mir.command.MirTasks.TasksEntry + 2, // 28: mir.command.Task.type:type_name -> mir.command.TaskType + 29, // 29: mir.command.Task.model:type_name -> mir.command.ModelMeta + 31, // 30: mir.command.Task.evaluation:type_name -> mir.command.Evaluation + 55, // 31: mir.command.Task.new_types:type_name -> mir.command.Task.NewTypesEntry + 56, // 32: mir.command.ModelMeta.stages:type_name -> mir.command.ModelMeta.StagesEntry + 32, // 33: mir.command.Evaluation.config:type_name -> mir.command.EvaluateConfig + 33, // 34: mir.command.Evaluation.dataset_evaluation:type_name -> mir.command.SingleDatasetEvaluation + 33, // 35: mir.command.Evaluation.main_ck:type_name -> mir.command.SingleDatasetEvaluation + 57, // 36: mir.command.Evaluation.sub_cks:type_name -> mir.command.Evaluation.SubCksEntry + 10, // 37: mir.command.Evaluation.state:type_name -> mir.command.EvaluationState + 58, // 38: mir.command.SingleDatasetEvaluation.iou_evaluations:type_name -> mir.command.SingleDatasetEvaluation.IouEvaluationsEntry + 34, // 39: mir.command.SingleDatasetEvaluation.iou_averaged_evaluation:type_name -> mir.command.SingleIouEvaluation + 59, // 40: mir.command.SingleIouEvaluation.ci_evaluations:type_name -> mir.command.SingleIouEvaluation.CiEvaluationsEntry + 35, // 41: mir.command.SingleIouEvaluation.ci_averaged_evaluation:type_name -> mir.command.SingleEvaluationElement + 37, // 42: mir.command.SingleEvaluationElement.pr_curve:type_name -> mir.command.FloatPoint + 60, // 43: mir.command.MirContext.cks_cnt:type_name -> mir.command.MirContext.CksCntEntry + 40, // 44: mir.command.MirContext.pred_stats:type_name -> mir.command.AnnoStats + 40, // 45: mir.command.MirContext.gt_stats:type_name -> mir.command.AnnoStats + 61, // 46: mir.command.SingleMapCount.sub_cnt:type_name -> mir.command.SingleMapCount.SubCntEntry + 62, // 47: mir.command.AnnoStats.tags_cnt:type_name -> mir.command.AnnoStats.TagsCntEntry + 63, // 48: mir.command.AnnoStats.class_ids_cnt:type_name -> mir.command.AnnoStats.ClassIdsCntEntry + 7, // 49: mir.command.ExportConfig.asset_format:type_name -> mir.command.AssetFormat + 6, // 50: mir.command.ExportConfig.anno_format:type_name -> mir.command.AnnoFormat + 12, // 51: mir.command.MirMetadatas.AttributesEntry.value:type_name -> mir.command.MetadataAttributes + 17, // 52: mir.command.MirAnnotations.ImageCksEntry.value:type_name -> mir.command.SingleImageCks + 16, // 53: mir.command.SingleTaskAnnotations.ImageAnnotationsEntry.value:type_name -> mir.command.SingleImageAnnotations + 36, // 54: mir.command.SingleTaskAnnotations.MapIdColorEntry.value:type_name -> mir.command.IntPoint + 26, // 55: mir.command.MirKeywords.CkIdxEntry.value:type_name -> mir.command.AssetAnnoIndex + 24, // 56: mir.command.CiTagToIndex.CisEntry.value:type_name -> mir.command.MapStringToInt32List + 26, // 57: mir.command.CiTagToIndex.TagsEntry.value:type_name -> mir.command.AssetAnnoIndex + 25, // 58: mir.command.MapStringToInt32List.KeyIdsEntry.value:type_name -> mir.command.Int32List + 25, // 59: 
mir.command.AssetAnnoIndex.AssetAnnosEntry.value:type_name -> mir.command.Int32List + 24, // 60: mir.command.AssetAnnoIndex.SubIndexesEntry.value:type_name -> mir.command.MapStringToInt32List + 28, // 61: mir.command.MirTasks.TasksEntry.value:type_name -> mir.command.Task + 30, // 62: mir.command.ModelMeta.StagesEntry.value:type_name -> mir.command.ModelStage + 33, // 63: mir.command.Evaluation.SubCksEntry.value:type_name -> mir.command.SingleDatasetEvaluation + 34, // 64: mir.command.SingleDatasetEvaluation.IouEvaluationsEntry.value:type_name -> mir.command.SingleIouEvaluation + 35, // 65: mir.command.SingleIouEvaluation.CiEvaluationsEntry.value:type_name -> mir.command.SingleEvaluationElement + 39, // 66: mir.command.MirContext.CksCntEntry.value:type_name -> mir.command.SingleMapCount + 39, // 67: mir.command.AnnoStats.TagsCntEntry.value:type_name -> mir.command.SingleMapCount + 68, // [68:68] is the sub-list for method output_type + 68, // [68:68] is the sub-list for method input_type + 68, // [68:68] is the sub-list for extension type_name + 68, // [68:68] is the sub-list for extension extendee + 0, // [0:68] is the sub-list for field type_name +} + +func init() { file_mir_command_proto_init() } +func file_mir_command_proto_init() { + if File_mir_command_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_mir_command_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MirMetadatas); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mir_command_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetadataAttributes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mir_command_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Timestamp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mir_command_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MirAnnotations); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mir_command_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SingleTaskAnnotations); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mir_command_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SingleImageAnnotations); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mir_command_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SingleImageCks); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mir_command_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MaskAnnotation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mir_command_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ObjectAnnotation); i { + case 0: + return 
&v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mir_command_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Rect); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mir_command_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MirKeywords); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mir_command_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CiTagToIndex); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mir_command_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StringList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mir_command_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MapStringToInt32List); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mir_command_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Int32List); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mir_command_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AssetAnnoIndex); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mir_command_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MirTasks); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mir_command_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Task); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mir_command_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ModelMeta); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mir_command_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ModelStage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mir_command_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Evaluation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mir_command_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EvaluateConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mir_command_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SingleDatasetEvaluation); i { + case 0: + 
return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mir_command_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SingleIouEvaluation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mir_command_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SingleEvaluationElement); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mir_command_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*IntPoint); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mir_command_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FloatPoint); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mir_command_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MirContext); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mir_command_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SingleMapCount); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mir_command_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AnnoStats); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_mir_command_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExportConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_mir_command_proto_rawDesc, + NumEnums: 11, + NumMessages: 53, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_mir_command_proto_goTypes, + DependencyIndexes: file_mir_command_proto_depIdxs, + EnumInfos: file_mir_command_proto_enumTypes, + MessageInfos: file_mir_command_proto_msgTypes, + }.Build() + File_mir_command_proto = out.File + file_mir_command_proto_rawDesc = nil + file_mir_command_proto_goTypes = nil + file_mir_command_proto_depIdxs = nil +} diff --git a/ymir/backend/src/ymir_hel/run_tests.sh b/ymir/backend/src/ymir_hel/run_tests.sh new file mode 100644 index 0000000000..4257aac1c9 --- /dev/null +++ b/ymir/backend/src/ymir_hel/run_tests.sh @@ -0,0 +1,7 @@ +#!/bin/bash +set -e + +go vet ./... +staticcheck ./... +go test -race -coverprofile=profile.cov -vet=off $(go list ./... 
| grep -v -e protos -e common/constants -e docs) +go tool cover -func profile.cov && go tool cover -html profile.cov && rm profile.cov diff --git a/ymir/backend/src/ymir_hel/viewer/docs/docs.go b/ymir/backend/src/ymir_hel/viewer/docs/docs.go new file mode 100644 index 0000000000..7afc48a444 --- /dev/null +++ b/ymir/backend/src/ymir_hel/viewer/docs/docs.go @@ -0,0 +1,318 @@ +// Package docs GENERATED BY SWAG; DO NOT EDIT +// This file was generated by swaggo/swag +package docs + +import "github.com/swaggo/swag" + +const docTemplate = `{ + "schemes": {{ marshal .Schemes }}, + "swagger": "2.0", + "info": { + "description": "{{escape .Description}}", + "title": "{{.Title}}", + "contact": {}, + "version": "{{.Version}}" + }, + "host": "{{.Host}}", + "basePath": "{{.BasePath}}", + "paths": { + "/api/v1/users/{userID}/repo/{repoID}/branch/{branchID}/assets": { + "get": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "summary": "Query single or set of assets.", + "parameters": [ + { + "type": "string", + "description": "User ID", + "name": "userID", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Repo ID", + "name": "repoID", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Branch ID", + "name": "branchID", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Offset, default is 0", + "name": "offset", + "in": "query" + }, + { + "type": "string", + "description": "limit, default is 1", + "name": "limit", + "in": "query" + }, + { + "type": "string", + "description": "e.g. class_ids=1,3,7", + "name": "class_ids", + "in": "query" + }, + { + "type": "string", + "description": "e.g. annotation_types=GT,PRED", + "name": "annotation_types", + "in": "query" + }, + { + "type": "string", + "description": "e.g. current_asset_id=xxxyyyzzz", + "name": "current_asset_id", + "in": "query" + }, + { + "type": "string", + "description": "e.g. cm_types=0,1,2,3 NotSet=0,TP=1,FP=2,FN=3,TN=4,Unknown=5,MTP=11,IGNORED=12", + "name": "cm_types", + "in": "query" + }, + { + "type": "string", + "description": "ck pairs, e.g. cks=xxx,xxx:,xxx:yyy, e.g. camera_id:1", + "name": "cks", + "in": "query" + }, + { + "type": "string", + "description": "tag pairs, e.g. cks=xxx,xxx:,xxx:yyy, e.g. 
camera_id:1", + "name": "tags", + "in": "query" + } + ], + "responses": { + "200": { + "description": "'code': 0, 'msg': 'Success', 'Success': true, 'result': constants.QueryAssetsResult", + "schema": { + "type": "string" + } + } + } + } + }, + "/api/v1/users/{userID}/repo/{repoID}/branch/{branchID}/dataset_meta_count": { + "get": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "summary": "Query dataset info, lightweight api.", + "parameters": [ + { + "type": "string", + "description": "User ID", + "name": "userID", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Repo ID", + "name": "repoID", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Branch ID", + "name": "branchID", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "'code': 0, 'msg': 'Success', 'Success': true, 'result': constants.QueryDatasetStatsResult", + "schema": { + "type": "string" + } + } + } + } + }, + "/api/v1/users/{userID}/repo/{repoID}/branch/{branchID}/dataset_stats": { + "get": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "summary": "Query dataset Stats.", + "parameters": [ + { + "type": "string", + "description": "User ID", + "name": "userID", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Repo ID", + "name": "repoID", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Branch ID", + "name": "branchID", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "e.g. class_ids=1,3,7", + "name": "class_ids", + "in": "query" + }, + { + "type": "string", + "description": "e.g. require_assets_hist", + "name": "require_assets_hist", + "in": "query" + }, + { + "type": "string", + "description": "e.g. require_annos_hist", + "name": "require_annos_hist", + "in": "query" + } + ], + "responses": { + "200": { + "description": "'code': 0, 'msg': 'Success', 'Success': true, 'result': constants.QueryDatasetStatsResult", + "schema": { + "type": "string" + } + } + } + } + }, + "/api/v1/users/{userID}/repo/{repoID}/branch/{branchID}/model_info": { + "get": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "summary": "Query model info.", + "parameters": [ + { + "type": "string", + "description": "User ID", + "name": "userID", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Repo ID", + "name": "repoID", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Branch ID", + "name": "branchID", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "'code': 0, 'msg': 'Success', 'Success': true, 'result': constants.MirdataModel", + "schema": { + "type": "string" + } + } + } + } + }, + "/api/v1/users/{userID}/repo/{repoID}/dataset_duplication": { + "get": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "summary": "Query dataset dups.", + "parameters": [ + { + "type": "string", + "description": "User ID", + "name": "userID", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Repo ID", + "name": "repoID", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "e.g. 
candidate_dataset_ids=xxx,yyy", + "name": "candidate_dataset_ids", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "dataset_ids to be corroded", + "name": "corrodee_dataset_ids", + "in": "query" + } + ], + "responses": { + "200": { + "description": "'code': 0, 'msg': 'Success', 'Success': true, 'result': 'duplication: 50, total_count: {xxx: 100, yyy: 200}'", + "schema": { + "type": "string" + } + } + } + } + } + } +}` + +// SwaggerInfo holds exported Swagger Info so clients can modify it +var SwaggerInfo = &swag.Spec{ + Version: "", + Host: "", + BasePath: "", + Schemes: []string{}, + Title: "", + Description: "", + InfoInstanceName: "swagger", + SwaggerTemplate: docTemplate, +} + +func init() { + swag.Register(SwaggerInfo.InstanceName(), SwaggerInfo) +} diff --git a/ymir/backend/src/ymir_hel/viewer/docs/swagger.json b/ymir/backend/src/ymir_hel/viewer/docs/swagger.json new file mode 100644 index 0000000000..0bc1a5a49e --- /dev/null +++ b/ymir/backend/src/ymir_hel/viewer/docs/swagger.json @@ -0,0 +1,290 @@ +{ + "swagger": "2.0", + "info": { + "contact": {} + }, + "paths": { + "/api/v1/users/{userID}/repo/{repoID}/branch/{branchID}/assets": { + "get": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "summary": "Query single or set of assets.", + "parameters": [ + { + "type": "string", + "description": "User ID", + "name": "userID", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Repo ID", + "name": "repoID", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Branch ID", + "name": "branchID", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Offset, default is 0", + "name": "offset", + "in": "query" + }, + { + "type": "string", + "description": "limit, default is 1", + "name": "limit", + "in": "query" + }, + { + "type": "string", + "description": "e.g. class_ids=1,3,7", + "name": "class_ids", + "in": "query" + }, + { + "type": "string", + "description": "e.g. annotation_types=GT,PRED", + "name": "annotation_types", + "in": "query" + }, + { + "type": "string", + "description": "e.g. current_asset_id=xxxyyyzzz", + "name": "current_asset_id", + "in": "query" + }, + { + "type": "string", + "description": "e.g. cm_types=0,1,2,3 NotSet=0,TP=1,FP=2,FN=3,TN=4,Unknown=5,MTP=11,IGNORED=12", + "name": "cm_types", + "in": "query" + }, + { + "type": "string", + "description": "ck pairs, e.g. cks=xxx,xxx:,xxx:yyy, e.g. camera_id:1", + "name": "cks", + "in": "query" + }, + { + "type": "string", + "description": "tag pairs, e.g. cks=xxx,xxx:,xxx:yyy, e.g. 
camera_id:1", + "name": "tags", + "in": "query" + } + ], + "responses": { + "200": { + "description": "'code': 0, 'msg': 'Success', 'Success': true, 'result': constants.QueryAssetsResult", + "schema": { + "type": "string" + } + } + } + } + }, + "/api/v1/users/{userID}/repo/{repoID}/branch/{branchID}/dataset_meta_count": { + "get": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "summary": "Query dataset info, lightweight api.", + "parameters": [ + { + "type": "string", + "description": "User ID", + "name": "userID", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Repo ID", + "name": "repoID", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Branch ID", + "name": "branchID", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "'code': 0, 'msg': 'Success', 'Success': true, 'result': constants.QueryDatasetStatsResult", + "schema": { + "type": "string" + } + } + } + } + }, + "/api/v1/users/{userID}/repo/{repoID}/branch/{branchID}/dataset_stats": { + "get": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "summary": "Query dataset Stats.", + "parameters": [ + { + "type": "string", + "description": "User ID", + "name": "userID", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Repo ID", + "name": "repoID", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Branch ID", + "name": "branchID", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "e.g. class_ids=1,3,7", + "name": "class_ids", + "in": "query" + }, + { + "type": "string", + "description": "e.g. require_assets_hist", + "name": "require_assets_hist", + "in": "query" + }, + { + "type": "string", + "description": "e.g. require_annos_hist", + "name": "require_annos_hist", + "in": "query" + } + ], + "responses": { + "200": { + "description": "'code': 0, 'msg': 'Success', 'Success': true, 'result': constants.QueryDatasetStatsResult", + "schema": { + "type": "string" + } + } + } + } + }, + "/api/v1/users/{userID}/repo/{repoID}/branch/{branchID}/model_info": { + "get": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "summary": "Query model info.", + "parameters": [ + { + "type": "string", + "description": "User ID", + "name": "userID", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Repo ID", + "name": "repoID", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Branch ID", + "name": "branchID", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "'code': 0, 'msg': 'Success', 'Success': true, 'result': constants.MirdataModel", + "schema": { + "type": "string" + } + } + } + } + }, + "/api/v1/users/{userID}/repo/{repoID}/dataset_duplication": { + "get": { + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "summary": "Query dataset dups.", + "parameters": [ + { + "type": "string", + "description": "User ID", + "name": "userID", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "Repo ID", + "name": "repoID", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "e.g. 
candidate_dataset_ids=xxx,yyy", + "name": "candidate_dataset_ids", + "in": "query", + "required": true + }, + { + "type": "string", + "description": "dataset_ids to be corroded", + "name": "corrodee_dataset_ids", + "in": "query" + } + ], + "responses": { + "200": { + "description": "'code': 0, 'msg': 'Success', 'Success': true, 'result': 'duplication: 50, total_count: {xxx: 100, yyy: 200}'", + "schema": { + "type": "string" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/ymir/backend/src/ymir_hel/viewer/docs/swagger.yaml b/ymir/backend/src/ymir_hel/viewer/docs/swagger.yaml new file mode 100644 index 0000000000..4d7f5a2a73 --- /dev/null +++ b/ymir/backend/src/ymir_hel/viewer/docs/swagger.yaml @@ -0,0 +1,196 @@ +info: + contact: {} +paths: + /api/v1/users/{userID}/repo/{repoID}/branch/{branchID}/assets: + get: + consumes: + - application/json + parameters: + - description: User ID + in: path + name: userID + required: true + type: string + - description: Repo ID + in: path + name: repoID + required: true + type: string + - description: Branch ID + in: path + name: branchID + required: true + type: string + - description: Offset, default is 0 + in: query + name: offset + type: string + - description: limit, default is 1 + in: query + name: limit + type: string + - description: e.g. class_ids=1,3,7 + in: query + name: class_ids + type: string + - description: e.g. annotation_types=GT,PRED + in: query + name: annotation_types + type: string + - description: e.g. current_asset_id=xxxyyyzzz + in: query + name: current_asset_id + type: string + - description: e.g. cm_types=0,1,2,3 NotSet=0,TP=1,FP=2,FN=3,TN=4,Unknown=5,MTP=11,IGNORED=12 + in: query + name: cm_types + type: string + - description: ck pairs, e.g. cks=xxx,xxx:,xxx:yyy, e.g. camera_id:1 + in: query + name: cks + type: string + - description: tag pairs, e.g. cks=xxx,xxx:,xxx:yyy, e.g. camera_id:1 + in: query + name: tags + type: string + produces: + - application/json + responses: + "200": + description: '''code'': 0, ''msg'': ''Success'', ''Success'': true, ''result'': + constants.QueryAssetsResult' + schema: + type: string + summary: Query single or set of assets. + /api/v1/users/{userID}/repo/{repoID}/branch/{branchID}/dataset_meta_count: + get: + consumes: + - application/json + parameters: + - description: User ID + in: path + name: userID + required: true + type: string + - description: Repo ID + in: path + name: repoID + required: true + type: string + - description: Branch ID + in: path + name: branchID + required: true + type: string + produces: + - application/json + responses: + "200": + description: '''code'': 0, ''msg'': ''Success'', ''Success'': true, ''result'': + constants.QueryDatasetStatsResult' + schema: + type: string + summary: Query dataset info, lightweight api. + /api/v1/users/{userID}/repo/{repoID}/branch/{branchID}/dataset_stats: + get: + consumes: + - application/json + parameters: + - description: User ID + in: path + name: userID + required: true + type: string + - description: Repo ID + in: path + name: repoID + required: true + type: string + - description: Branch ID + in: path + name: branchID + required: true + type: string + - description: e.g. class_ids=1,3,7 + in: query + name: class_ids + type: string + - description: e.g. require_assets_hist + in: query + name: require_assets_hist + type: string + - description: e.g. 
require_annos_hist + in: query + name: require_annos_hist + type: string + produces: + - application/json + responses: + "200": + description: '''code'': 0, ''msg'': ''Success'', ''Success'': true, ''result'': + constants.QueryDatasetStatsResult' + schema: + type: string + summary: Query dataset Stats. + /api/v1/users/{userID}/repo/{repoID}/branch/{branchID}/model_info: + get: + consumes: + - application/json + parameters: + - description: User ID + in: path + name: userID + required: true + type: string + - description: Repo ID + in: path + name: repoID + required: true + type: string + - description: Branch ID + in: path + name: branchID + required: true + type: string + produces: + - application/json + responses: + "200": + description: '''code'': 0, ''msg'': ''Success'', ''Success'': true, ''result'': constants.MirdataModel' + schema: + type: string + summary: Query model info. + /api/v1/users/{userID}/repo/{repoID}/dataset_duplication: + get: + consumes: + - application/json + parameters: + - description: User ID + in: path + name: userID + required: true + type: string + - description: Repo ID + in: path + name: repoID + required: true + type: string + - description: e.g. candidate_dataset_ids=xxx,yyy + in: query + name: candidate_dataset_ids + required: true + type: string + - description: dataset_ids to be corroded + in: query + name: corrodee_dataset_ids + type: string + produces: + - application/json + responses: + "200": + description: '''code'': 0, ''msg'': ''Success'', ''Success'': true, ''result'': + ''duplication: 50, total_count: {xxx: 100, yyy: 200}''' + schema: + type: string + summary: Query dataset dups. +swagger: "2.0" diff --git a/ymir/backend/src/ymir_hel/viewer/handler/handler.go b/ymir/backend/src/ymir_hel/viewer/handler/handler.go new file mode 100644 index 0000000000..ff9505559a --- /dev/null +++ b/ymir/backend/src/ymir_hel/viewer/handler/handler.go @@ -0,0 +1,323 @@ +package handler + +import ( + "context" + "log" + + "github.com/IndustryEssentials/ymir-hel/common/constants" + "github.com/IndustryEssentials/ymir-hel/common/db/mongodb" + "github.com/IndustryEssentials/ymir-hel/common/loader" + "github.com/IndustryEssentials/ymir-hel/protos" + "github.com/mitchellh/mapstructure" + "go.mongodb.org/mongo-driver/mongo" + "go.mongodb.org/mongo-driver/mongo/options" +) + +type BaseMirRepoLoader interface { + LoadSingleMirData(mirRepo *constants.MirRepo, mirFile constants.MirFile) interface{} + LoadMutipleMirDatas(mirRepo *constants.MirRepo, mirFiles []constants.MirFile) []interface{} + LoadModelInfo(mirRepo *constants.MirRepo) *constants.MirdataModel +} + +type BaseMongoServer interface { + CheckDatasetExistenceReady(mirRepo *constants.MirRepo) (bool, bool) + IndexDatasetData( + mirRepo *constants.MirRepo, + mirMetadatas *protos.MirMetadatas, + mirAnnotations *protos.MirAnnotations, + ) + RemoveNonReadyDataset() + QueryDatasetAssets( + mirRepo *constants.MirRepo, + offset int, + limit int, + classIDs []int, + annoTypes []string, + currentAssetID string, + cmTypes []int, + cks []string, + tags []string, + ) *constants.QueryAssetsResult + QueryDatasetStats( + mirRepo *constants.MirRepo, + classIDs []int, + requireAssetsHist bool, + requireAnnotationsHist bool, + result *constants.QueryDatasetStatsResult, + ) *constants.QueryDatasetStatsResult + MetricsQuerySignals( + collectionSuffix string, + userID string, + classIDs []int, + queryField string, + bucket string, + unit string, + limit int, + ) *[]constants.MetricsQueryPoint + MetricsRecordSignals(collectionSuffix 
string, id string, data interface{}) +} + +type ViewerHandler struct { + mongoServer BaseMongoServer + mirLoader BaseMirRepoLoader +} + +func NewViewerHandler( + mongoURI string, + mongoDataDBName string, + useDataDBCache bool, + mongoMetricsDBName string, +) *ViewerHandler { + var mongoServer *mongodb.MongoServer + if len(mongoURI) > 0 { + log.Printf("[viewer] init mongodb %s\n", mongoURI) + + mongoCtx := context.Background() + client, err := mongo.Connect(mongoCtx, options.Client().ApplyURI(mongoURI)) + if err != nil { + panic(err) + } + + mirDatabase := client.Database(mongoDataDBName) + metricsDatabase := client.Database(mongoMetricsDBName) + mongoServer = mongodb.NewMongoServer(mongoCtx, mirDatabase, metricsDatabase) + if useDataDBCache { + go mongoServer.RemoveNonReadyDataset() + } else { + // Clear cached data. + err = mirDatabase.Drop(mongoCtx) + if err != nil { + panic(err) + } + } + + } + + return &ViewerHandler{mongoServer: mongoServer, mirLoader: &loader.MirRepoLoader{}} +} + +func (v *ViewerHandler) loadAndIndexAssets(mirRepo *constants.MirRepo) { + exist, _ := v.mongoServer.CheckDatasetExistenceReady(mirRepo) + if exist { + return + } + + log.Printf("Mongodb %s not exist, loading mirdatas.", mirRepo.TaskID) + filesToLoad := []constants.MirFile{constants.MirfileMetadatas, constants.MirfileAnnotations} + mirDatas := v.mirLoader.LoadMutipleMirDatas(mirRepo, filesToLoad) + mirMetadatas := mirDatas[0].(*protos.MirMetadatas) + mirAnnotations := mirDatas[1].(*protos.MirAnnotations) + v.mongoServer.IndexDatasetData(mirRepo, mirMetadatas, mirAnnotations) +} + +func (v *ViewerHandler) GetAssetsHandler( + mirRepo *constants.MirRepo, + offset int, + limit int, + classIDs []int, + annoTypes []string, + currentAssetID string, + cmTypes []int, + cks []string, + tags []string, +) *constants.QueryAssetsResult { + v.loadAndIndexAssets(mirRepo) + return v.mongoServer.QueryDatasetAssets( + mirRepo, + offset, + limit, + classIDs, + annoTypes, + currentAssetID, + cmTypes, + cks, + tags, + ) +} + +func (v *ViewerHandler) GetDatasetMetaCountsHandler( + mirRepo *constants.MirRepo, +) *constants.QueryDatasetStatsResult { + result := constants.NewQueryDatasetStatsResult() + + mirContext := v.mirLoader.LoadSingleMirData(mirRepo, constants.MirfileContext).(*protos.MirContext) + result.TotalAssetsCount = int64(mirContext.ImagesCnt) + + gtStats := mirContext.GtStats + if gtStats != nil { + result.Gt.NegativeAssetsCount = int64(gtStats.NegativeAssetCnt) + result.Gt.PositiveAssetsCount = int64(gtStats.PositiveAssetCnt) + if gtStats.ClassIdsCnt != nil { + for k, v := range gtStats.ClassIdsCnt { + result.Gt.ClassIDsCount[int(k)] = int64(v) + } + } + result.Gt.AnnotationsCount = int64(gtStats.TotalCnt) + } + + predStats := mirContext.PredStats + if predStats != nil { + result.Pred.NegativeAssetsCount = int64(predStats.NegativeAssetCnt) + result.Pred.PositiveAssetsCount = int64(predStats.PositiveAssetCnt) + if predStats.ClassIdsCnt != nil { + for k, v := range predStats.ClassIdsCnt { + result.Pred.ClassIDsCount[int(k)] = int64(v) + } + } + result.Pred.AnnotationsCount = int64(predStats.TotalCnt) + } + + exist, ready := v.mongoServer.CheckDatasetExistenceReady(mirRepo) + result.QueryContext.RepoIndexExist = exist + result.QueryContext.RepoIndexReady = ready + if !exist { + go v.loadAndIndexAssets(mirRepo) + } + + mirTasks := v.mirLoader.LoadSingleMirData(mirRepo, constants.MirfileTasks).(*protos.MirTasks) + task := mirTasks.Tasks[mirTasks.HeadTaskId] + result.NewTypesAdded = task.NewTypesAdded + + 
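// Fill total asset file size and per-ck (custom keyword) counts from the repo context. +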
result.TotalAssetsFileSize = int64(mirContext.TotalAssetMbytes) + for k, v := range mirContext.CksCnt { + result.CksCountTotal[k] = int64(v.Cnt) + result.CksCount[k] = map[string]int64{} + for k2, v2 := range v.SubCnt { + result.CksCount[k][k2] = int64(v2) + } + } + + if mirContext.GtStats != nil && mirContext.GtStats.TagsCnt != nil { + for k, v := range mirContext.GtStats.TagsCnt { + result.Gt.TagsCountTotal[k] = int64(v.Cnt) + result.Gt.TagsCount[k] = map[string]int64{} + for k2, v2 := range v.SubCnt { + result.Gt.TagsCount[k][k2] = int64(v2) + } + } + } + + if mirContext.PredStats != nil { + result.Pred.EvalClassIDs = mirContext.PredStats.EvalClassIds + if mirContext.PredStats.TagsCnt != nil { + for k, v := range mirContext.PredStats.TagsCnt { + result.Pred.TagsCountTotal[k] = int64(v.Cnt) + result.Pred.TagsCount[k] = map[string]int64{} + for k2, v2 := range v.SubCnt { + result.Pred.TagsCount[k][k2] = int64(v2) + } + } + } + } + + return result +} + +func (v *ViewerHandler) GetDatasetStatsHandler( + mirRepo *constants.MirRepo, + classIDs []int, + requireAssetsHist bool, + requireAnnotationsHist bool, +) *constants.QueryDatasetStatsResult { + if len(classIDs) < 1 && !requireAssetsHist && !requireAnnotationsHist { + panic("same result as dataset_meta_count, should use lightweight interface instead.") + } + + v.loadAndIndexAssets(mirRepo) + // Use metadata_handler to fill basic info. + result := v.GetDatasetMetaCountsHandler(mirRepo) + // Two fields need indexed data: + // 1. negative counts (classIDs) + // 2. build histogram (requireAssetsHist, requireAnnotationsHist) + return v.mongoServer.QueryDatasetStats(mirRepo, classIDs, requireAssetsHist, requireAnnotationsHist, result) +} + +func (v *ViewerHandler) GetDatasetDupHandler( + candidateMirRepos []*constants.MirRepo, + corrodeeMirRepos []*constants.MirRepo, +) *constants.QueryDatasetDupResult { + joinAssetCountMax := 0 + assetsCountMap := make(map[string]int64, len(candidateMirRepos)) + candidateMetadatas := []*protos.MirMetadatas{} + for _, mirRepo := range candidateMirRepos { + candidateMetadata := v.mirLoader.LoadSingleMirData(mirRepo, constants.MirfileMetadatas).(*protos.MirMetadatas) + + joinAssetCountMax += len(candidateMetadata.Attributes) + assetsCountMap[mirRepo.TaskID] = int64(len(candidateMetadata.Attributes)) + candidateMetadatas = append(candidateMetadatas, candidateMetadata) + } + + // Count dups. + dupCount := 0 + joinedAssetIDMap := make(map[string]bool, joinAssetCountMax) + for _, candidateMetadata := range candidateMetadatas { + for assetID := range candidateMetadata.Attributes { + if _, ok := joinedAssetIDMap[assetID]; ok { + dupCount++ + } else { + joinedAssetIDMap[assetID] = true + } + } + } + + // Count corrode residency. 
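+ // A corrodee dataset's residual count is the number of its assets absent from the joined candidate set above.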
+ residualCountMap := make(map[string]int64, len(corrodeeMirRepos)) + for _, mirRepo := range corrodeeMirRepos { + corrodeeMetadata := v.mirLoader.LoadSingleMirData(mirRepo, constants.MirfileMetadatas).(*protos.MirMetadatas) + + residualCount := len(corrodeeMetadata.Attributes) + for assetID := range corrodeeMetadata.Attributes { + if _, ok := joinedAssetIDMap[assetID]; ok { + residualCount-- + } + } + residualCountMap[mirRepo.TaskID] = int64(residualCount) + } + + return &constants.QueryDatasetDupResult{ + Duplication: dupCount, + TotalCount: assetsCountMap, + ResidualCount: residualCountMap, + } +} + +func (v *ViewerHandler) GetModelInfoHandler( + mirRepo *constants.MirRepo, +) *constants.MirdataModel { + return v.mirLoader.LoadModelInfo(mirRepo) +} + +func (v *ViewerHandler) MetricsRecordHandler( + metricsGroup string, + postForm map[string]interface{}, +) { + dataType := constants.ParseMirMetrics(metricsGroup) + if dataType == constants.MetricsUnknown { + panic("unknown metrics type") + } + + data := constants.MetricsDataPoint{} + err := mapstructure.Decode(postForm, &data) + if err != nil { + panic(err) + } + + v.mongoServer.MetricsRecordSignals(metricsGroup, postForm["id"].(string), data) +} + +func (v *ViewerHandler) MetricsQueryHandler( + metricsGroup string, + userID string, + classIDs []int, + queryField string, + bucket string, + unit string, + limit int, +) *[]constants.MetricsQueryPoint { + dataType := constants.ParseMirMetrics(metricsGroup) + if dataType == constants.MetricsUnknown { + panic("unknown metrics type") + } + + return v.mongoServer.MetricsQuerySignals(metricsGroup, userID, classIDs, queryField, bucket, unit, limit) +} diff --git a/ymir/backend/src/ymir_hel/viewer/handler/handler_test.go b/ymir/backend/src/ymir_hel/viewer/handler/handler_test.go new file mode 100644 index 0000000000..4f9122af95 --- /dev/null +++ b/ymir/backend/src/ymir_hel/viewer/handler/handler_test.go @@ -0,0 +1,324 @@ +package handler + +import ( + "encoding/json" + "strconv" + "testing" + + "github.com/IndustryEssentials/ymir-hel/common/constants" + "github.com/IndustryEssentials/ymir-hel/protos" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +type MockMirRepoLoader struct { + mock.Mock +} + +func (m *MockMirRepoLoader) LoadSingleMirData(mirRepo *constants.MirRepo, mirFile constants.MirFile) interface{} { + args := m.Called(mirRepo, mirFile) + return args.Get(0) +} + +func (m *MockMirRepoLoader) LoadMutipleMirDatas( + mirRepo *constants.MirRepo, + mirFiles []constants.MirFile, +) []interface{} { + args := m.Called(mirRepo, mirFiles) + return args.Get(0).([]interface{}) +} + +func (m *MockMirRepoLoader) LoadModelInfo(mirRepo *constants.MirRepo) *constants.MirdataModel { + args := m.Called(mirRepo) + return args.Get(0).(*constants.MirdataModel) +} + +type MockMongoServer struct { + mock.Mock +} + +func (m *MockMongoServer) CheckDatasetExistenceReady(mirRepo *constants.MirRepo) (bool, bool) { + args := m.Called(mirRepo) + return args.Bool(0), args.Bool(1) +} + +func (m *MockMongoServer) RemoveNonReadyDataset() { + m.Called() +} + +func (m *MockMongoServer) IndexDatasetData( + mirRepo *constants.MirRepo, + mirMetadatas *protos.MirMetadatas, + mirAnnotations *protos.MirAnnotations, +) { + m.Called(mirRepo, mirMetadatas, mirAnnotations) +} + +func (m *MockMongoServer) QueryDatasetAssets( + mirRepo *constants.MirRepo, + offset int, + limit int, + classIDs []int, + annoTypes []string, + currentAssetID string, + cmTypes []int, + cks []string, + tags []string, +) 
*constants.QueryAssetsResult { + args := m.Called(mirRepo, offset, limit, classIDs, annoTypes, currentAssetID, cmTypes, cks, tags) + return args.Get(0).(*constants.QueryAssetsResult) +} + +func (m *MockMongoServer) QueryDatasetStats( + mirRepo *constants.MirRepo, + classIDs []int, + requireAssetsHist bool, + requireAnnotationsHist bool, + result *constants.QueryDatasetStatsResult, +) *constants.QueryDatasetStatsResult { + args := m.Called(mirRepo, classIDs, requireAssetsHist, requireAnnotationsHist, result) + return args.Get(0).(*constants.QueryDatasetStatsResult) +} + +func (m *MockMongoServer) MetricsRecordSignals(collectionSuffix string, id string, data interface{}) { + m.Called(collectionSuffix, id, data) +} + +func (m *MockMongoServer) MetricsQuerySignals( + collectionSuffix string, + userID string, + classIDs []int, + queryField string, + bucket string, + unit string, + limit int, +) *[]constants.MetricsQueryPoint { + args := m.Called(collectionSuffix, userID, queryField, bucket, unit, limit) + return args.Get(0).(*[]constants.MetricsQueryPoint) +} + +func TestGetDatasetMetaCountsHandler(t *testing.T) { + mirFileContext := constants.MirfileContext + mirFileTasks := constants.MirfileTasks + mockMirContext := protos.MirContext{} + err := json.Unmarshal([]byte(`{ + "images_cnt": 20, + "cks_cnt": + { + "city": + { + "cnt": 1, + "sub_cnt": + { + "hangzhou": 1 + } + } + }, + "pred_stats": + { + "positive_asset_cnt": 8, + "negative_asset_cnt": 5, + "eval_class_ids": [0, 1], + "class_ids_cnt": + { + "1": 8 + }, + "tags_cnt": { + "city": { + "cnt": 1, + "sub_cnt": + { + "shenzhen": 1 + } + }} + }, + "gt_stats": + { + "positive_asset_cnt": 3, + "negative_asset_cnt": 2, + "class_ids_cnt": + { + "0": 3 + }, + "tags_cnt": { + "city": { + "cnt": 1, + "sub_cnt": + { + "guangzhou": 1 + } + }} + } + }`), &mockMirContext) + if err != nil { + panic(err) + } + expectedResult := constants.QueryDatasetStatsResult{} + err = json.Unmarshal([]byte(`{ + "gt": + { + "class_ids_count": + { + "0": 3 + }, + "negative_assets_count": 2, + "positive_assets_count": 3, + "tags_count_total": + { + "city": 1 + }, + "tags_count": + { + "city": + { + "guangzhou": 1 + } + } + }, + "pred": + { + "class_ids_count": + { + "1": 8 + }, + "negative_assets_count": 5, + "positive_assets_count": 8, + "eval_class_ids": [0, 1], + "tags_count_total": + { + "city": 1 + }, + "tags_count": + { + "city": + { + "shenzhen": 1 + } + } + }, + "total_assets_count": 20, + "cks_count_total": + { + "city": 1 + }, + "cks_count": + { + "city": + { + "hangzhou": 1 + } + }, + "new_types_added": true, + "query_context": { + "repo_index_exist": true, + "repo_index_ready": true + } + }`), &expectedResult) + if err != nil { + panic(err) + } + + mirRepo := constants.MirRepo{} + mockLoader := MockMirRepoLoader{} + mockLoader.On("LoadSingleMirData", &mirRepo, mirFileContext).Return(&mockMirContext, 0, 0).Once() + mockLoader.On("LoadSingleMirData", &mirRepo, mirFileTasks). + Return(&protos.MirTasks{HeadTaskId: "h", Tasks: map[string]*protos.Task{"h": {NewTypesAdded: true}}}, 0, 0). 
+ Once() + mockMongoServer := MockMongoServer{} + mockMongoServer.On("CheckDatasetExistenceReady", &mirRepo).Return(true, true).Twice() + + handler := &ViewerHandler{mongoServer: &mockMongoServer, mirLoader: &mockLoader} + result := handler.GetDatasetMetaCountsHandler(&mirRepo) + + assert.Equal(t, &expectedResult, result) + mockLoader.AssertExpectations(t) + +} + +func TestGetDatasetDupHandler(t *testing.T) { + mirRepo0 := constants.MirRepo{BranchID: "a", TaskID: "a"} + mirRepo1 := constants.MirRepo{BranchID: "b", TaskID: "b"} + mockMongoServer := MockMongoServer{} + + expectedDup := 100 + expectedCount0 := int64(expectedDup) + expectedCount1 := int64(expectedDup) + + mockLoader := MockMirRepoLoader{} + mockMirMetadatas := protos.MirMetadatas{Attributes: map[string]*protos.MetadataAttributes{}} + for i := 0; i < expectedDup; i++ { + mockMirMetadatas.Attributes[strconv.Itoa(i)] = &protos.MetadataAttributes{} + } + mockLoader.On("LoadSingleMirData", &mirRepo0, constants.MirfileMetadatas).Return(&mockMirMetadatas).Once() + mockLoader.On("LoadSingleMirData", &mirRepo1, constants.MirfileMetadatas).Return(&mockMirMetadatas).Once() + + expectedResult := &constants.QueryDatasetDupResult{ + Duplication: expectedDup, + TotalCount: map[string]int64{"a": expectedCount0, "b": expectedCount1}, + ResidualCount: map[string]int64{}, + } + + handler := &ViewerHandler{mongoServer: &mockMongoServer, mirLoader: &mockLoader} + resultData := handler.GetDatasetDupHandler([]*constants.MirRepo{&mirRepo0, &mirRepo1}, []*constants.MirRepo{}) + assert.Equal(t, expectedResult, resultData) +} + +func TestGetDatasetStatsHandler(t *testing.T) { + mirFileContext := constants.MirfileContext + mirFileTasks := constants.MirfileTasks + mockMirContext := protos.MirContext{} + mockAssetsDetail := []constants.MirAssetDetail{{AssetID: "a"}} + mirRepo := constants.MirRepo{} + mockLoader := MockMirRepoLoader{} + mockLoader.On("LoadAssetsDetail", &mirRepo, "", 0, 0).Return(mockAssetsDetail, int64(0), int64(0)) + mockLoader.On("LoadSingleMirData", &mirRepo, mirFileContext).Return(&mockMirContext).Once() + mockLoader.On("LoadSingleMirData", &mirRepo, mirFileTasks). + Return(&protos.MirTasks{HeadTaskId: "h", Tasks: map[string]*protos.Task{"h": {NewTypesAdded: true}}}). 
+ Once() + + classIDs := []int{0, 1} + expectedResult := constants.NewQueryDatasetStatsResult() + expectedResult.QueryContext.RepoIndexExist = true + expectedResult.QueryContext.RepoIndexReady = true + expectedResult.NewTypesAdded = true + mockMongoServer := MockMongoServer{} + mockMongoServer.On("CheckDatasetExistenceReady", &mirRepo).Return(true, true).Twice() + mockMongoServer.On("QueryDatasetStats", &mirRepo, classIDs, true, true, expectedResult).Return(expectedResult) + handler := &ViewerHandler{mongoServer: &mockMongoServer, mirLoader: &mockLoader} + + result := handler.GetDatasetStatsHandler(&mirRepo, classIDs, true, true) + assert.Equal(t, expectedResult, result) +} + +func TestGetAssetsHandler(t *testing.T) { + mirRepo := constants.MirRepo{} + mockLoader := MockMirRepoLoader{} + + offset := 100 + limit := 10 + classIDs := []int{0, 1} + annoTypes := []string{"gt", "pred"} + currentAssetID := "abc" + cmTypes := []int{0, 1} + cks := []string{"a", "b", "c"} + tags := []string{"x", "y", "z"} + expectedResult := &constants.QueryAssetsResult{} + mockMongoServer := MockMongoServer{} + mockMongoServer.On("CheckDatasetExistenceReady", &mirRepo).Return(true, true) + mockMongoServer.On("QueryDatasetAssets", &mirRepo, offset, limit, classIDs, annoTypes, currentAssetID, cmTypes, cks, tags). + Return(expectedResult) + handler := &ViewerHandler{mongoServer: &mockMongoServer, mirLoader: &mockLoader} + + result := handler.GetAssetsHandler( + &mirRepo, + offset, + limit, + classIDs, + annoTypes, + currentAssetID, + cmTypes, + cks, + tags, + ) + assert.Equal(t, expectedResult, result) +} diff --git a/ymir/backend/src/ymir_hel/viewer/server/response.go b/ymir/backend/src/ymir_hel/viewer/server/response.go new file mode 100644 index 0000000000..c25225a90e --- /dev/null +++ b/ymir/backend/src/ymir_hel/viewer/server/response.go @@ -0,0 +1,50 @@ +package server + +import ( + "log" + "net/http" + "runtime/debug" + + "github.com/gin-gonic/gin" + + "github.com/IndustryEssentials/ymir-hel/common/constants" +) + +type ResultVO struct { + Code constants.ResponseCode `json:"code"` + Msg string `json:"msg"` + Success bool `json:"success"` + Result interface{} `json:"result"` +} + +type FailureResult struct { + Code constants.ResponseCode `json:"code"` + Msg string `json:"msg"` +} + +func ViewerSuccess(ctx *gin.Context, result interface{}) { + resp := &ResultVO{Code: constants.CodeSuccess, Msg: "Success", Success: true, Result: result} + ctx.JSON(http.StatusOK, resp) +} + +func ViewerFailure(ctx *gin.Context, result *FailureResult) { + resp := &ResultVO{Code: result.Code, Msg: result.Msg, Success: false, Result: result} + ctx.JSON(http.StatusBadRequest, resp) + log.Printf("ViewerFailure\n%#v\n%s\n", *result, debug.Stack()) +} + +func ViewerFailureFromErr(ctx *gin.Context, err error) { + errString := err.Error() + errCode := constants.CodeViewerGeneral + + switch errString { + case "unknown ref": + errCode = constants.CodeViewerRepoNotExist + } + + result := FailureResult{ + Code: errCode, + Msg: errString, + } + ViewerFailure(ctx, &result) +} diff --git a/ymir/backend/src/ymir_hel/viewer/server/response_test.go b/ymir/backend/src/ymir_hel/viewer/server/response_test.go new file mode 100644 index 0000000000..e2aecfe2f1 --- /dev/null +++ b/ymir/backend/src/ymir_hel/viewer/server/response_test.go @@ -0,0 +1,54 @@ +package server + +import ( + "encoding/json" + "net/http/httptest" + "testing" + + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/assert" + + 
"github.com/IndustryEssentials/ymir-hel/common/constants" +) + +func TestViewerSuccess(t *testing.T) { + gin.SetMode(gin.TestMode) + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + + msg := "testing ViewerSuccess" + ViewerSuccess(c, msg) + + expectedData := ResultVO{ + Code: constants.CodeSuccess, + Msg: "Success", + Success: true, + Result: msg, + } + expectedDataBytes, _ := json.Marshal(expectedData) + assert.Equal(t, 200, w.Code) + assert.Equal(t, expectedDataBytes, w.Body.Bytes()) +} + +func TestViewerFailure(t *testing.T) { + + gin.SetMode(gin.TestMode) + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + + result := FailureResult{Code: constants.CodeViewerGeneral, Msg: "testing ViewerFailure"} + ViewerFailure( + c, + &result, + ) + + expectedData := ResultVO{ + Code: result.Code, + Msg: result.Msg, + Success: false, + Result: result, + } + expectedDataBytes, _ := json.Marshal(expectedData) + assert.Equal(t, 400, w.Code) + assert.Equal(t, expectedDataBytes, w.Body.Bytes()) +} diff --git a/ymir/backend/src/ymir_hel/viewer/server/server.go b/ymir/backend/src/ymir_hel/viewer/server/server.go new file mode 100644 index 0000000000..f80afdb20c --- /dev/null +++ b/ymir/backend/src/ymir_hel/viewer/server/server.go @@ -0,0 +1,509 @@ +package server + +import ( + "errors" + "fmt" + "log" + "net/http" + "strconv" + "strings" + "time" + + "github.com/IndustryEssentials/ymir-hel/common/constants" + "github.com/IndustryEssentials/ymir-hel/configs" + docs "github.com/IndustryEssentials/ymir-hel/viewer/docs" + "github.com/IndustryEssentials/ymir-hel/viewer/handler" + "github.com/gin-gonic/gin" + "github.com/penglongli/gin-metrics/ginmetrics" + swaggerFiles "github.com/swaggo/files" + ginSwagger "github.com/swaggo/gin-swagger" +) + +// BaseHandler delcare base handler interface, which is used in viewer. 
+type BaseHandler interface { + GetAssetsHandler( + mirRepo *constants.MirRepo, + offset int, + limit int, + classIDs []int, + annoTypes []string, + currentAssetID string, + cmTypes []int, + cks []string, + tags []string, + ) *constants.QueryAssetsResult + GetDatasetDupHandler( + candidateMirRepos []*constants.MirRepo, + corrodeeMirRepos []*constants.MirRepo, + ) *constants.QueryDatasetDupResult + GetDatasetMetaCountsHandler( + mirRepo *constants.MirRepo, + ) *constants.QueryDatasetStatsResult + GetDatasetStatsHandler( + mirRepo *constants.MirRepo, + classIDs []int, + requireAssetsHist bool, + requireAnnotationsHist bool, + ) *constants.QueryDatasetStatsResult + GetModelInfoHandler( + mirRepo *constants.MirRepo, + ) *constants.MirdataModel + MetricsQueryHandler( + metricsGroup string, + userID string, + classIDs []int, + queryField string, + bucket string, + unit string, + limit int, + ) *[]constants.MetricsQueryPoint + MetricsRecordHandler( + metricsGroup string, + postForm map[string]interface{}, + ) +} + +type ViewerServer struct { + addr string + gin *gin.Engine + sandbox string + config *configs.Config + handler BaseHandler +} + +func NewViewerServer(config *configs.Config) (ViewerServer, error) { + gin.SetMode(gin.ReleaseMode) + viewerServer := ViewerServer{ + addr: config.ViewerURI, + gin: gin.New(), + sandbox: config.YmirSandbox, + config: config, + handler: handler.NewViewerHandler( + config.MongoDBURI, + config.MongoDataDBName, + config.MongoDataDBCache, + config.MongoMetricsDBName, + ), + } + + viewerServer.gin.Use( + gin.LoggerWithWriter(gin.DefaultWriter, "/health", "/metrics"), + gin.Recovery(), + ) + + // Get the global ginmetrics monitor object. + m := ginmetrics.GetMonitor() + + // +optional set metric path, default /debug/metrics + m.SetMetricPath("/metrics") + // +optional set slow time, default 5s + m.SetSlowTime(10) + // +optional set request duration, default {0.1, 0.3, 1.2, 5, 10} + // used for p95/p99 percentiles + m.SetDuration([]float64{0.1, 0.3, 1.2, 5, 10}) + + // Register the metrics middleware with gin. + m.Use(viewerServer.gin) + + viewerServer.routes() + return viewerServer, nil +} + +func StartViewerServer(config *configs.Config) error { + viewerServer, err := NewViewerServer(config) + if err != nil { + return err + } + defer viewerServer.Clear() + viewerServer.Start() + return nil +} + +func (s *ViewerServer) Start() { + srv := &http.Server{ + Addr: s.addr, + Handler: s.gin, + ReadTimeout: time.Duration(s.config.ViewerTimeout) * time.Second, + WriteTimeout: time.Duration(s.config.ViewerTimeout) * time.Second, + } + log.Fatal(srv.ListenAndServe()) +} + +func (s *ViewerServer) Clear() { + fmt.Println("server cleared.") +} + +func (s *ViewerServer) routes() { + r := s.gin + + docs.SwaggerInfo.BasePath = "/api/v1" + r.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerFiles.Handler)) + + r.GET("/health", s.handleHealth) + + apiPath := r.Group("/api/v1") + { + apiPath.GET("/users/:userID/repo/:repoID/dataset_duplication", s.handleDatasetDup) + apiPath.GET("/users/:userID/repo/:repoID/branch/:branchID/assets", s.handleAssets) + apiPath.GET("/users/:userID/repo/:repoID/branch/:branchID/dataset_meta_count", s.handleDatasetMetaCounts) + apiPath.GET("/users/:userID/repo/:repoID/branch/:branchID/dataset_stats", s.handleDatasetStats) + apiPath.GET("/users/:userID/repo/:repoID/branch/:branchID/model_info", s.handleModelInfo) + apiPath.GET("/user_metrics/:metrics_group", s.handleMetricsQuery) + apiPath.POST("/user_metrics/:metrics_group", s.handleMetricsRecord) + } +} + +func (s *ViewerServer)
buildMirRepoFromParam(c *gin.Context) *constants.MirRepo { + userID := c.Param("userID") + repoID := c.Param("repoID") + branchID := c.Param("branchID") + return &constants.MirRepo{ + SandboxRoot: s.sandbox, + UserID: userID, + RepoID: repoID, + BranchID: branchID, + TaskID: branchID, + } +} + +func (s *ViewerServer) getInt(input string) int { + data, err := strconv.Atoi(input) + if err != nil { + data = 0 + } + return data +} + +func (s *ViewerServer) getIntSliceFromString(input string) []int { + classIDs := make([]int, 0) + classIDsStr := input + if len(classIDsStr) < 1 { + return classIDs + } + + classIDsStrs := strings.Split(classIDsStr, ",") + for _, v := range classIDsStrs { + if len(v) < 1 { + continue + } + + classID, err := strconv.Atoi(v) + if err != nil { + continue + } + classIDs = append(classIDs, classID) + } + return classIDs +} + +// @Summary Query single or set of assets. +// @Accept json +// @Produce json +// @Param userID path string true "User ID" +// @Param repoID path string true "Repo ID" +// @Param branchID path string true "Branch ID" +// @Param offset query string false "Offset, default is 0" +// @Param limit query string false "limit, default is 1" +// @Param class_ids query string false "e.g. class_ids=1,3,7" +// @Param annotation_types query string false "e.g. annotation_types=GT,PRED" +// @Param current_asset_id query string false "e.g. current_asset_id=xxxyyyzzz" +// @Param cm_types query string false "e.g. cm_types=0,1,2,3 NotSet=0,TP=1,FP=2,FN=3,TN=4,Unknown=5,MTP=11,IGNORED=12" +// @Param cks query string false "ck pairs, e.g. cks=xxx,xxx:,xxx:yyy, e.g. camera_id:1" +// @Param tags query string false "tag pairs, e.g. cks=xxx,xxx:,xxx:yyy, e.g. camera_id:1" +// @Success 200 {string} string "'code': 0, 'msg': 'Success', 'Success': true, 'result': constants.QueryAssetsResult" +// @Router /api/v1/users/{userID}/repo/{repoID}/branch/{branchID}/assets [get] +func (s *ViewerServer) handleAssets(c *gin.Context) { + defer s.handleFailure(c) + + mirRepo := s.buildMirRepoFromParam(c) + offset := s.getInt(c.DefaultQuery("offset", "0")) + if offset < 0 { + offset = 0 + } + limit := s.getInt(c.DefaultQuery("limit", "0")) + if limit < 1 { + limit = 1 + } + classIDs := s.getIntSliceFromString(c.DefaultQuery("class_ids", "")) + currentAssetID := c.DefaultQuery("current_asset_id", "") + cmTypes := s.getIntSliceFromString(c.DefaultQuery("cm_types", "")) + + annoTypesStr := c.DefaultQuery("annotation_types", "") + annoTypes := make([]string, 0) + if len(annoTypesStr) > 0 { + for _, v := range strings.Split(annoTypesStr, ",") { + if len(v) > 0 { + annoType := strings.ToLower(v) + if annoType == "gt" || annoType == "pred" { + annoTypes = append(annoTypes, annoType) + } + } + } + } + + cksStr := c.DefaultQuery("cks", "") + cks := make([]string, 0) + if len(cksStr) > 0 { + for _, v := range strings.Split(cksStr, ",") { + if len(v) > 0 { + cks = append(cks, v) + } + } + } + + tagsStr := c.DefaultQuery("tags", "") + tags := make([]string, 0) + if len(tagsStr) > 0 { + for _, v := range strings.Split(tagsStr, ",") { + if len(v) > 0 { + tags = append(tags, v) + } + } + } + + resultData := s.handler.GetAssetsHandler( + mirRepo, + offset, + limit, + classIDs, + annoTypes, + currentAssetID, + cmTypes, + cks, + tags, + ) + ViewerSuccess(c, resultData) +} + +// @Summary Query dataset info, lightweight api. 
+// @Accept json +// @Produce json +// @Param userID path string true "User ID" +// @Param repoID path string true "Repo ID" +// @Param branchID path string true "Branch ID" +// @Success 200 {string} string "'code': 0, 'msg': 'Success', 'Success': true, 'result': constants.QueryDatasetStatsResult" +// @Router /api/v1/users/{userID}/repo/{repoID}/branch/{branchID}/dataset_meta_count [get] +func (s *ViewerServer) handleDatasetMetaCounts(c *gin.Context) { + defer s.handleFailure(c) + + mirRepo := s.buildMirRepoFromParam(c) + + resultData := s.handler.GetDatasetMetaCountsHandler(mirRepo) + ViewerSuccess(c, resultData) +} + +// @Summary Query dataset Stats. +// @Accept json +// @Produce json +// @Param userID path string true "User ID" +// @Param repoID path string true "Repo ID" +// @Param branchID path string true "Branch ID" +// @Param class_ids query string false "e.g. class_ids=1,3,7" +// @Param require_assets_hist query string false "e.g. require_assets_hist=True" +// @Param require_annos_hist query string false "e.g. require_annos_hist=True" +// @Success 200 {string} string "'code': 0, 'msg': 'Success', 'Success': true, 'result': constants.QueryDatasetStatsResult" +// @Router /api/v1/users/{userID}/repo/{repoID}/branch/{branchID}/dataset_stats [get] +func (s *ViewerServer) handleDatasetStats(c *gin.Context) { + defer s.handleFailure(c) + + mirRepo := s.buildMirRepoFromParam(c) + classIDs := s.getIntSliceFromString(c.DefaultQuery("class_ids", "")) + + requireAssetsHistStr := c.DefaultQuery("require_assets_hist", "False") + if len(requireAssetsHistStr) == 0 { + requireAssetsHistStr = "True" + } + requireAssetsHist, err := strconv.ParseBool(requireAssetsHistStr) + if err != nil { + panic(err) + } + + requireAnnotationsHistStr := c.DefaultQuery("require_annos_hist", "False") + if len(requireAnnotationsHistStr) == 0 { + requireAnnotationsHistStr = "True" + } + requireAnnotationsHist, err := strconv.ParseBool(requireAnnotationsHistStr) + if err != nil { + panic(err) + } + + resultData := s.handler.GetDatasetStatsHandler(mirRepo, classIDs, requireAssetsHist, requireAnnotationsHist) + ViewerSuccess(c, resultData) +} + +// @Summary Query dataset dups. +// @Accept json +// @Produce json +// @Param userID path string true "User ID" +// @Param repoID path string true "Repo ID" +// @Param candidate_dataset_ids query string true "e.g. 
candidate_dataset_ids=xxx,yyy" +// @Param corrodee_dataset_ids query string false "dataset_ids to be corroded" +// @Success 200 {string} string "'code': 0, 'msg': 'Success', 'Success': true, 'result': 'duplication: 50, total_count: {xxx: 100, yyy: 200}'" +// @Router /api/v1/users/{userID}/repo/{repoID}/dataset_duplication [get] +func (s *ViewerServer) handleDatasetDup(c *gin.Context) { + defer s.handleFailure(c) + + candidateMirRepos := []*constants.MirRepo{} + userID := c.Param("userID") + repoID := c.Param("repoID") + for _, v := range strings.Split(c.DefaultQuery("candidate_dataset_ids", ""), ",") { + if len(v) < 1 { + continue + } + candidateMirRepos = append(candidateMirRepos, &constants.MirRepo{ + SandboxRoot: s.sandbox, + UserID: userID, + RepoID: repoID, + BranchID: v, + TaskID: v, + }) + } + if len(candidateMirRepos) <= 0 { + ViewerFailure(c, &FailureResult{Code: constants.CodeViewerInvalidParms, + Msg: "Invalid candidate_dataset_ids."}) + return + } + + corrodeeMirRepos := []*constants.MirRepo{} + for _, v := range strings.Split(c.DefaultQuery("corrodee_dataset_ids", ""), ",") { + if len(v) < 1 { + continue + } + corrodeeMirRepos = append(corrodeeMirRepos, &constants.MirRepo{ + SandboxRoot: s.sandbox, + UserID: userID, + RepoID: repoID, + BranchID: v, + TaskID: v, + }) + } + + resultData := s.handler.GetDatasetDupHandler(candidateMirRepos, corrodeeMirRepos) + ViewerSuccess(c, resultData) +} + +// @Summary Query model info. +// @Accept json +// @Produce json +// @Param userID path string true "User ID" +// @Param repoID path string true "Repo ID" +// @Param branchID path string true "Branch ID" +// @Success 200 {string} string "'code': 0, 'msg': 'Success', 'Success': true, 'result': constants.MirdataModel" +// @Router /api/v1/users/{userID}/repo/{repoID}/branch/{branchID}/model_info [get] +func (s *ViewerServer) handleModelInfo(c *gin.Context) { + defer s.handleFailure(c) + + mirRepo := s.buildMirRepoFromParam(c) + + resultData := s.handler.GetModelInfoHandler(mirRepo) + ViewerSuccess(c, resultData) +} + +func (s *ViewerServer) handleHealth(c *gin.Context) { + ViewerSuccess(c, "Healthy") +} + +// @Summary Record metrics signals. +// @Accept json +// @Produce json +// @Param metricsGroup path string true "metrics_group" +// @Param ID post string true "id" +// @Param createTime post timestamp true "create_time" +// @Param classIDs post string true "e.g. class_ids=0,1,2" +// @Success 200 {string} string "'code': 0, 'msg': 'Success', 'Success': true, 'result': ''" +// @Router /api/v1/user_metrics/:metrics_group [post] +func (s *ViewerServer) handleMetricsRecord(c *gin.Context) { + metricsGroup := c.Param("metrics_group") + if len(metricsGroup) <= 0 { + ViewerFailure(c, &FailureResult{Code: constants.CodeViewerInvalidParms, + Msg: "Missing metricsGroup."}) + return + } + + if len(c.PostForm("id")) < 1 || len(c.PostForm("create_time")) < 1 || + len(c.PostForm("user_id")) < 1 || len(c.PostForm("project_id")) < 1 || len(c.PostForm("class_ids")) < 1 { + ViewerFailure(c, &FailureResult{Code: constants.CodeViewerInvalidParms, + Msg: "Missing required fields: id or create_time or user_id or project_id or class_ids."}) + return + } + + dataMap := map[string]interface{}{} + if err := c.Request.ParseForm(); err != nil { + panic(err) + } + for key, value := range c.Request.PostForm { + dataMap[key] = value[0] + } + + // Normalize params. 
+ classIDsKey := "class_ids" + createTimeKey := "create_time" + dataMap[classIDsKey] = s.getIntSliceFromString(dataMap[classIDsKey].(string)) + // Parse time from timestamp. + createTime, err := strconv.ParseInt(dataMap[createTimeKey].(string), 10, 64) + if err != nil { + panic(err) + } + dataMap[createTimeKey] = time.Unix(createTime, 0) + + log.Printf("recording metrics group: %s dataMap: %#v", metricsGroup, dataMap) + s.handler.MetricsRecordHandler(metricsGroup, dataMap) + ViewerSuccess(c, "") +} + +// @Summary Query metrics signals. +// @Accept json +// @Produce json +// @Param metrics_group path string true "metrics group name" +// @Param user_id query string true "user_id for filter" +// @Param class_ids query string false "class_ids for filter, e.g. 1,2,3,4,5" +// @Param query_field query string true "field of data to query" +// @Param bucket query string true "bucket type, e.g. bucket=count/time" +// @Param unit query string false "valid with bucket=time, e.g. unit=day/week/month" +// @Param limit query string false "limit, default is 8" +// @Success 200 {string} string "'code': 0, 'msg': 'Success', 'Success': true, 'result': ''" +// @Router /api/v1/user_metrics/{metrics_group} [get] +func (s *ViewerServer) handleMetricsQuery(c *gin.Context) { + metricsGroup := c.Param("metrics_group") + if len(metricsGroup) <= 0 { + ViewerFailure(c, &FailureResult{Code: constants.CodeViewerInvalidParms, + Msg: "Missing metricsGroup."}) + return + } + + // Validate queries. + userID := c.DefaultQuery("user_id", "") + queryField := c.DefaultQuery("query_field", "") + bucket := c.DefaultQuery("bucket", "") + if len(userID) < 1 || len(queryField) < 1 || len(bucket) < 1 { + ViewerFailure(c, &FailureResult{Code: constants.CodeViewerInvalidParms, + Msg: "Missing required fields: user_id, query_field or bucket."}) + return + } + limit := s.getInt(c.DefaultQuery("limit", "0")) + if limit < 1 { + limit = 8 + } + unit := c.DefaultQuery("unit", "") + + // Optional filter field.
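+ // An absent class_ids query parses to an empty slice, leaving the class filter unset.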
+ classIDs := s.getIntSliceFromString(c.DefaultQuery("class_ids", "")) + + result := s.handler.MetricsQueryHandler(metricsGroup, userID, classIDs, queryField, bucket, unit, limit) + log.Printf("MetricsQuery result: %+v", result) + ViewerSuccess(c, result) +} + +func (s *ViewerServer) handleFailure(c *gin.Context) { + if r := recover(); r != nil { + if s, ok := r.(string); ok { + r = errors.New(s) + } + if r, ok := r.(error); ok { + ViewerFailureFromErr(c, r) + return + } + + panic(fmt.Sprintf("unhandled error type: %T\n", r)) + } +} diff --git a/ymir/backend/src/ymir_hel/viewer/server/server_test.go b/ymir/backend/src/ymir_hel/viewer/server/server_test.go new file mode 100644 index 0000000000..b606ac8ec9 --- /dev/null +++ b/ymir/backend/src/ymir_hel/viewer/server/server_test.go @@ -0,0 +1,396 @@ +package server + +import ( + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "github.com/IndustryEssentials/ymir-hel/common/constants" + "github.com/IndustryEssentials/ymir-hel/configs" + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +func TestCreateViewer(t *testing.T) { + server, _ := NewViewerServer(&configs.Config{ViewerURI: "127.0.0.1:9527"}) + + server.getInt("invalid") + server.getIntSliceFromString("") + server.getIntSliceFromString(",") + server.getIntSliceFromString("getIntSliceFromQuery") + + go server.Start() + server.Clear() +} + +type MockViewerHandler struct { + mock.Mock +} + +func (h *MockViewerHandler) GetAssetsHandler( + mirRepo *constants.MirRepo, + offset int, + limit int, + classIDs []int, + annoTypes []string, + currentAssetID string, + cmTypes []int, + cks []string, + tags []string, +) *constants.QueryAssetsResult { + args := h.Called(mirRepo, offset, limit, classIDs, currentAssetID, cmTypes, cks, tags) + return args.Get(0).(*constants.QueryAssetsResult) +} + +func (h *MockViewerHandler) GetDatasetDupHandler( + candidateMirRepos []*constants.MirRepo, + corrodeeMirRepos []*constants.MirRepo, +) *constants.QueryDatasetDupResult { + args := h.Called(candidateMirRepos, corrodeeMirRepos) + return args.Get(0).(*constants.QueryDatasetDupResult) +} + +func (h *MockViewerHandler) MetricsQueryHandler( + metricsGroup string, + userID string, + classIDs []int, + queryField string, + bucket string, + unit string, + limit int, +) *[]constants.MetricsQueryPoint { + args := h.Called(metricsGroup, userID, classIDs, queryField, bucket, unit, limit) + return args.Get(0).(*[]constants.MetricsQueryPoint) +} +func (h *MockViewerHandler) MetricsRecordHandler( + metricsGroup string, + postForm map[string]interface{}, +) { + h.Called(metricsGroup, postForm) +} + +func (h *MockViewerHandler) GetDatasetMetaCountsHandler( + mirRepo *constants.MirRepo, +) *constants.QueryDatasetStatsResult { + args := h.Called(mirRepo) + return args.Get(0).(*constants.QueryDatasetStatsResult) +} + +func (h *MockViewerHandler) GetDatasetStatsHandler( + mirRepo *constants.MirRepo, + classIDs []int, + requireAssetsHist bool, + requireAnnotationsHist bool, +) *constants.QueryDatasetStatsResult { + args := h.Called(mirRepo, classIDs, requireAssetsHist, requireAnnotationsHist) + return args.Get(0).(*constants.QueryDatasetStatsResult) +} + +func (h *MockViewerHandler) GetModelInfoHandler(mirRepo *constants.MirRepo) *constants.MirdataModel { + args := h.Called(mirRepo) + return args.Get(0).(*constants.MirdataModel) +} + +func buildResponseBody( + code constants.ResponseCode, + msg string, + success bool, + result interface{}, +) []byte { + 
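+ // Marshal the expected ResultVO the same way the server does, so tests can compare raw response bodies as strings.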
resp := &ResultVO{Code: code, Msg: msg, Success: success, Result: result} + bytes, err := json.Marshal(resp) + if err != nil { + panic(err) + } + return bytes +} + +func TestStatsPageHandlerSuccess(t *testing.T) { + mockHandler := MockViewerHandler{} + viewer := &ViewerServer{gin: gin.Default(), handler: &mockHandler} + + r := viewer.gin + r.GET("/users/:userID/repo/:repoID/branch/:branchID/dataset_stats", viewer.handleDatasetStats) + + userID := "userID" + repoID := "repoID" + branchID := "branchID" + classIDs := []int{0, 1} + classIDsStr := "0,1" + statsRequestURL := fmt.Sprintf( + "/users/%s/repo/%s/branch/%s/dataset_stats?class_ids=%s", + userID, + repoID, + branchID, + classIDsStr, + ) + + statsExpectedResult := constants.NewQueryDatasetStatsResult() + for classID := range classIDs { + statsExpectedResult.Gt.ClassIDsCount[classID] = 0 + statsExpectedResult.Pred.ClassIDsCount[classID] = 0 + } + statsExpectedResponseData := buildResponseBody( + constants.CodeSuccess, + "Success", + true, + statsExpectedResult, + ) + + mirRepo := constants.MirRepo{UserID: userID, RepoID: repoID, BranchID: branchID, TaskID: branchID} + mockHandler.On("GetDatasetStatsHandler", &mirRepo, classIDs, false, false).Return(statsExpectedResult) + + req, _ := http.NewRequest("GET", statsRequestURL, nil) + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + assert.Equal(t, string(statsExpectedResponseData), w.Body.String()) + assert.Equal(t, http.StatusOK, w.Code) +} + +func TestMetaCountPageHandlerSuccess(t *testing.T) { + mockHandler := MockViewerHandler{} + viewer := &ViewerServer{gin: gin.Default(), handler: &mockHandler} + + r := viewer.gin + r.GET("/users/:userID/repo/:repoID/branch/:branchID/dataset_meta_count", viewer.handleDatasetMetaCounts) + + userID := "userID" + repoID := "repoID" + branchID := "branchID" + metaRequestURL := fmt.Sprintf( + "/users/%s/repo/%s/branch/%s/dataset_meta_count", + userID, + repoID, + branchID, + ) + + metaExpectedResult := constants.NewQueryDatasetStatsResult() + metaExpectedResponseData := buildResponseBody( + constants.CodeSuccess, + "Success", + true, + metaExpectedResult, + ) + + mirRepo := constants.MirRepo{UserID: userID, RepoID: repoID, BranchID: branchID, TaskID: branchID} + mockHandler.On("GetDatasetMetaCountsHandler", &mirRepo).Return(metaExpectedResult) + + req, _ := http.NewRequest("GET", metaRequestURL, nil) + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + assert.Equal(t, string(metaExpectedResponseData), w.Body.String()) + assert.Equal(t, http.StatusOK, w.Code) +} + +func TestMetaCountPageHandlerFailure(t *testing.T) { + mockHandler := MockViewerHandler{} + viewer := &ViewerServer{gin: gin.Default(), handler: &mockHandler} + + r := viewer.gin + r.GET("/users/:userID/repo/:repoID/branch/:branchID/dataset_meta_count", viewer.handleDatasetMetaCounts) + + userID := "userID" + repoID := "repoID" + branchID := "branchID" + metaRequestURL := fmt.Sprintf( + "/users/%s/repo/%s/branch/%s/dataset_meta_count", + userID, + repoID, + branchID, + ) + + failureResult := FailureResult{ + Code: constants.CodeViewerRepoNotExist, + Msg: "unknown ref", + } + statsExpectedResponseData := buildResponseBody( + failureResult.Code, + failureResult.Msg, + false, + failureResult, + ) + + mirRepo := constants.MirRepo{UserID: userID, RepoID: repoID, BranchID: branchID, TaskID: branchID} + mockHandler.On("GetDatasetMetaCountsHandler", &mirRepo).Panic("unknown ref") + + req, _ := http.NewRequest("GET", metaRequestURL, nil) + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + 
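+ // The deferred handleFailure should recover the mocked panic and render it as a JSON failure body with HTTP 400.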
assert.Equal(t, string(statsExpectedResponseData), w.Body.String()) + assert.Equal(t, http.StatusBadRequest, w.Code) +} + +func TestDupPageHandlerSuccess(t *testing.T) { + mockHandler := MockViewerHandler{} + viewer := &ViewerServer{gin: gin.Default(), handler: &mockHandler} + + r := viewer.gin + r.GET("/users/:userID/repo/:repoID/dataset_duplication", viewer.handleDatasetDup) + + userID := "userID" + repoID := "repoID" + branchID0 := "branchID0" + branchID1 := "branchID1" + dupRequestURL := fmt.Sprintf( + "/users/%s/repo/%s/dataset_duplication?candidate_dataset_ids=%s,%s", + userID, + repoID, + branchID0, + branchID1, + ) + + dupCount := 100 + branchCount0 := int64(1000) + branchCount1 := int64(2000) + mockDupResult := &constants.QueryDatasetDupResult{ + Duplication: dupCount, + TotalCount: map[string]int64{branchID0: branchCount0, branchID1: branchCount1}, + } + statsExpectedResponseData := buildResponseBody( + constants.CodeSuccess, + "Success", + true, + mockDupResult, + ) + + // Set mock funcs. + mirRepo0 := constants.MirRepo{UserID: userID, RepoID: repoID, BranchID: branchID0, TaskID: branchID0} + mirRepo1 := constants.MirRepo{UserID: userID, RepoID: repoID, BranchID: branchID1, TaskID: branchID1} + mockHandler.On("GetDatasetDupHandler", []*constants.MirRepo{&mirRepo0, &mirRepo1}, []*constants.MirRepo{}). + Return(mockDupResult) + + req, _ := http.NewRequest("GET", dupRequestURL, nil) + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + assert.Equal(t, string(statsExpectedResponseData), w.Body.String()) + assert.Equal(t, http.StatusOK, w.Code) +} + +func TestDupPageHandlerFailure(t *testing.T) { + mockHandler := MockViewerHandler{} + viewer := &ViewerServer{gin: gin.Default(), handler: &mockHandler} + + r := viewer.gin + r.GET("/users/:userID/repo/:repoID/dataset_duplication", viewer.handleDatasetDup) + + userID := "userID" + repoID := "repoID" + dupRequestURL0 := fmt.Sprintf( + "/users/%s/repo/%s/dataset_duplication", + userID, + repoID, + ) + failureResult := FailureResult{ + Code: constants.CodeViewerInvalidParms, + Msg: "Invalid candidate_dataset_ids.", + } + statsExpectedResponseData := buildResponseBody( + failureResult.Code, + failureResult.Msg, + false, + failureResult, + ) + req, _ := http.NewRequest("GET", dupRequestURL0, nil) + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + assert.Equal(t, string(statsExpectedResponseData), w.Body.String()) + assert.Equal(t, http.StatusBadRequest, w.Code) + + dupRequestURL1 := fmt.Sprintf( + "/users/%s/repo/%s/dataset_duplication?candidate_dataset_ids=,", + userID, + repoID, + ) + failureResult = FailureResult{ + Code: constants.CodeViewerInvalidParms, + Msg: "Invalid candidate_dataset_ids.", + } + statsExpectedResponseData = buildResponseBody( + failureResult.Code, + failureResult.Msg, + false, + failureResult, + ) + req, _ = http.NewRequest("GET", dupRequestURL1, nil) + w = httptest.NewRecorder() + r.ServeHTTP(w, req) + assert.Equal(t, string(statsExpectedResponseData), w.Body.String()) + assert.Equal(t, http.StatusBadRequest, w.Code) +} + +func TestAssetsPageHandlerSuccess(t *testing.T) { + mockHandler := MockViewerHandler{} + viewer := &ViewerServer{gin: gin.Default(), handler: &mockHandler} + + r := viewer.gin + r.GET("/users/:userID/repo/:repoID/branch/:branchID/assets", viewer.handleAssets) + + userID := "userID" + repoID := "repoID" + branchID := "branchID" + offset := -1 + limit := 0 + classIDs := []int{0, 1} + classIDsStr := "0,1" + currentAssetID := "asset_id" + cmTypes := "0,1" + cks := "ck0,ck1" + tags := "tag0,tag1" + 
querySuffix := fmt.Sprintf("offset=%d&limit=%d&class_ids=%s¤t_asset_id=%s&cm_types=%s&cks=%s&tags=%s", + offset, + limit, + classIDsStr, + currentAssetID, + cmTypes, + cks, + tags, + ) + dupRequestURL := fmt.Sprintf( + "/users/%s/repo/%s/branch/%s/assets?%s", + userID, + repoID, + branchID, + querySuffix, + ) + + assetsExpectedResult := &constants.QueryAssetsResult{ + AssetsDetail: []constants.MirAssetDetail{}, + Offset: 0, + Limit: 1, + Anchor: int64(len(classIDs)), + TotalAssetsCount: 42, + } + assetsExpectedResponseData := buildResponseBody( + constants.CodeSuccess, + "Success", + true, + assetsExpectedResult, + ) + + revisedOffset := 0 + revisedLimit := 1 + revisedcmTypes := []int{0, 1} + revisedCks := []string{"ck0", "ck1"} + revisedTags := []string{"tag0", "tag1"} + mirRepo := constants.MirRepo{UserID: userID, RepoID: repoID, BranchID: branchID, TaskID: branchID} + mockHandler.On( + "GetAssetsHandler", + &mirRepo, + revisedOffset, + revisedLimit, + classIDs, + currentAssetID, + revisedcmTypes, + revisedCks, + revisedTags). + Return(assetsExpectedResult) + + req, _ := http.NewRequest("GET", dupRequestURL, nil) + w := httptest.NewRecorder() + r.ServeHTTP(w, req) + assert.Equal(t, string(assetsExpectedResponseData), w.Body.String()) + assert.Equal(t, http.StatusOK, w.Code) +} diff --git a/ymir/backend/src/ymir_monitor/monitor/libs/redis_handler.py b/ymir/backend/src/ymir_monitor/monitor/libs/redis_handler.py index 473222a1ee..981f9a6ebb 100644 --- a/ymir/backend/src/ymir_monitor/monitor/libs/redis_handler.py +++ b/ymir/backend/src/ymir_monitor/monitor/libs/redis_handler.py @@ -1,5 +1,5 @@ import json -from typing import Dict +from typing import Dict, List from redis import StrictRedis, Redis @@ -30,6 +30,12 @@ def get(self, name: str) -> str: def xadd(self, name: str, fields: Dict) -> None: self._redis.xadd(name, fields) + def batch_xadd(self, name: str, payloads: List[Dict]) -> None: + pipe = self._redis.pipeline() + for payload in payloads: + pipe.xadd(name, payload) + pipe.execute() + def hset(self, name: str, key: str, value: Dict) -> None: self._redis.hset(name=name, key=key, value=json.dumps(value)) diff --git a/ymir/backend/src/ymir_monitor/monitor/libs/services.py b/ymir/backend/src/ymir_monitor/monitor/libs/services.py index f58925a268..99c622ff85 100644 --- a/ymir/backend/src/ymir_monitor/monitor/libs/services.py +++ b/ymir/backend/src/ymir_monitor/monitor/libs/services.py @@ -4,7 +4,7 @@ from common_utils.percent_log_util import PercentLogHandler, LogState from monitor.config import settings from monitor.libs.redis_handler import RedisHandler -from monitor.schemas.task import TaskParameter, PercentResult, TaskStorageStructure, TaskExtraInfo +from monitor.schemas.task import TaskParameter, PercentResult, TaskStorageStructure from monitor.utils.errors import DuplicateTaskIDError, LogFileError logger = logging.getLogger(__name__) @@ -19,18 +19,22 @@ def add_single_task(self, task_id: str, task_info: Dict) -> None: def get_raw_log_contents(self, log_paths: Dict[str, float]) -> Dict[str, PercentResult]: result = dict() - for one_log_file in log_paths: + for log_path in log_paths: try: - percent_result = PercentLogHandler.parse_percent_log(one_log_file) + percent_result = PercentLogHandler.parse_percent_log(log_path) except ValueError as e: raise LogFileError(f"percent log content error {e}") - result[one_log_file] = percent_result + result[log_path] = percent_result return result @staticmethod - def merge_task_progress_contents(task_id: str, raw_log_contents: Dict[str, 
PercentResult], - log_path_weights: Dict[str, float]) -> PercentResult: + def merge_task_progress_contents( + task_id: str, raw_log_contents: Dict[str, PercentResult], log_path_weights: Dict[str, float] + ) -> PercentResult: + """ + Calculate the weighted-sum progress (in percent) of a multi-stage task. + """ percent = 0.0 log_files_state_set = set() max_timestamp_content = None @@ -75,29 +79,27 @@ def check_existence(self, task_id: str) -> bool: return running_existence or finished_existence - def register_task(self, reg_parameters: TaskParameter) -> None: - if self.check_existence(reg_parameters.task_id): - raise DuplicateTaskIDError(f"duplicate task id {reg_parameters.task_id}") + def register_task(self, task_parameter: TaskParameter) -> None: + if self.check_existence(task_parameter.task_id): + raise DuplicateTaskIDError(f"duplicate task id {task_parameter.task_id}") - log_path_weights = reg_parameters.log_path_weights + log_path_weights = task_parameter.log_path_weights raw_log_contents = self.get_raw_log_contents(log_path_weights) - if len(raw_log_contents) != len(reg_parameters.log_path_weights): + if len(raw_log_contents) != len(task_parameter.log_path_weights): raise LogFileError percent_result = self.merge_task_progress_contents( - task_id=reg_parameters.task_id, + task_id=task_parameter.task_id, raw_log_contents=raw_log_contents, log_path_weights=log_path_weights, ) - task_extra_info = TaskExtraInfo.parse_obj(reg_parameters.dict()) - percent_result = PercentResult.parse_obj(percent_result.dict()) task_info = TaskStorageStructure( raw_log_contents=raw_log_contents, - task_extra_info=task_extra_info, + task_extra_info=task_parameter, percent_result=percent_result, ) - self.add_single_task(reg_parameters.task_id, task_info.dict()) + self.add_single_task(task_parameter.task_id, task_info.dict()) logger.info(f"register task successful: {task_info.dict()} ") diff --git a/ymir/backend/src/ymir_monitor/monitor/schemas/task.py b/ymir/backend/src/ymir_monitor/monitor/schemas/task.py index e99d5390eb..9c53c13c25 100644 --- a/ymir/backend/src/ymir_monitor/monitor/schemas/task.py +++ b/ymir/backend/src/ymir_monitor/monitor/schemas/task.py @@ -14,7 +14,6 @@ class MonitorType(IntEnum): class TaskParameter(BaseModel): task_id: str - user_id: str monitor_type: MonitorType = MonitorType.PERCENT log_path_weights: Dict[str, float] description: Optional[str] @@ -22,7 +21,7 @@ class TaskParameter(BaseModel): @validator("log_path_weights") def check_files(cls, log_path_weights: Dict[str, float]) -> Dict[str, float]: for log_path in log_path_weights: - if not os.path.exists(log_path): + if not os.path.isfile(log_path): raise ValueError(f"log_path not exists {log_path}") delta = 0.001 @@ -33,7 +32,6 @@ def check_files(cls, log_path_weights: Dict[str, float]) -> Dict[str, float]: class TaskExtraInfo(BaseModel): - user_id: Optional[str] = None monitor_type: MonitorType = MonitorType.PERCENT log_path_weights: Dict[str, float] description: Optional[str] @@ -43,10 +41,3 @@ class TaskStorageStructure(BaseModel): raw_log_contents: Dict[str, PercentResult] task_extra_info: TaskExtraInfo percent_result: PercentResult - - -class TaskSetStorageStructure(BaseModel): - __root__: Dict[str, TaskStorageStructure] - - def dict(self) -> Dict: # type: ignore - return super().dict()["__root__"] diff --git a/ymir/backend/src/ymir_monitor/monitor/utils/crontab_job.py b/ymir/backend/src/ymir_monitor/monitor/utils/crontab_job.py index 1965667bd8..1836d5be9a 100644 --- a/ymir/backend/src/ymir_monitor/monitor/utils/crontab_job.py
+++ b/ymir/backend/src/ymir_monitor/monitor/utils/crontab_job.py @@ -1,8 +1,8 @@ -from datetime import datetime -import json +import itertools import logging import sys -from typing import List +import time +from typing import List, Optional import sentry_sdk from apscheduler.schedulers.blocking import BlockingScheduler @@ -13,83 +13,114 @@ from monitor.libs import redis_handler from monitor.libs.redis_handler import RedisHandler from monitor.libs.services import TaskService -from monitor.schemas.task import TaskSetStorageStructure - - -def send_updated_task(redis_client: RedisHandler, updated_info: TaskSetStorageStructure) -> None: - for event in updated_info.dict().values(): - logging.info("send_updated_task: %s to redis stream", event) - redis_client.xadd(settings.APP_REDIS_STREAM, {"payload": json.dumps(event)}) - - -def process_updated_task( - redis_client: RedisHandler, - task_updated_model: TaskSetStorageStructure, - task_id_finished: List[str], -) -> None: - # sentry will catch Exception - send_updated_task(redis_client, task_updated_model) - task_updated = task_updated_model.dict() - redis_client.hmset(settings.MONITOR_RUNNING_KEY, mapping=task_updated) - if task_id_finished: - redis_client.hmset( - settings.MONITOR_FINISHED_KEY, mapping={task_id: task_updated[task_id] for task_id in task_id_finished} - ) - redis_client.hdel(settings.MONITOR_RUNNING_KEY, *task_id_finished) +from monitor.schemas.task import TaskStorageStructure + + +def notify_updated_task(redis_client: RedisHandler, task_infos: List[TaskStorageStructure]) -> None: + """ + Enqueue updated tasks to the Redis stream. + The Ymir app will fetch them and forward them to the frontend. + """ + payloads = [{"payload": task_info.json()} for task_info in task_infos] + logging.info("notify_updated_task: %s to redis stream", payloads) + redis_client.batch_xadd(settings.APP_REDIS_STREAM, payloads) + + +def _update_redis_for_running_tasks(redis_client: RedisHandler, task_infos: List[TaskStorageStructure]) -> None: + task_info_mapping = {task_info.percent_result.task_id: task_info.dict() for task_info in task_infos} + redis_client.hmset(settings.MONITOR_RUNNING_KEY, mapping=task_info_mapping) + logging.info(f"processed redis key for updated task ids {task_info_mapping.keys()}") + + +def _update_redis_for_finished_tasks(redis_client: RedisHandler, task_infos: List[TaskStorageStructure]) -> None: + task_info_mapping = {task_info.percent_result.task_id: task_info.dict() for task_info in task_infos} + redis_client.hmset(settings.MONITOR_FINISHED_KEY, mapping=task_info_mapping) + redis_client.hdel(settings.MONITOR_RUNNING_KEY, *task_info_mapping.keys()) + logging.info(f"processed redis key for finished task ids {task_info_mapping.keys()}") - logging.info(f"finished task ids {task_id_finished}") +def update_monitor_redis(redis_client: RedisHandler, task_infos: List[TaskStorageStructure]) -> None: + for state, tasks in itertools.groupby( + sorted(task_infos, key=lambda x: x.percent_result.state), key=lambda x: x.percent_result.state + ): + # group tasks by state so tasks in the same state are bulk-processed in one go + if state in [LogState.DONE, LogState.ERROR]: + _update_redis_for_finished_tasks(redis_client, list(tasks)) + else: + _update_redis_for_running_tasks(redis_client, list(tasks)) -def update_monitor_percent_log() -> None: + +def read_latest_log(log_path: str, task_id: str) -> Optional[PercentResult]: + try: + percent_result = PercentLogHandler.parse_percent_log(log_path) + except EOFError: + msg = f"skip empty log file: {log_path}" +
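+ # Report the empty file but skip this path; the caller keeps its previous progress.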
sentry_sdk.capture_exception() + logging.exception(msg) + return None + except Exception: + msg = f"failed to parse log file: {log_path}" + sentry_sdk.capture_exception() + logging.exception(msg) + percent_result = PercentResult( + task_id=task_id, + timestamp=f"{time.time():.6f}", + percent=1.0, + state=LogState.ERROR, + state_code=MonitorErrorCode.PERCENT_LOG_PARSE_ERROR, + state_message=msg, + ) + return percent_result + + +def monitor_task_logs() -> None: + """ + Periodically monitor task logs. + Only registered tasks will be checked. + """ redis_client = redis_handler.RedisHandler() - contents = redis_client.hgetall(settings.MONITOR_RUNNING_KEY) - - task_updated = dict() - task_id_finished = [] - for task_id, content in contents.items(): - flag_task_updated = False - runtime_log_contents = dict() - logging.info(f"content: {content}") - for log_path, previous_log_content in content["raw_log_contents"].items(): - try: - runtime_log_content = PercentLogHandler.parse_percent_log(log_path) - except ValueError as e: - sentry_sdk.capture_exception(e) - logging.exception(e) - runtime_log_content = PercentResult(task_id=task_id, - timestamp=f"{datetime.now().timestamp():.6f}", - percent=1.0, - state=LogState.ERROR, - state_code=MonitorErrorCode.PERCENT_LOG_PARSE_ERROR, - state_message=f"logfile parse error: {log_path}") - - runtime_log_contents[log_path] = runtime_log_content - if runtime_log_content.timestamp != previous_log_content["timestamp"]: - flag_task_updated = True - - if flag_task_updated: - task_extra_info = content["task_extra_info"] - content_merged = TaskService.merge_task_progress_contents( + running_tasks = redis_client.hgetall(settings.MONITOR_RUNNING_KEY) + + updated_tasks = [] + for task_id, task_info in running_tasks.items(): + # task_info: TaskStorageStructure.dict() + is_updated_task = False + raw_log_contents = {} + logging.info(f"previous percent_result: {task_info['percent_result']}") + for log_path, previous_percent_result in task_info["raw_log_contents"].items(): + percent_result = read_latest_log(log_path, task_id) + if not percent_result: + continue + logging.info(f"current percent_result: {percent_result}") + raw_log_contents[log_path] = percent_result + if percent_result.timestamp != previous_percent_result["timestamp"]: + is_updated_task = True + + if is_updated_task: + task_extra_info = task_info["task_extra_info"] + merged_percent_result = TaskService.merge_task_progress_contents( task_id=task_id, - raw_log_contents=runtime_log_contents, + raw_log_contents=raw_log_contents, log_path_weights=task_extra_info["log_path_weights"], ) - if content_merged.state in [LogState.DONE, LogState.ERROR]: - task_id_finished.append(task_id) - task_updated[task_id] = dict( - raw_log_contents=runtime_log_contents, - task_extra_info=task_extra_info, - percent_result=content_merged, + logging.info(f"merged percent_result: {merged_percent_result}") + updated_tasks.append( + TaskStorageStructure( + raw_log_contents=raw_log_contents, + task_extra_info=task_extra_info, + percent_result=merged_percent_result, + ) ) - if len(task_updated): - task_updated_model = TaskSetStorageStructure.parse_obj(task_updated) - process_updated_task(redis_client, task_updated_model, task_id_finished) + if not updated_tasks: + return + notify_updated_task(redis_client, updated_tasks) + update_monitor_redis(redis_client, updated_tasks) if __name__ == "__main__": logging.basicConfig(stream=sys.stdout, format="%(levelname)-8s: [%(asctime)s] %(message)s", level=logging.INFO) 
sentry_sdk.init(settings.MONITOR_SENTRY_DSN) sched = BlockingScheduler() - sched.add_job(update_monitor_percent_log, "interval", seconds=settings.INTERVAL_SECONDS) + sched.add_job(monitor_task_logs, "interval", seconds=settings.INTERVAL_SECONDS) sched.start() diff --git a/ymir/backend/src/ymir_monitor/tests/test_api.py b/ymir/backend/src/ymir_monitor/tests/test_api.py index 02a39d7176..92d8068647 100644 --- a/ymir/backend/src/ymir_monitor/tests/test_api.py +++ b/ymir/backend/src/ymir_monitor/tests/test_api.py @@ -3,19 +3,16 @@ class TestReg: def test_reg(self, client: TestClient, clear_redislite, mocker): - mocker.patch("os.path.exists", return_value=True) + mocker.patch("os.path.isfile", return_value=True) data = "t0000003000003df78d31639637101 21245543 0.50 2" mocker.patch("builtins.open", mocker.mock_open(read_data=data)) - body = dict( - task_id="abcdadf", - user_id="12", - log_path_weights={ - "/data/test/monitor.txtaa": 0.5, - "/data/test/m2.txtaa": 0.5 - }, - ) + body = { + "task_id": "abcdadf", + "user_id": "12", + "log_path_weights": {"/data/test/monitor.txtaa": 0.5, "/data/test/m2.txtaa": 0.5}, + } r = client.post("/api/v1/tasks", json=body) assert r.status_code == 200 diff --git a/ymir/backend/src/ymir_monitor/tests/test_cron.py b/ymir/backend/src/ymir_monitor/tests/test_cron.py index 5854f1a8b4..c83697060e 100644 --- a/ymir/backend/src/ymir_monitor/tests/test_cron.py +++ b/ymir/backend/src/ymir_monitor/tests/test_cron.py @@ -1,25 +1,28 @@ import requests from fastapi.testclient import TestClient -from monitor.utils.crontab_job import update_monitor_percent_log +from monitor.utils.crontab_job import monitor_task_logs -def test_monitor_percent_log(client: TestClient, clear_redislite, mocker): - mocker.patch("os.path.exists", return_value=True) - data = "task_id_1 21245543 0.50 2" - mocker.patch("builtins.open", mocker.mock_open(read_data=data)) - body = dict(task_id="task_id_1", user_id="12", log_path_weights={"/data/test/monitor.txt": 1.0},) +def test_monitor_percent_log(client: TestClient, clear_redislite, mocker, tmp_path): + log_path = tmp_path / "monitor.txt" + log_path.write_text("task_id_1 21245543 0.50 2") + + body = { + "task_id": "task_id_1", + "user_id": "12", + "log_path_weights": {str(log_path): 1.0}, + } client.post("/api/v1/tasks", json=body) - data = "task_id_1 21245567 1 3" - mocker.patch("builtins.open", mocker.mock_open(read_data=data)) + log_path.write_text("task_id_1 21245567 1 3") mock_resp = mocker.Mock() mock_resp.raise_for_status = mocker.Mock() mock_resp.status_code = 200 mocker.patch.object(requests, "post", return_value=mock_resp) - update_monitor_percent_log() + monitor_task_logs() r = client.get("/api/v1/finished_tasks") @@ -27,7 +30,7 @@ def test_monitor_percent_log(client: TestClient, clear_redislite, mocker): "result": { "task_id_1": { "raw_log_contents": { - "/data/test/monitor.txt": { + str(log_path): { "task_id": "task_id_1", "timestamp": "21245567.0", "percent": 1.0, @@ -38,11 +41,8 @@ def test_monitor_percent_log(client: TestClient, clear_redislite, mocker): } }, "task_extra_info": { - "user_id": "12", "monitor_type": 1, - "log_path_weights": { - "/data/test/monitor.txt": 1.0 - }, + "log_path_weights": {str(log_path): 1.0}, "description": None, }, "percent_result": { diff --git a/ymir/backend/src/ymir_viz/.coveragerc b/ymir/backend/src/ymir_viz/.coveragerc deleted file mode 100644 index 5d000562d2..0000000000 --- a/ymir/backend/src/ymir_viz/.coveragerc +++ /dev/null @@ -1,6 +0,0 @@ -[run] -omit = - src/ymir_viz/src/swagger_models/* - 
src/ymir_viz/src/swagger/* - src/ymir_viz/src/type_util.py - src/ymir_viz/src/util.py diff --git a/ymir/backend/src/ymir_viz/README.md b/ymir/backend/src/ymir_viz/README.md deleted file mode 100644 index 5f94d48401..0000000000 --- a/ymir/backend/src/ymir_viz/README.md +++ /dev/null @@ -1,48 +0,0 @@ -# YMIR-VIZ - -> https://github.com/IndustryEssentials/ymir - -## Features - -- Load ymir-cmd's persistence files, provide RESTful API for query based on Redis - -## External dependency - -- ymir-command's persistence file path (sandbox path) - -## Development - -To install dev dependencies you can use the following command: - -```bash -pip3 install -r requirements.txt -r requirements-dev.txt -``` - -To contribute to the framework - -- Contribute to definition of API: - - Edit `./doc/ymir_viz_API.yaml` - - run `sh codegen.sh` to generate code -- Do not edit `./src/swagger_models` and `./src/swagger`,which are generated by `swagger-codegen` - -Run local server with the following command: - -```bash -python wsgi.py -``` - -Then you can see the API definition by Swagger in browser: - -``` -http://localhost:9099/v1/ui/ -``` - -For more information about swagger-codegen, have a look [here](https://github.com/swagger-api/swagger-codegen). - -### Tests - -Unit tests are within the tests folder and we recommend to run them using `tox`. - -```bash -tox -``` diff --git a/ymir/backend/src/ymir_viz/codegen.sh b/ymir/backend/src/ymir_viz/codegen.sh deleted file mode 100644 index 35d771d694..0000000000 --- a/ymir/backend/src/ymir_viz/codegen.sh +++ /dev/null @@ -1,20 +0,0 @@ -CODE_FOLDER=$( - cd "$(dirname "$0")" - pwd -) - -rm -rf ./codegen_output/* -mkdir -p ./codegen_output/ -echo "$CODE_FOLDER" - -docker run --user $(id -u ${USER}):$(id -g ${USER}) --rm -v ${CODE_FOLDER}:/local swaggerapi/swagger-codegen-cli-v3 generate \ --i /local/doc/ymir_viz_API.yaml \ ---model-package swagger_models \ --l python-flask \ --o /local/codegen_output - -sed -i "s/swagger_server/src/g" $(grep swagger_server -rl ${CODE_FOLDER}/codegen_output/*) - -cp -rf ${CODE_FOLDER}/codegen_output/swagger_server/swagger_models ${CODE_FOLDER}/src/ -cp -rf ${CODE_FOLDER}/codegen_output/swagger_server/swagger ${CODE_FOLDER}/src/ -rm -rf ./codegen_output diff --git a/ymir/backend/src/ymir_viz/doc/ymir_viz_API.yaml b/ymir/backend/src/ymir_viz/doc/ymir_viz_API.yaml deleted file mode 100644 index a917a1f0ed..0000000000 --- a/ymir/backend/src/ymir_viz/doc/ymir_viz_API.yaml +++ /dev/null @@ -1,427 +0,0 @@ -openapi: 3.0.1 -info: - title: Ymir-viz - description: Ymir-viz - contact: - email: test@ymir.ai - license: - name: Apache 2.0 - url: "https://www.apache.org/licenses/LICENSE-2.0.html" - version: 1.0.0 -servers: - - url: "http://xxx.com/v1" -tags: - - name: asset - description: Assets represent the smallest granular resources, like pictures - - name: model - description: Training task produces model -paths: - "/users/{user_id}/repositories/{repo_id}/branches/{branch_id}/datasets": - get: - tags: - - dataset - summary: "get dataset info" - description: "get dataset info" - operationId: get_dataset_info - parameters: - - name: user_id - in: path - description: user_id - required: true - schema: - type: string - - name: repo_id - in: path - description: repo_id - required: true - schema: - type: string - - name: branch_id - in: path - description: branch_id - required: true - schema: - type: string - responses: - "200": - description: successful operation - content: - application/json: - schema: - $ref: "#/components/schemas/DatasetResult" - 
"400": - description: Dataset not exists - "/users/{user_id}/repositories/{repo_id}/branches/{branch_id}/assets": - get: - tags: - - asset - summary: "get assets info from special user_id,repo_id,branch_id" - description: "get assets info from special user_id,repo_id,branch_id, sort by asset_id" - operationId: get_asserts_info - parameters: - - name: user_id - in: path - description: user_id - required: true - schema: - type: string - - name: repo_id - in: path - description: repo_id - required: true - schema: - type: string - - name: branch_id - in: path - description: branch_id - required: true - schema: - type: string - - name: offset - in: query - description: offset - required: false - schema: - type: integer - default: 0 - - name: limit - in: query - description: limit - required: false - schema: - type: integer - default: 20 - maximum: 100 - minimum: 0 - - name: class_id - in: query - description: single class id - required: false - schema: - type: integer - default: false - responses: - "200": - description: successful operation - content: - application/json: - schema: - $ref: "#/components/schemas/AssetsResult" - "400": - description: Branch not exists - content: {} - "/users/{user_id}/repositories/{repo_id}/branches/{branch_id}/assets/{asset_id}": - get: - tags: - - asset - summary: "get asset_id info" - description: "" - operationId: get_assert_id_Info - parameters: - - name: user_id - in: path - description: user_id - required: true - schema: - type: string - - name: repo_id - in: path - description: repo_id - required: true - schema: - type: string - - name: branch_id - in: path - description: branch_id - required: true - schema: - type: string - - name: asset_id - in: path - description: asset_id - required: true - schema: - type: string - responses: - "200": - description: successful operation - content: - application/json: - schema: - $ref: "#/components/schemas/AssetMetaResult" - "400": - description: Branch not exists - content: {} - "/users/{user_id}/repositories/{repo_id}/branches/{branch_id}/models": - get: - tags: - - model - summary: "get model info" - description: "get model info" - operationId: get_model_info - parameters: - - name: user_id - in: path - description: user_id - required: true - schema: - type: string - - name: repo_id - in: path - description: repo_id - required: true - schema: - type: string - - name: branch_id - in: path - description: branch_id - required: true - schema: - type: string - responses: - "200": - description: successful operation - content: - application/json: - schema: - $ref: "#/components/schemas/ModelResult" - "400": - description: Task not exists - "/users/{user_id}/repositories/{repo_id}/branches/{branch_id}/evaluations": - get: - tags: - - evaluation - summary: "get dataset evaluation result" - description: "get dataset evaluation result" - operationId: get_dataset_evaluations - parameters: - - name: user_id - in: path - description: user_id - required: true - schema: - type: string - - name: repo_id - in: path - description: repo_id - required: true - schema: - type: string - - name: branch_id - in: path - description: branch_id - required: true - schema: - type: string - responses: - "200": - description: successful operation - content: - application/json: - schema: - $ref: "#/components/schemas/DatasetEvaluationResult" - "400": - description: DatasetEvaluation not exists -components: - schemas: - AssetInfo: - type: object - properties: - asset_id: - type: string - class_ids: - type: array - items: - type: integer - example: 
[2, 4] - # annotations: - # $ref: '#/components/schemas/Annotations' - AssetsResult: - allOf: - - $ref: "#/components/schemas/ApiResponse" - - type: object - properties: - result: - type: object - properties: - elements: - type: array - items: - $ref: "#/components/schemas/AssetInfo" - offset: - type: integer - limit: - type: integer - tatal: - type: integer - AssetMetaResult: - allOf: - - $ref: "#/components/schemas/ApiResponse" - - type: object - properties: - result: - $ref: "#/components/schemas/AssetsMetaInfo" - AssetsMetaInfo: - type: object - properties: - annotations: - $ref: "#/components/schemas/Annotations" - class_ids: - type: array - items: - type: integer - metadata: - type: object - properties: - asset_type: - type: string - width: - type: integer - height: - type: integer - image_channels: - type: integer - timestamp: - type: object - properties: - start: - type: integer - example: - annotations: - - box: - x: 121 - y: 1 - w: 33 - h: 44 - class_id: 4 - score: 0.33 - class_ids: - - 4 - - 6 - metadata: - asset_type: pic - width: 100 - height: 100 - image_channels: 1 - timestamp: { "start": 45645 } - ModelResult: - allOf: - - $ref: "#/components/schemas/ApiResponse" - - type: object - properties: - result: - type: object - properties: - model_id: - type: string - model_mAP: - type: integer - maximum: 1 - minimum: 0 - description: mean average precision of the model - example: 0.5 - task_parameters: - type: string - executor_config: - type: string - DatasetResult: - allOf: - - $ref: "#/components/schemas/ApiResponse" - - type: object - properties: - result: - type: object - properties: - total_images_cnt: - type: integer - class_ids_count: - type: object - example: "{3:8, 4:2}" - class_names_count: - type: object - example: "{'cat':8, 'dog':2}" - ignored_labels: - type: object - example: "{'cat':8}" - negative_info: - type: object - properties: - negative_images_cnt: - type: integer - project_negative_images_cnt: - type: integer - DatasetEvaluationResult: - allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - result: - type: object - additionalProperties: - type: object - properties: - conf_thr: - type: number - iou_evaluations: - type: object - additionalProperties: - $ref: '#/components/schemas/DatasetEvaluation' - iou_averaged_evaluation: - $ref: '#/components/schemas/DatasetEvaluation' - - ApiResponse: - type: object - properties: - code: - type: integer - format: int32 - request_id: - type: string - message: - type: string - Annotations: - type: array - items: - type: object - properties: - box: - type: object - properties: - x: - type: integer - y: - type: integer - w: - type: integer - h: - type: integer - class_id: - type: integer - score: - type: integer - DatasetEvaluation: - type: object - properties: - ci_evaluations: - type: object - additionalProperties: - $ref: "#/components/schemas/DatasetEvaluationElement" - topic_evaluations: - type: object - additionalProperties: - $ref: "#/components/schemas/DatasetEvaluationElement" - ci_averaged_evaluation: - $ref: "#/components/schemas/DatasetEvaluationElement" - DatasetEvaluationElement: - type: object - properties: - ap: - type: number - ar: - type: number - tp: - type: integer - fp: - type: integer - fn: - type: integer diff --git a/ymir/backend/src/ymir_viz/gunicorn_conf.py b/ymir/backend/src/ymir_viz/gunicorn_conf.py deleted file mode 100644 index 02d591272b..0000000000 --- a/ymir/backend/src/ymir_viz/gunicorn_conf.py +++ /dev/null @@ -1,13 +0,0 @@ -bind = '0.0.0.0:9099' -workers = 4 - 
-timeout = 300 - -max_requests = 2000 -max_requests_jitter = 500 - -proc_name = 'ymir_viz' - -accesslog = '-' -errorlog = '-' -loglevel = 'info' diff --git a/ymir/backend/src/ymir_viz/redis/.bumpversion.cfg b/ymir/backend/src/ymir_viz/redis/.bumpversion.cfg deleted file mode 100644 index 4e81116b63..0000000000 --- a/ymir/backend/src/ymir_viz/redis/.bumpversion.cfg +++ /dev/null @@ -1,3 +0,0 @@ -[bumpversion] -current_version = 0.0.1 -commit = False diff --git a/ymir/backend/src/ymir_viz/redis/Dockerfile b/ymir/backend/src/ymir_viz/redis/Dockerfile deleted file mode 100644 index 7eca9d1c9a..0000000000 --- a/ymir/backend/src/ymir_viz/redis/Dockerfile +++ /dev/null @@ -1,5 +0,0 @@ -FROM redis - -COPY redis.conf /usr/local/etc/redis/redis.conf - -CMD [ "redis-server", "/usr/local/etc/redis/redis.conf" ] diff --git a/ymir/backend/src/ymir_viz/redis/redis.conf b/ymir/backend/src/ymir_viz/redis/redis.conf deleted file mode 100644 index 3eed0b3f32..0000000000 --- a/ymir/backend/src/ymir_viz/redis/redis.conf +++ /dev/null @@ -1,1317 +0,0 @@ -# Redis configuration file example. -# -# Note that in order to read the configuration file, Redis must be -# started with the file path as first argument: -# -# ./redis-server /path/to/redis.conf - -# Note on units: when memory size is needed, it is possible to specify -# it in the usual form of 1k 5GB 4M and so forth: -# -# 1k => 1000 bytes -# 1kb => 1024 bytes -# 1m => 1000000 bytes -# 1mb => 1024*1024 bytes -# 1g => 1000000000 bytes -# 1gb => 1024*1024*1024 bytes -# -# units are case insensitive so 1GB 1Gb 1gB are all the same. - -################################## INCLUDES ################################### - -# Include one or more other config files here. This is useful if you -# have a standard template that goes to all Redis servers but also need -# to customize a few per-server settings. Include files can include -# other files, so use this wisely. -# -# Notice option "include" won't be rewritten by command "CONFIG REWRITE" -# from admin or Redis Sentinel. Since Redis always uses the last processed -# line as value of a configuration directive, you'd better put includes -# at the beginning of this file to avoid overwriting config change at runtime. -# -# If instead you are interested in using includes to override configuration -# options, it is better to use include as the last line. -# -# include /path/to/local.conf -# include /path/to/other.conf - -################################## MODULES ##################################### - -# Load modules at startup. If the server is not able to load modules -# it will abort. It is possible to use multiple loadmodule directives. -# -# loadmodule /path/to/my_module.so -# loadmodule /path/to/other_module.so - -################################## NETWORK ##################################### - -# By default, if no "bind" configuration directive is specified, Redis listens -# for connections from all the network interfaces available on the server. -# It is possible to listen to just one or multiple selected interfaces using -# the "bind" configuration directive, followed by one or more IP addresses. -# -# Examples: -# -# bind 192.168.1.100 10.0.0.1 -# bind 127.0.0.1 ::1 -# -# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the -# internet, binding to all the interfaces is dangerous and will expose the -# instance to everybody on the internet. 
So by default we uncomment the -# following bind directive, that will force Redis to listen only into -# the IPv4 lookback interface address (this means Redis will be able to -# accept connections only from clients running into the same computer it -# is running). -# -# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES -# JUST COMMENT THE FOLLOWING LINE. -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -# bind 127.0.0.1 -bind 0.0.0.0 - -# Protected mode is a layer of security protection, in order to avoid that -# Redis instances left open on the internet are accessed and exploited. -# -# When protected mode is on and if: -# -# 1) The server is not binding explicitly to a set of addresses using the -# "bind" directive. -# 2) No password is configured. -# -# The server only accepts connections from clients connecting from the -# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain -# sockets. -# -# By default protected mode is enabled. You should disable it only if -# you are sure you want clients from other hosts to connect to Redis -# even if no authentication is configured, nor a specific set of interfaces -# are explicitly listed using the "bind" directive. -protected-mode yes - -# Accept connections on the specified port, default is 6379 (IANA #815344). -# If port 0 is specified Redis will not listen on a TCP socket. -port 6379 - -# TCP listen() backlog. -# -# In high requests-per-second environments you need an high backlog in order -# to avoid slow clients connections issues. Note that the Linux kernel -# will silently truncate it to the value of /proc/sys/net/core/somaxconn so -# make sure to raise both the value of somaxconn and tcp_max_syn_backlog -# in order to get the desired effect. -tcp-backlog 511 - -# Unix socket. -# -# Specify the path for the Unix socket that will be used to listen for -# incoming connections. There is no default, so Redis will not listen -# on a unix socket when not specified. -# -# unixsocket /tmp/redis.sock -# unixsocketperm 700 - -# Close the connection after a client is idle for N seconds (0 to disable) -timeout 0 - -# TCP keepalive. -# -# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence -# of communication. This is useful for two reasons: -# -# 1) Detect dead peers. -# 2) Take the connection alive from the point of view of network -# equipment in the middle. -# -# On Linux, the specified value (in seconds) is the period used to send ACKs. -# Note that to close the connection the double of the time is needed. -# On other kernels the period depends on the kernel configuration. -# -# A reasonable value for this option is 300 seconds, which is the new -# Redis default starting with Redis 3.2.1. -tcp-keepalive 300 - -################################# GENERAL ##################################### - -# By default Redis does not run as a daemon. Use 'yes' if you need it. -# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. -daemonize no - -# If you run Redis from upstart or systemd, Redis can interact with your -# supervision tree. Options: -# supervised no - no supervision interaction -# supervised upstart - signal upstart by putting Redis into SIGSTOP mode -# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET -# supervised auto - detect upstart or systemd method based on -# UPSTART_JOB or NOTIFY_SOCKET environment variables -# Note: these supervision methods only signal "process is ready." 
-# They do not enable continuous liveness pings back to your supervisor. -supervised no - -# If a pid file is specified, Redis writes it where specified at startup -# and removes it at exit. -# -# When the server runs non daemonized, no pid file is created if none is -# specified in the configuration. When the server is daemonized, the pid file -# is used even if not specified, defaulting to "/var/run/redis.pid". -# -# Creating a pid file is best effort: if Redis is not able to create it -# nothing bad happens, the server will start and run normally. -pidfile /var/run/redis_6379.pid - -# Specify the server verbosity level. -# This can be one of: -# debug (a lot of information, useful for development/testing) -# verbose (many rarely useful info, but not a mess like the debug level) -# notice (moderately verbose, what you want in production probably) -# warning (only very important / critical messages are logged) -loglevel notice - -# Specify the log file name. Also the empty string can be used to force -# Redis to log on the standard output. Note that if you use standard -# output for logging but daemonize, logs will be sent to /dev/null -logfile "" - -# To enable logging to the system logger, just set 'syslog-enabled' to yes, -# and optionally update the other syslog parameters to suit your needs. -# syslog-enabled no - -# Specify the syslog identity. -# syslog-ident redis - -# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. -# syslog-facility local0 - -# Set the number of databases. The default database is DB 0, you can select -# a different one on a per-connection basis using SELECT where -# dbid is a number between 0 and 'databases'-1 -databases 16 - -# By default Redis shows an ASCII art logo only when started to log to the -# standard output and if the standard output is a TTY. Basically this means -# that normally a logo is displayed only in interactive sessions. -# -# However it is possible to force the pre-4.0 behavior and always show a -# ASCII art logo in startup logs by setting the following option to yes. -always-show-logo yes - -################################ SNAPSHOTTING ################################ -# -# Save the DB on disk: -# -# save -# -# Will save the DB if both the given number of seconds and the given -# number of write operations against the DB occurred. -# -# In the example below the behaviour will be to save: -# after 900 sec (15 min) if at least 1 key changed -# after 300 sec (5 min) if at least 10 keys changed -# after 60 sec if at least 10000 keys changed -# -# Note: you can disable saving completely by commenting out all "save" lines. -# -# It is also possible to remove all the previously configured save -# points by adding a save directive with a single empty string argument -# like in the following example: -# -# save "" - -save 900 1 -save 300 10 -save 60 10000 - -# By default Redis will stop accepting writes if RDB snapshots are enabled -# (at least one save point) and the latest background save failed. -# This will make the user aware (in a hard way) that data is not persisting -# on disk properly, otherwise chances are that no one will notice and some -# disaster will happen. -# -# If the background saving process will start working again Redis will -# automatically allow writes again. -# -# However if you have setup your proper monitoring of the Redis server -# and persistence, you may want to disable this feature so that Redis will -# continue to work as usual even if there are problems with disk, -# permissions, and so forth. 
-stop-writes-on-bgsave-error yes - -# Compress string objects using LZF when dump .rdb databases? -# For default that's set to 'yes' as it's almost always a win. -# If you want to save some CPU in the saving child set it to 'no' but -# the dataset will likely be bigger if you have compressible values or keys. -rdbcompression yes - -# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. -# This makes the format more resistant to corruption but there is a performance -# hit to pay (around 10%) when saving and loading RDB files, so you can disable it -# for maximum performances. -# -# RDB files created with checksum disabled have a checksum of zero that will -# tell the loading code to skip the check. -rdbchecksum yes - -# The filename where to dump the DB -dbfilename dump.rdb - -# The working directory. -# -# The DB will be written inside this directory, with the filename specified -# above using the 'dbfilename' configuration directive. -# -# The Append Only File will also be created inside this directory. -# -# Note that you must specify a directory here, not a file name. -dir ./ - -################################# REPLICATION ################################# - -# Master-Slave replication. Use slaveof to make a Redis instance a copy of -# another Redis server. A few things to understand ASAP about Redis replication. -# -# 1) Redis replication is asynchronous, but you can configure a master to -# stop accepting writes if it appears to be not connected with at least -# a given number of slaves. -# 2) Redis slaves are able to perform a partial resynchronization with the -# master if the replication link is lost for a relatively small amount of -# time. You may want to configure the replication backlog size (see the next -# sections of this file) with a sensible value depending on your needs. -# 3) Replication is automatic and does not need user intervention. After a -# network partition slaves automatically try to reconnect to masters -# and resynchronize with them. -# -# slaveof - -# If the master is password protected (using the "requirepass" configuration -# directive below) it is possible to tell the slave to authenticate before -# starting the replication synchronization process, otherwise the master will -# refuse the slave request. -# -# masterauth - -# When a slave loses its connection with the master, or when the replication -# is still in progress, the slave can act in two different ways: -# -# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will -# still reply to client requests, possibly with out of date data, or the -# data set may just be empty if this is the first synchronization. -# -# 2) if slave-serve-stale-data is set to 'no' the slave will reply with -# an error "SYNC with master in progress" to all the kind of commands -# but to INFO and SLAVEOF. -# -slave-serve-stale-data yes - -# You can configure a slave instance to accept writes or not. Writing against -# a slave instance may be useful to store some ephemeral data (because data -# written on a slave will be easily deleted after resync with the master) but -# may also cause problems if clients are writing to it because of a -# misconfiguration. -# -# Since Redis 2.6 by default slaves are read-only. -# -# Note: read only slaves are not designed to be exposed to untrusted clients -# on the internet. It's just a protection layer against misuse of the instance. -# Still a read only slave exports by default all the administrative commands -# such as CONFIG, DEBUG, and so forth. 
To a limited extent you can improve -# security of read only slaves using 'rename-command' to shadow all the -# administrative / dangerous commands. -slave-read-only yes - -# Replication SYNC strategy: disk or socket. -# -# ------------------------------------------------------- -# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY -# ------------------------------------------------------- -# -# New slaves and reconnecting slaves that are not able to continue the replication -# process just receiving differences, need to do what is called a "full -# synchronization". An RDB file is transmitted from the master to the slaves. -# The transmission can happen in two different ways: -# -# 1) Disk-backed: The Redis master creates a new process that writes the RDB -# file on disk. Later the file is transferred by the parent -# process to the slaves incrementally. -# 2) Diskless: The Redis master creates a new process that directly writes the -# RDB file to slave sockets, without touching the disk at all. -# -# With disk-backed replication, while the RDB file is generated, more slaves -# can be queued and served with the RDB file as soon as the current child producing -# the RDB file finishes its work. With diskless replication instead once -# the transfer starts, new slaves arriving will be queued and a new transfer -# will start when the current one terminates. -# -# When diskless replication is used, the master waits a configurable amount of -# time (in seconds) before starting the transfer in the hope that multiple slaves -# will arrive and the transfer can be parallelized. -# -# With slow disks and fast (large bandwidth) networks, diskless replication -# works better. -repl-diskless-sync no - -# When diskless replication is enabled, it is possible to configure the delay -# the server waits in order to spawn the child that transfers the RDB via socket -# to the slaves. -# -# This is important since once the transfer starts, it is not possible to serve -# new slaves arriving, that will be queued for the next RDB transfer, so the server -# waits a delay in order to let more slaves arrive. -# -# The delay is specified in seconds, and by default is 5 seconds. To disable -# it entirely just set it to 0 seconds and the transfer will start ASAP. -repl-diskless-sync-delay 5 - -# Slaves send PINGs to server in a predefined interval. It's possible to change -# this interval with the repl_ping_slave_period option. The default value is 10 -# seconds. -# -# repl-ping-slave-period 10 - -# The following option sets the replication timeout for: -# -# 1) Bulk transfer I/O during SYNC, from the point of view of slave. -# 2) Master timeout from the point of view of slaves (data, pings). -# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). -# -# It is important to make sure that this value is greater than the value -# specified for repl-ping-slave-period otherwise a timeout will be detected -# every time there is low traffic between the master and the slave. -# -# repl-timeout 60 - -# Disable TCP_NODELAY on the slave socket after SYNC? -# -# If you select "yes" Redis will use a smaller number of TCP packets and -# less bandwidth to send data to slaves. But this can add a delay for -# the data to appear on the slave side, up to 40 milliseconds with -# Linux kernels using a default configuration. -# -# If you select "no" the delay for data to appear on the slave side will -# be reduced but more bandwidth will be used for replication. 
-# -# By default we optimize for low latency, but in very high traffic conditions -# or when the master and slaves are many hops away, turning this to "yes" may -# be a good idea. -repl-disable-tcp-nodelay no - -# Set the replication backlog size. The backlog is a buffer that accumulates -# slave data when slaves are disconnected for some time, so that when a slave -# wants to reconnect again, often a full resync is not needed, but a partial -# resync is enough, just passing the portion of data the slave missed while -# disconnected. -# -# The bigger the replication backlog, the longer the time the slave can be -# disconnected and later be able to perform a partial resynchronization. -# -# The backlog is only allocated once there is at least a slave connected. -# -# repl-backlog-size 1mb - -# After a master has no longer connected slaves for some time, the backlog -# will be freed. The following option configures the amount of seconds that -# need to elapse, starting from the time the last slave disconnected, for -# the backlog buffer to be freed. -# -# Note that slaves never free the backlog for timeout, since they may be -# promoted to masters later, and should be able to correctly "partially -# resynchronize" with the slaves: hence they should always accumulate backlog. -# -# A value of 0 means to never release the backlog. -# -# repl-backlog-ttl 3600 - -# The slave priority is an integer number published by Redis in the INFO output. -# It is used by Redis Sentinel in order to select a slave to promote into a -# master if the master is no longer working correctly. -# -# A slave with a low priority number is considered better for promotion, so -# for instance if there are three slaves with priority 10, 100, 25 Sentinel will -# pick the one with priority 10, that is the lowest. -# -# However a special priority of 0 marks the slave as not able to perform the -# role of master, so a slave with priority of 0 will never be selected by -# Redis Sentinel for promotion. -# -# By default the priority is 100. -slave-priority 100 - -# It is possible for a master to stop accepting writes if there are less than -# N slaves connected, having a lag less or equal than M seconds. -# -# The N slaves need to be in "online" state. -# -# The lag in seconds, that must be <= the specified value, is calculated from -# the last ping received from the slave, that is usually sent every second. -# -# This option does not GUARANTEE that N replicas will accept the write, but -# will limit the window of exposure for lost writes in case not enough slaves -# are available, to the specified number of seconds. -# -# For example to require at least 3 slaves with a lag <= 10 seconds use: -# -# min-slaves-to-write 3 -# min-slaves-max-lag 10 -# -# Setting one or the other to 0 disables the feature. -# -# By default min-slaves-to-write is set to 0 (feature disabled) and -# min-slaves-max-lag is set to 10. - -# A Redis master is able to list the address and port of the attached -# slaves in different ways. For example the "INFO replication" section -# offers this information, which is used, among other tools, by -# Redis Sentinel in order to discover slave instances. -# Another place where this info is available is in the output of the -# "ROLE" command of a master. -# -# The listed IP and address normally reported by a slave is obtained -# in the following way: -# -# IP: The address is auto detected by checking the peer address -# of the socket used by the slave to connect with the master. 
-# -# Port: The port is communicated by the slave during the replication -# handshake, and is normally the port that the slave is using to -# list for connections. -# -# However when port forwarding or Network Address Translation (NAT) is -# used, the slave may be actually reachable via different IP and port -# pairs. The following two options can be used by a slave in order to -# report to its master a specific set of IP and port, so that both INFO -# and ROLE will report those values. -# -# There is no need to use both the options if you need to override just -# the port or the IP address. -# -# slave-announce-ip 5.5.5.5 -# slave-announce-port 1234 - -################################## SECURITY ################################### - -# Require clients to issue AUTH before processing any other -# commands. This might be useful in environments in which you do not trust -# others with access to the host running redis-server. -# -# This should stay commented out for backward compatibility and because most -# people do not need auth (e.g. they run their own servers). -# -# Warning: since Redis is pretty fast an outside user can try up to -# 150k passwords per second against a good box. This means that you should -# use a very strong password otherwise it will be very easy to break. -# -# requirepass foobared - -# Command renaming. -# -# It is possible to change the name of dangerous commands in a shared -# environment. For instance the CONFIG command may be renamed into something -# hard to guess so that it will still be available for internal-use tools -# but not available for general clients. -# -# Example: -# -# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 -# -# It is also possible to completely kill a command by renaming it into -# an empty string: -# -# rename-command CONFIG "" -# -# Please note that changing the name of commands that are logged into the -# AOF file or transmitted to slaves may cause problems. - -################################### CLIENTS #################################### - -# Set the max number of connected clients at the same time. By default -# this limit is set to 10000 clients, however if the Redis server is not -# able to configure the process file limit to allow for the specified limit -# the max number of allowed clients is set to the current file limit -# minus 32 (as Redis reserves a few file descriptors for internal uses). -# -# Once the limit is reached Redis will close all the new connections sending -# an error 'max number of clients reached'. -# -# maxclients 10000 - -############################## MEMORY MANAGEMENT ################################ - -# Set a memory usage limit to the specified amount of bytes. -# When the memory limit is reached Redis will try to remove keys -# according to the eviction policy selected (see maxmemory-policy). -# -# If Redis can't remove keys according to the policy, or if the policy is -# set to 'noeviction', Redis will start to reply with errors to commands -# that would use more memory, like SET, LPUSH, and so on, and will continue -# to reply to read-only commands like GET. -# -# This option is usually useful when using Redis as an LRU or LFU cache, or to -# set a hard memory limit for an instance (using the 'noeviction' policy). 
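How the two security mechanisms above look from a client, sketched with redis-py; the password and the shadow command name are placeholders taken from the example, not real values:

```python
import redis

# requirepass: redis-py sends AUTH transparently when password is set.
r = redis.Redis(password="use-a-very-long-random-string-here",
                decode_responses=True)
r.ping()  # raises redis.AuthenticationError if the password is wrong

# rename-command CONFIG b840fc02...: the original name now fails and
# only the shadow name reaches the command table.
r.execute_command("b840fc02d524045429941cc15f59e41cb7be6c52",
                  "GET", "maxmemory")
```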
-# -# WARNING: If you have slaves attached to an instance with maxmemory on, -# the size of the output buffers needed to feed the slaves are subtracted -# from the used memory count, so that network problems / resyncs will -# not trigger a loop where keys are evicted, and in turn the output -# buffer of slaves is full with DELs of keys evicted triggering the deletion -# of more keys, and so forth until the database is completely emptied. -# -# In short... if you have slaves attached it is suggested that you set a lower -# limit for maxmemory so that there is some free RAM on the system for slave -# output buffers (but this is not needed if the policy is 'noeviction'). -# -maxmemory 10000mb - -# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory -# is reached. You can select among five behaviors: -# -# volatile-lru -> Evict using approximated LRU among the keys with an expire set. -# allkeys-lru -> Evict any key using approximated LRU. -# volatile-lfu -> Evict using approximated LFU among the keys with an expire set. -# allkeys-lfu -> Evict any key using approximated LFU. -# volatile-random -> Remove a random key among the ones with an expire set. -# allkeys-random -> Remove a random key, any key. -# volatile-ttl -> Remove the key with the nearest expire time (minor TTL) -# noeviction -> Don't evict anything, just return an error on write operations. -# -# LRU means Least Recently Used -# LFU means Least Frequently Used -# -# Both LRU, LFU and volatile-ttl are implemented using approximated -# randomized algorithms. -# -# Note: with any of the above policies, Redis will return an error on write -# operations, when there are no suitable keys for eviction. -# -# At the date of writing these commands are: set setnx setex append -# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd -# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby -# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby -# getset mset msetnx exec sort -# -# The default is: -# -maxmemory-policy allkeys-lru - -# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated -# algorithms (in order to save memory), so you can tune it for speed or -# accuracy. For default Redis will check five keys and pick the one that was -# used less recently, you can change the sample size using the following -# configuration directive. -# -# The default of 5 produces good enough results. 10 Approximates very closely -# true LRU but costs more CPU. 3 is faster but not very accurate. -# -# maxmemory-samples 5 - -############################# LAZY FREEING #################################### - -# Redis has two primitives to delete keys. One is called DEL and is a blocking -# deletion of the object. It means that the server stops processing new commands -# in order to reclaim all the memory associated with an object in a synchronous -# way. If the key deleted is associated with a small object, the time needed -# in order to execute the DEL command is very small and comparable to most other -# O(1) or O(log_N) commands in Redis. However if the key is associated with an -# aggregated value containing millions of elements, the server can block for -# a long time (even seconds) in order to complete the operation. -# -# For the above reasons Redis also offers non blocking deletion primitives -# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and -# FLUSHDB commands, in order to reclaim memory in background. Those commands -# are executed in constant time. 
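The DEL vs UNLINK trade-off above, sketched with redis-py (the key name and size are arbitrary):

```python
import redis

r = redis.Redis()

# Build a deliberately large list; a synchronous DEL on it could block
# the server for a noticeable time while the memory is reclaimed.
pipe = r.pipeline(transaction=False)
for i in range(1_000_000):
    pipe.rpush("big:list", i)
pipe.execute()

# UNLINK removes the key immediately and lets a background thread
# reclaim the memory, so the server keeps serving other clients.
r.unlink("big:list")
```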
Another thread will incrementally free the -# object in the background as fast as possible. -# -# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled. -# It's up to the design of the application to understand when it is a good -# idea to use one or the other. However the Redis server sometimes has to -# delete keys or flush the whole database as a side effect of other operations. -# Specifically Redis deletes objects independently of a user call in the -# following scenarios: -# -# 1) On eviction, because of the maxmemory and maxmemory policy configurations, -# in order to make room for new data, without going over the specified -# memory limit. -# 2) Because of expire: when a key with an associated time to live (see the -# EXPIRE command) must be deleted from memory. -# 3) Because of a side effect of a command that stores data on a key that may -# already exist. For example the RENAME command may delete the old key -# content when it is replaced with another one. Similarly SUNIONSTORE -# or SORT with STORE option may delete existing keys. The SET command -# itself removes any old content of the specified key in order to replace -# it with the specified string. -# 4) During replication, when a slave performs a full resynchronization with -# its master, the content of the whole database is removed in order to -# load the RDB file just transfered. -# -# In all the above cases the default is to delete objects in a blocking way, -# like if DEL was called. However you can configure each case specifically -# in order to instead release memory in a non-blocking way like if UNLINK -# was called, using the following configuration directives: - -lazyfree-lazy-eviction no -lazyfree-lazy-expire no -lazyfree-lazy-server-del no -slave-lazy-flush no - -############################## APPEND ONLY MODE ############################### - -# By default Redis asynchronously dumps the dataset on disk. This mode is -# good enough in many applications, but an issue with the Redis process or -# a power outage may result into a few minutes of writes lost (depending on -# the configured save points). -# -# The Append Only File is an alternative persistence mode that provides -# much better durability. For instance using the default data fsync policy -# (see later in the config file) Redis can lose just one second of writes in a -# dramatic event like a server power outage, or a single write if something -# wrong with the Redis process itself happens, but the operating system is -# still running correctly. -# -# AOF and RDB persistence can be enabled at the same time without problems. -# If the AOF is enabled on startup Redis will load the AOF, that is the file -# with the better durability guarantees. -# -# Please check http://redis.io/topics/persistence for more information. - -appendonly no - -# The name of the append only file (default: "appendonly.aof") - -appendfilename "appendonly.aof" - -# The fsync() call tells the Operating System to actually write data on disk -# instead of waiting for more data in the output buffer. Some OS will really flush -# data on disk, some other OS will just try to do it ASAP. -# -# Redis supports three different modes: -# -# no: don't fsync, just let the OS flush the data when it wants. Faster. -# always: fsync after every write to the append only log. Slow, Safest. -# everysec: fsync only one time every second. Compromise. -# -# The default is "everysec", as that's usually the right compromise between -# speed and data safety. 
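A sketch of enabling these persistence settings at runtime, assuming redis-py and a server where runtime CONFIG SET is permitted:

```python
import redis

r = redis.Redis(decode_responses=True)

# Turn on the append only file with the default fsync compromise:
# at most about one second of writes is lost on a power failure.
r.config_set("appendonly", "yes")
r.config_set("appendfsync", "everysec")

info = r.info("persistence")
print(info["aof_enabled"], info["aof_last_write_status"])
```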
It's up to you to understand if you can relax this to -# "no" that will let the operating system flush the output buffer when -# it wants, for better performances (but if you can live with the idea of -# some data loss consider the default persistence mode that's snapshotting), -# or on the contrary, use "always" that's very slow but a bit safer than -# everysec. -# -# More details please check the following article: -# http://antirez.com/post/redis-persistence-demystified.html -# -# If unsure, use "everysec". - -# appendfsync always -appendfsync everysec -# appendfsync no - -# When the AOF fsync policy is set to always or everysec, and a background -# saving process (a background save or AOF log background rewriting) is -# performing a lot of I/O against the disk, in some Linux configurations -# Redis may block too long on the fsync() call. Note that there is no fix for -# this currently, as even performing fsync in a different thread will block -# our synchronous write(2) call. -# -# In order to mitigate this problem it's possible to use the following option -# that will prevent fsync() from being called in the main process while a -# BGSAVE or BGREWRITEAOF is in progress. -# -# This means that while another child is saving, the durability of Redis is -# the same as "appendfsync none". In practical terms, this means that it is -# possible to lose up to 30 seconds of log in the worst scenario (with the -# default Linux settings). -# -# If you have latency problems turn this to "yes". Otherwise leave it as -# "no" that is the safest pick from the point of view of durability. - -no-appendfsync-on-rewrite no - -# Automatic rewrite of the append only file. -# Redis is able to automatically rewrite the log file implicitly calling -# BGREWRITEAOF when the AOF log size grows by the specified percentage. -# -# This is how it works: Redis remembers the size of the AOF file after the -# latest rewrite (if no rewrite has happened since the restart, the size of -# the AOF at startup is used). -# -# This base size is compared to the current size. If the current size is -# bigger than the specified percentage, the rewrite is triggered. Also -# you need to specify a minimal size for the AOF file to be rewritten, this -# is useful to avoid rewriting the AOF file even if the percentage increase -# is reached but it is still pretty small. -# -# Specify a percentage of zero in order to disable the automatic AOF -# rewrite feature. - -auto-aof-rewrite-percentage 100 -auto-aof-rewrite-min-size 64mb - -# An AOF file may be found to be truncated at the end during the Redis -# startup process, when the AOF data gets loaded back into memory. -# This may happen when the system where Redis is running -# crashes, especially when an ext4 filesystem is mounted without the -# data=ordered option (however this can't happen when Redis itself -# crashes or aborts but the operating system still works correctly). -# -# Redis can either exit with an error when this happens, or load as much -# data as possible (the default now) and start if the AOF file is found -# to be truncated at the end. The following option controls this behavior. -# -# If aof-load-truncated is set to yes, a truncated AOF file is loaded and -# the Redis server starts emitting a log to inform the user of the event. -# Otherwise if the option is set to no, the server aborts with an error -# and refuses to start. When the option is set to no, the user requires -# to fix the AOF file using the "redis-check-aof" utility before to restart -# the server. 
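The rewrite thresholds above can also be adjusted, or a rewrite forced, from a client; a brief redis-py sketch:

```python
import redis

r = redis.Redis(decode_responses=True)

# Rewrite once the AOF doubles relative to its post-rewrite size,
# but never bother for files smaller than 64mb.
r.config_set("auto-aof-rewrite-percentage", 100)
r.config_set("auto-aof-rewrite-min-size", "64mb")

# A rewrite can also be forced by hand, e.g. before taking a backup.
r.bgrewriteaof()
```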
-# -# Note that if the AOF file will be found to be corrupted in the middle -# the server will still exit with an error. This option only applies when -# Redis will try to read more data from the AOF file but not enough bytes -# will be found. -aof-load-truncated yes - -# When rewriting the AOF file, Redis is able to use an RDB preamble in the -# AOF file for faster rewrites and recoveries. When this option is turned -# on the rewritten AOF file is composed of two different stanzas: -# -# [RDB file][AOF tail] -# -# When loading Redis recognizes that the AOF file starts with the "REDIS" -# string and loads the prefixed RDB file, and continues loading the AOF -# tail. -# -# This is currently turned off by default in order to avoid the surprise -# of a format change, but will at some point be used as the default. -aof-use-rdb-preamble no - -################################ LUA SCRIPTING ############################### - -# Max execution time of a Lua script in milliseconds. -# -# If the maximum execution time is reached Redis will log that a script is -# still in execution after the maximum allowed time and will start to -# reply to queries with an error. -# -# When a long running script exceeds the maximum execution time only the -# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be -# used to stop a script that did not yet called write commands. The second -# is the only way to shut down the server in the case a write command was -# already issued by the script but the user doesn't want to wait for the natural -# termination of the script. -# -# Set it to 0 or a negative value for unlimited execution without warnings. -lua-time-limit 5000 - -################################ REDIS CLUSTER ############################### -# -# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however -# in order to mark it as "mature" we need to wait for a non trivial percentage -# of users to deploy it in production. -# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# Normal Redis instances can't be part of a Redis Cluster; only nodes that are -# started as cluster nodes can. In order to start a Redis instance as a -# cluster node enable the cluster support uncommenting the following: -# -# cluster-enabled yes - -# Every cluster node has a cluster configuration file. This file is not -# intended to be edited by hand. It is created and updated by Redis nodes. -# Every Redis Cluster node requires a different cluster configuration file. -# Make sure that instances running in the same system do not have -# overlapping cluster configuration file names. -# -# cluster-config-file nodes-6379.conf - -# Cluster node timeout is the amount of milliseconds a node must be unreachable -# for it to be considered in failure state. -# Most other internal time limits are multiple of the node timeout. -# -# cluster-node-timeout 15000 - -# A slave of a failing master will avoid to start a failover if its data -# looks too old. -# -# There is no simple way for a slave to actually have an exact measure of -# its "data age", so the following two checks are performed: -# -# 1) If there are multiple slaves able to failover, they exchange messages -# in order to try to give an advantage to the slave with the best -# replication offset (more data from the master processed). 
-# Slaves will try to get their rank by offset, and apply to the start -# of the failover a delay proportional to their rank. -# -# 2) Every single slave computes the time of the last interaction with -# its master. This can be the last ping or command received (if the master -# is still in the "connected" state), or the time that elapsed since the -# disconnection with the master (if the replication link is currently down). -# If the last interaction is too old, the slave will not try to failover -# at all. -# -# The point "2" can be tuned by user. Specifically a slave will not perform -# the failover if, since the last interaction with the master, the time -# elapsed is greater than: -# -# (node-timeout * slave-validity-factor) + repl-ping-slave-period -# -# So for example if node-timeout is 30 seconds, and the slave-validity-factor -# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the -# slave will not try to failover if it was not able to talk with the master -# for longer than 310 seconds. -# -# A large slave-validity-factor may allow slaves with too old data to failover -# a master, while a too small value may prevent the cluster from being able to -# elect a slave at all. -# -# For maximum availability, it is possible to set the slave-validity-factor -# to a value of 0, which means, that slaves will always try to failover the -# master regardless of the last time they interacted with the master. -# (However they'll always try to apply a delay proportional to their -# offset rank). -# -# Zero is the only value able to guarantee that when all the partitions heal -# the cluster will always be able to continue. -# -# cluster-slave-validity-factor 10 - -# Cluster slaves are able to migrate to orphaned masters, that are masters -# that are left without working slaves. This improves the cluster ability -# to resist to failures as otherwise an orphaned master can't be failed over -# in case of failure if it has no working slaves. -# -# Slaves migrate to orphaned masters only if there are still at least a -# given number of other working slaves for their old master. This number -# is the "migration barrier". A migration barrier of 1 means that a slave -# will migrate only if there is at least 1 other working slave for its master -# and so forth. It usually reflects the number of slaves you want for every -# master in your cluster. -# -# Default is 1 (slaves migrate only if their masters remain with at least -# one slave). To disable migration just set it to a very large value. -# A value of 0 can be set but is useful only for debugging and dangerous -# in production. -# -# cluster-migration-barrier 1 - -# By default Redis Cluster nodes stop accepting queries if they detect there -# is at least an hash slot uncovered (no available node is serving it). -# This way if the cluster is partially down (for example a range of hash slots -# are no longer covered) all the cluster becomes, eventually, unavailable. -# It automatically returns available as soon as all the slots are covered again. -# -# However sometimes you want the subset of the cluster which is working, -# to continue to accept queries for the part of the key space that is still -# covered. In order to do so, just set the cluster-require-full-coverage -# option to no. -# -# cluster-require-full-coverage yes - -# This option, when set to yes, prevents slaves from trying to failover its -# master during master failures. However the master can still perform a -# manual failover, if forced to do so. 
-# -# This is useful in different scenarios, especially in the case of multiple -# data center operations, where we want one side to never be promoted if not -# in the case of a total DC failure. -# -# cluster-slave-no-failover no - -# In order to setup your cluster make sure to read the documentation -# available at http://redis.io web site. - -########################## CLUSTER DOCKER/NAT support ######################## - -# In certain deployments, Redis Cluster nodes address discovery fails, because -# addresses are NAT-ted or because ports are forwarded (the typical case is -# Docker and other containers). -# -# In order to make Redis Cluster working in such environments, a static -# configuration where each node knows its public address is needed. The -# following two options are used for this scope, and are: -# -# * cluster-announce-ip -# * cluster-announce-port -# * cluster-announce-bus-port -# -# Each instruct the node about its address, client port, and cluster message -# bus port. The information is then published in the header of the bus packets -# so that other nodes will be able to correctly map the address of the node -# publishing the information. -# -# If the above options are not used, the normal Redis Cluster auto-detection -# will be used instead. -# -# Note that when remapped, the bus port may not be at the fixed offset of -# clients port + 10000, so you can specify any port and bus-port depending -# on how they get remapped. If the bus-port is not set, a fixed offset of -# 10000 will be used as usually. -# -# Example: -# -# cluster-announce-ip 10.1.1.5 -# cluster-announce-port 6379 -# cluster-announce-bus-port 6380 - -################################## SLOW LOG ################################### - -# The Redis Slow Log is a system to log queries that exceeded a specified -# execution time. The execution time does not include the I/O operations -# like talking with the client, sending the reply and so forth, -# but just the time needed to actually execute the command (this is the only -# stage of command execution where the thread is blocked and can not serve -# other requests in the meantime). -# -# You can configure the slow log with two parameters: one tells Redis -# what is the execution time, in microseconds, to exceed in order for the -# command to get logged, and the other parameter is the length of the -# slow log. When a new command is logged the oldest one is removed from the -# queue of logged commands. - -# The following time is expressed in microseconds, so 1000000 is equivalent -# to one second. Note that a negative number disables the slow log, while -# a value of zero forces the logging of every command. -slowlog-log-slower-than 10000 - -# There is no limit to this length. Just be aware that it will consume memory. -# You can reclaim memory used by the slow log with SLOWLOG RESET. -slowlog-max-len 128 - -################################ LATENCY MONITOR ############################## - -# The Redis latency monitoring subsystem samples different operations -# at runtime in order to collect data related to possible sources of -# latency of a Redis instance. -# -# Via the LATENCY command this information is available to the user that can -# print graphs and obtain reports. -# -# The system only logs operations that were performed in a time equal or -# greater than the amount of milliseconds specified via the -# latency-monitor-threshold configuration directive. When its value is set -# to zero, the latency monitor is turned off. 
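Reading the slow log from a client, with the two parameters set to the values shown above (redis-py assumed):

```python
import redis

r = redis.Redis(decode_responses=True)

# Log every command that takes longer than 10ms, keep 128 entries.
r.config_set("slowlog-log-slower-than", 10000)
r.config_set("slowlog-max-len", 128)

for entry in r.slowlog_get(10):  # the ten most recent offenders
    print(entry["id"], entry["duration"], entry["command"])

r.slowlog_reset()  # SLOWLOG RESET reclaims the memory used by the log
```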
-# -# By default latency monitoring is disabled since it is mostly not needed -# if you don't have latency issues, and collecting data has a performance -# impact, that while very small, can be measured under big load. Latency -# monitoring can easily be enabled at runtime using the command -# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed. -latency-monitor-threshold 0 - -############################# EVENT NOTIFICATION ############################## - -# Redis can notify Pub/Sub clients about events happening in the key space. -# This feature is documented at http://redis.io/topics/notifications -# -# For instance if keyspace events notification is enabled, and a client -# performs a DEL operation on key "foo" stored in the Database 0, two -# messages will be published via Pub/Sub: -# -# PUBLISH __keyspace@0__:foo del -# PUBLISH __keyevent@0__:del foo -# -# It is possible to select the events that Redis will notify among a set -# of classes. Every class is identified by a single character: -# -# K Keyspace events, published with __keyspace@<db>__ prefix. -# E Keyevent events, published with __keyevent@<db>__ prefix. -# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... -# $ String commands -# l List commands -# s Set commands -# h Hash commands -# z Sorted set commands -# x Expired events (events generated every time a key expires) -# e Evicted events (events generated when a key is evicted for maxmemory) -# A Alias for g$lshzxe, so that the "AKE" string means all the events. -# -# The "notify-keyspace-events" takes as argument a string that is composed -# of zero or multiple characters. The empty string means that notifications -# are disabled. -# -# Example: to enable list and generic events, from the point of view of the -# event name, use: -# -# notify-keyspace-events Elg -# -# Example 2: to get the stream of the expired keys subscribing to channel -# name __keyevent@0__:expired use: -# -# notify-keyspace-events Ex -# -# By default all notifications are disabled because most users don't need -# this feature and the feature has some overhead. Note that if you don't -# specify at least one of K or E, no events will be delivered. -notify-keyspace-events "" - -############################### ADVANCED CONFIG ############################### - -# Hashes are encoded using a memory efficient data structure when they have a -# small number of entries, and the biggest entry does not exceed a given -# threshold. These thresholds can be configured using the following directives. -hash-max-ziplist-entries 512 -hash-max-ziplist-value 64 - -# Lists are also encoded in a special way to save a lot of space. -# The number of entries allowed per internal list node can be specified -# as a fixed maximum size or a maximum number of elements. -# For a fixed maximum size, use -5 through -1, meaning: -# -5: max size: 64 Kb <-- not recommended for normal workloads -# -4: max size: 32 Kb <-- not recommended -# -3: max size: 16 Kb <-- probably not recommended -# -2: max size: 8 Kb <-- good -# -1: max size: 4 Kb <-- good -# Positive numbers mean store up to _exactly_ that number of elements -# per list node. -# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size), -# but if your use case is unique, adjust the settings as necessary. -list-max-ziplist-size -2 - -# Lists may also be compressed. -# Compress depth is the number of quicklist ziplist nodes from *each* side of -# the list to *exclude* from compression.
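The second example above ('Ex', expired-key events) end to end, sketched with redis-py; the key name is illustrative:

```python
import redis

r = redis.Redis(decode_responses=True)

# 'E' = Keyevent class, 'x' = expired events.
r.config_set("notify-keyspace-events", "Ex")

p = r.pubsub()
p.psubscribe("__keyevent@0__:expired")

r.set("session:abc", "data", ex=1)  # key expires after one second

for _ in range(100):                # poll briefly instead of forever
    msg = p.get_message(timeout=0.1)
    if msg and msg["type"] == "pmessage":
        print("expired key:", msg["data"])  # -> session:abc
        break
```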
The head and tail of the list -# are always uncompressed for fast push/pop operations. Settings are: -# 0: disable all list compression -# 1: depth 1 means "don't start compressing until after 1 node into the list, -# going from either the head or tail" -# So: [head]->node->node->...->node->[tail] -# [head], [tail] will always be uncompressed; inner nodes will compress. -# 2: [head]->[next]->node->node->...->node->[prev]->[tail] -# 2 here means: don't compress head or head->next or tail->prev or tail, -# but compress all nodes between them. -# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] -# etc. -list-compress-depth 0 - -# Sets have a special encoding in just one case: when a set is composed -# of just strings that happen to be integers in radix 10 in the range -# of 64 bit signed integers. -# The following configuration setting sets the limit in the size of the -# set in order to use this special memory saving encoding. -set-max-intset-entries 512 - -# Similarly to hashes and lists, sorted sets are also specially encoded in -# order to save a lot of space. This encoding is only used when the length and -# elements of a sorted set are below the following limits: -zset-max-ziplist-entries 128 -zset-max-ziplist-value 64 - -# HyperLogLog sparse representation bytes limit. The limit includes the -# 16 bytes header. When an HyperLogLog using the sparse representation crosses -# this limit, it is converted into the dense representation. -# -# A value greater than 16000 is totally useless, since at that point the -# dense representation is more memory efficient. -# -# The suggested value is ~ 3000 in order to have the benefits of -# the space efficient encoding without slowing down too much PFADD, -# which is O(N) with the sparse encoding. The value can be raised to -# ~ 10000 when CPU is not a concern, but space is, and the data set is -# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. -hll-sparse-max-bytes 3000 - -# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in -# order to help rehashing the main Redis hash table (the one mapping top-level -# keys to values). The hash table implementation Redis uses (see dict.c) -# performs a lazy rehashing: the more operation you run into a hash table -# that is rehashing, the more rehashing "steps" are performed, so if the -# server is idle the rehashing is never complete and some more memory is used -# by the hash table. -# -# The default is to use this millisecond 10 times every second in order to -# actively rehash the main dictionaries, freeing memory when possible. -# -# If unsure: -# use "activerehashing no" if you have hard latency requirements and it is -# not a good thing in your environment that Redis can reply from time to time -# to queries with 2 milliseconds delay. -# -# use "activerehashing yes" if you don't have such hard requirements but -# want to free memory asap when possible. -activerehashing yes - -# The client output buffer limits can be used to force disconnection of clients -# that are not reading data from the server fast enough for some reason (a -# common reason is that a Pub/Sub client can't consume messages as fast as the -# publisher can produce them). 
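The size-dependent encodings described above are observable through OBJECT ENCODING; a sketch with redis-py against a Redis of this vintage (4.x, where the compact encoding is still named ziplist):

```python
import redis

r = redis.Redis(decode_responses=True)

# A small hash stays in the compact representation...
r.hset("h", mapping={f"f{i}": i for i in range(10)})
print(r.object("encoding", "h"))  # ziplist

# ...until it crosses hash-max-ziplist-entries (512 above), at which
# point Redis converts it to a real hash table.
r.hset("h", mapping={f"f{i}": i for i in range(1000)})
print(r.object("encoding", "h"))  # hashtable

# Sets made only of radix-10 integers use the denser intset encoding.
r.sadd("s", 1, 2, 3)
print(r.object("encoding", "s"))  # intset
```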
-# -# The limit can be set differently for the three different classes of clients: -# -# normal -> normal clients including MONITOR clients -# slave -> slave clients -# pubsub -> clients subscribed to at least one pubsub channel or pattern -# -# The syntax of every client-output-buffer-limit directive is the following: -# -# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds> -# -# A client is immediately disconnected once the hard limit is reached, or if -# the soft limit is reached and remains reached for the specified number of -# seconds (continuously). -# So for instance if the hard limit is 32 megabytes and the soft limit is -# 16 megabytes / 10 seconds, the client will get disconnected immediately -# if the size of the output buffers reach 32 megabytes, but will also get -# disconnected if the client reaches 16 megabytes and continuously overcomes -# the limit for 10 seconds. -# -# By default normal clients are not limited because they don't receive data -# without asking (in a push way), but just after a request, so only -# asynchronous clients may create a scenario where data is requested faster -# than it can be read. -# -# Instead there is a default limit for pubsub and slave clients, since -# subscribers and slaves receive data in a push fashion. -# -# Both the hard or the soft limit can be disabled by setting them to zero. -client-output-buffer-limit normal 0 0 0 -client-output-buffer-limit slave 256mb 64mb 60 -client-output-buffer-limit pubsub 32mb 8mb 60 - -# Client query buffers accumulate new commands. They are limited to a fixed -# amount by default in order to avoid that a protocol desynchronization (for -# instance due to a bug in the client) will lead to unbound memory usage in -# the query buffer. However you can configure it here if you have very special -# needs, such as huge multi/exec requests or alike. -# -# client-query-buffer-limit 1gb - -# In the Redis protocol, bulk requests, that are, elements representing single -# strings, are normally limited to 512 mb. However you can change this limit -# here. -# -# proto-max-bulk-len 512mb - -# Redis calls an internal function to perform many background tasks, like -# closing connections of clients in timeout, purging expired keys that are -# never requested, and so forth. -# -# Not all tasks are performed with the same frequency, but Redis checks for -# tasks to perform according to the specified "hz" value. -# -# By default "hz" is set to 10. Raising the value will use more CPU when -# Redis is idle, but at the same time will make Redis more responsive when -# there are many keys expiring at the same time, and timeouts may be -# handled with more precision. -# -# The range is between 1 and 500, however a value over 100 is usually not -# a good idea. Most users should use the default of 10 and raise this up to -# 100 only in environments where very low latency is required. -hz 10 - -# When a child rewrites the AOF file, if the following option is enabled -# the file will be fsync-ed every 32 MB of data generated. This is useful -# in order to commit the file to the disk more incrementally and avoid -# big latency spikes. -aof-rewrite-incremental-fsync yes - -# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good -# idea to start with the default settings and only change them after investigating -# how to improve the performances and how the keys LFU change over time, which -# is possible to inspect via the OBJECT FREQ command.
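Inspecting the LFU counter mentioned above: OBJECT FREQ only works while an LFU maxmemory-policy is active, and since redis-py has no dedicated wrapper for it, the raw command is sent (an assumption worth checking against your client version):

```python
import redis

r = redis.Redis(decode_responses=True)

# OBJECT FREQ is rejected unless an LFU policy is configured.
r.config_set("maxmemory-policy", "allkeys-lfu")

r.set("foo", "bar")
for _ in range(1000):
    r.get("foo")

# Prints the 8-bit logarithmic counter, not a raw hit count
# (see the factor table that follows).
print(r.execute_command("OBJECT", "FREQ", "foo"))
```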
-# -# There are two tunable parameters in the Redis LFU implementation: the -# counter logarithm factor and the counter decay time. It is important to -# understand what the two parameters mean before changing them. -# -# The LFU counter is just 8 bits per key, it's maximum value is 255, so Redis -# uses a probabilistic increment with logarithmic behavior. Given the value -# of the old counter, when a key is accessed, the counter is incremented in -# this way: -# -# 1. A random number R between 0 and 1 is extracted. -# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1). -# 3. The counter is incremented only if R < P. -# -# The default lfu-log-factor is 10. This is a table of how the frequency -# counter changes with a different number of accesses with different -# logarithmic factors: -# -# +--------+------------+------------+------------+------------+------------+ -# | factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits | -# +--------+------------+------------+------------+------------+------------+ -# | 0 | 104 | 255 | 255 | 255 | 255 | -# +--------+------------+------------+------------+------------+------------+ -# | 1 | 18 | 49 | 255 | 255 | 255 | -# +--------+------------+------------+------------+------------+------------+ -# | 10 | 10 | 18 | 142 | 255 | 255 | -# +--------+------------+------------+------------+------------+------------+ -# | 100 | 8 | 11 | 49 | 143 | 255 | -# +--------+------------+------------+------------+------------+------------+ -# -# NOTE: The above table was obtained by running the following commands: -# -# redis-benchmark -n 1000000 incr foo -# redis-cli object freq foo -# -# NOTE 2: The counter initial value is 5 in order to give new objects a chance -# to accumulate hits. -# -# The counter decay time is the time, in minutes, that must elapse in order -# for the key counter to be divided by two (or decremented if it has a value -# less <= 10). -# -# The default value for the lfu-decay-time is 1. A Special value of 0 means to -# decay the counter every time it happens to be scanned. -# -# lfu-log-factor 10 -# lfu-decay-time 1 - -########################### ACTIVE DEFRAGMENTATION ####################### -# -# WARNING THIS FEATURE IS EXPERIMENTAL. However it was stress tested -# even in production and manually tested by multiple engineers for some -# time. -# -# What is active defragmentation? -# ------------------------------- -# -# Active (online) defragmentation allows a Redis server to compact the -# spaces left between small allocations and deallocations of data in memory, -# thus allowing to reclaim back memory. -# -# Fragmentation is a natural process that happens with every allocator (but -# less so with Jemalloc, fortunately) and certain workloads. Normally a server -# restart is needed in order to lower the fragmentation, or at least to flush -# away all the data and create it again. However thanks to this feature -# implemented by Oran Agra for Redis 4.0 this process can happen at runtime -# in an "hot" way, while the server is running. -# -# Basically when the fragmentation is over a certain level (see the -# configuration options below) Redis will start to create new copies of the -# values in contiguous memory regions by exploiting certain specific Jemalloc -# features (in order to understand if an allocation is causing fragmentation -# and to allocate it in a better place), and at the same time, will release the -# old copies of the data. 
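A back-of-the-envelope simulation of the probabilistic counter described in the LFU notes above, in plain Python rather than Redis's actual C code. LFU_INIT_VAL = 5 mirrors NOTE 2, and the increment probability here uses the counter minus that initial value (how the server computes step 2 internally), which is what reproduces the factor-10 row of the table:

```python
import random

LFU_INIT_VAL = 5      # NOTE 2: new objects start at 5
LFU_LOG_FACTOR = 10   # the default factor from the table

def lfu_incr(counter: int) -> int:
    # One probabilistic increment of the 8-bit frequency counter.
    if counter == 255:
        return 255
    baseval = max(counter - LFU_INIT_VAL, 0)
    p = 1.0 / (baseval * LFU_LOG_FACTOR + 1)
    return counter + 1 if random.random() < p else counter

counter, hits = LFU_INIT_VAL, 0
for checkpoint in (100, 1_000, 100_000, 1_000_000):
    while hits < checkpoint:
        counter = lfu_incr(counter)
        hits += 1
    print(f"{checkpoint:>9} hits -> counter {counter}")
# Typical output lands near the factor-10 row of the table above:
# ~10 at 100 hits, ~18 at 1000 hits, ~142 at 100K hits, 255 at 1M.
```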
This process, repeated incrementally for all the keys -# will cause the fragmentation to drop back to normal values. -# -# Important things to understand: -# -# 1. This feature is disabled by default, and only works if you compiled Redis -# to use the copy of Jemalloc we ship with the source code of Redis. -# This is the default with Linux builds. -# -# 2. You never need to enable this feature if you don't have fragmentation -# issues. -# -# 3. Once you experience fragmentation, you can enable this feature when -# needed with the command "CONFIG SET activedefrag yes". -# -# The configuration parameters are able to fine tune the behavior of the -# defragmentation process. If you are not sure about what they mean it is -# a good idea to leave the defaults untouched. - -# Enabled active defragmentation -# activedefrag yes - -# Minimum amount of fragmentation waste to start active defrag -# active-defrag-ignore-bytes 100mb - -# Minimum percentage of fragmentation to start active defrag -# active-defrag-threshold-lower 10 - -# Maximum percentage of fragmentation at which we use maximum effort -# active-defrag-threshold-upper 100 - -# Minimal effort for defrag in CPU percentage -# active-defrag-cycle-min 25 - -# Maximal effort for defrag in CPU percentage -# active-defrag-cycle-max 75 diff --git a/ymir/backend/src/ymir_viz/src/app.py b/ymir/backend/src/ymir_viz/src/app.py deleted file mode 100644 index 285c2d7241..0000000000 --- a/ymir/backend/src/ymir_viz/src/app.py +++ /dev/null @@ -1,69 +0,0 @@ -import logging -import sys -import uuid -from typing import Dict, Tuple - -import connexion -import sentry_sdk -from flask import request, jsonify -from sentry_sdk.integrations.flask import FlaskIntegration -from werkzeug.wrappers import Response - -from id_definition.error_codes import VizErrorCode -from src.config import viz_settings -from src.encoder import JSONEncoder -from src.libs.exceptions import VizException - - -def config_app(app: connexion, config: Dict = None) -> None: - # load default configuration - app.config.from_object("src.config.viz_settings") - - # load app specified configuration if need - if config is not None and isinstance(config, dict): - app.config.update(config) - - -def init_logging() -> None: - logging_level = logging.DEBUG if viz_settings.VIZ_DEBUG_MODE else logging.INFO - logging.basicConfig(stream=sys.stdout, - format='%(levelname)-8s: [%(asctime)s] %(filename)s:%(lineno)s:%(funcName)s(): %(message)s', - datefmt='%Y%m%d-%H:%M:%S', - level=logging_level) - - -def create_connexion_app(config: Dict = None) -> connexion.App: - init_logging() - connexion_app = connexion.App(__name__, specification_dir="./swagger/") - app = connexion_app.app - app.json_encoder = JSONEncoder - config_app(app, config) - - sentry_sdk.init(dsn=viz_settings.VIZ_SENTRY_DSN, integrations=[FlaskIntegration()]) - - connexion_app.add_api("swagger.yaml", arguments={"title": "Ymir-viz API"}) - - @app.before_request - def init_request() -> None: - request_id = request.headers.get("request_id", str(uuid.uuid1())) - setattr(request.headers, "request_id", request_id) - - @app.errorhandler(VizException) - def handle_viz_exception(e: VizException) -> Tuple[Response, int]: - resp = dict(code=e.code, message=e.message) - - return jsonify(resp), e.status_code - - @app.errorhandler(Exception) - def handle_exception(e: Exception) -> Tuple[Response, int]: - logging.exception(e) - resp = dict(code=VizErrorCode.INTERNAL_ERROR, message=str(e)) - - return jsonify(resp), 500 - - # For test server - @app.route("/ping") - 
def ping() -> str: - return "pong" - - return connexion_app diff --git a/ymir/backend/src/ymir_viz/src/config.py b/ymir/backend/src/ymir_viz/src/config.py deleted file mode 100644 index 5cbb57bc02..0000000000 --- a/ymir/backend/src/ymir_viz/src/config.py +++ /dev/null @@ -1,28 +0,0 @@ -from typing import Optional - -from pydantic import BaseSettings - - -class VizSettings(BaseSettings): - BACKEND_SANDBOX_ROOT: str = "/data/mir_root" - - VIZ_REDIS_URI: str = "redis://" - - # the middle data structure, it will save into cache,like Redis - VIZ_MIDDLE_VERSION: str = "0.1" - - # redis key info - VIZ_KEY_ASSET_DETAIL: str = "viz_key_detail" - VIZ_KEY_ASSET_INDEX: str = "viz_key_index" - # added all assets index by viz - VIZ_ALL_INDEX_CLASSIDS: str = "__all_index_classids__" - # set flag status when generating cache - VIZ_CACHE_STATUS: str = "viz_key_status" - - VIZ_SENTRY_DSN: Optional[str] = None - REDIS_TESTING: bool = False - - VIZ_DEBUG_MODE: bool = False - - -viz_settings = VizSettings(_env_file=".env") # type: ignore diff --git a/ymir/backend/src/ymir_viz/src/controllers/asset_controller.py b/ymir/backend/src/ymir_viz/src/controllers/asset_controller.py deleted file mode 100644 index 2a45725727..0000000000 --- a/ymir/backend/src/ymir_viz/src/controllers/asset_controller.py +++ /dev/null @@ -1,35 +0,0 @@ -import logging - -from src.libs import utils -from src.swagger_models.asset_meta_result import AssetMetaResult -from src.viz_models import asset - - -# Return type: AssetMetaResult -def get_assert_id_info(user_id: str, repo_id: str, branch_id: str, asset_id: str) -> AssetMetaResult: - result = asset.AssetsModel(user_id, repo_id, branch_id).get_asset_id_info(asset_id) - - resp = utils.suss_resp(result=result) - logging.info(f"get_assert_id_info: {resp}") - - return AssetMetaResult(**resp) - - -# Return type: AssetMetaResult -def get_asserts_info( - user_id: str, - repo_id: str, - branch_id: str, - offset: int = 0, - limit: int = 20, - class_id: int = None, -) -> AssetMetaResult: - """ - API get assetst info - """ - result = asset.AssetsModel(user_id, repo_id, branch_id).get_assets_info(offset, limit, class_id) - - resp = utils.suss_resp(result=result) - logging.info(f"get_asserts_info: {resp}") - - return AssetMetaResult(**resp) diff --git a/ymir/backend/src/ymir_viz/src/controllers/dataset_controller.py b/ymir/backend/src/ymir_viz/src/controllers/dataset_controller.py deleted file mode 100644 index 64c835340a..0000000000 --- a/ymir/backend/src/ymir_viz/src/controllers/dataset_controller.py +++ /dev/null @@ -1,46 +0,0 @@ -import logging - -from src.config import viz_settings -from src.libs import utils -from src.swagger_models.dataset_result import DatasetResult -from src.viz_models import pb_reader - - -def get_dataset_info(user_id: str, repo_id: str, branch_id: str) -> DatasetResult: - """get dataset info - - get dataset info # noqa: E501 - - :param user_id: user_id - :type user_id: str - :param repo_id: repo_id - :type repo_id: str - :param branch_id: branch_id - :type branch_id: str - - :rtype: DatasetResult - - exampled return data: - { - "class_ids_count": {3: 34}, - "class_names_count": {'cat': 34}, - "ignored_labels": {'cat':5, }, - "negative_info": { - "negative_images_cnt": 0, - "project_negative_images_cnt": 0, - }, - "total_images_cnt": 1, - } - """ - dataset_info = pb_reader.MirStorageLoader( - sandbox_root=viz_settings.BACKEND_SANDBOX_ROOT, - user_id=user_id, - repo_id=repo_id, - branch_id=branch_id, - task_id=branch_id, - ).get_dataset_info() - - resp = 
utils.suss_resp(result=dataset_info) - logging.info(f"get_dataset_info: {resp}") - - return DatasetResult(**resp) diff --git a/ymir/backend/src/ymir_viz/src/controllers/evaluation_controller.py b/ymir/backend/src/ymir_viz/src/controllers/evaluation_controller.py deleted file mode 100644 index a1ff05d7ce..0000000000 --- a/ymir/backend/src/ymir_viz/src/controllers/evaluation_controller.py +++ /dev/null @@ -1,34 +0,0 @@ -import logging - -from src.config import viz_settings -from src.libs import utils -from src.swagger_models import DatasetEvaluationResult -from src.viz_models import pb_reader - - -def get_dataset_evaluations(user_id: str, repo_id: str, branch_id: str) -> DatasetEvaluationResult: - """ - get dataset evaluations result - - :param user_id: user_id - :type user_id: str - :param repo_id: repo_id - :type repo_id: str - :param branch_id: branch_id - :type branch_id: str - - :rtype: DatasetEvaluationResult - """ - evaluations = pb_reader.MirStorageLoader( - sandbox_root=viz_settings.BACKEND_SANDBOX_ROOT, - user_id=user_id, - repo_id=repo_id, - branch_id=branch_id, - task_id=branch_id, - ).get_dataset_evaluations() - - resp = utils.suss_resp() - resp["result"] = evaluations - logging.info("successfully get_dataset_evaluations from branch %s", branch_id) - - return DatasetEvaluationResult(**resp) diff --git a/ymir/backend/src/ymir_viz/src/controllers/model_controller.py b/ymir/backend/src/ymir_viz/src/controllers/model_controller.py deleted file mode 100644 index 973fe9b8f9..0000000000 --- a/ymir/backend/src/ymir_viz/src/controllers/model_controller.py +++ /dev/null @@ -1,42 +0,0 @@ -import logging - -from src.config import viz_settings -from src.libs import utils -from src.swagger_models.model_result import ModelResult -from src.viz_models import pb_reader - - -def get_model_info(user_id: str, repo_id: str, branch_id: str) -> ModelResult: - """ - get task model info - - :param user_id: user_id - :type user_id: str - :param repo_id: repo_id - :type repo_id: str - :param branch_id: branch_id - :type branch_id: str - - :rtype: ModelResult - """ - model_info = pb_reader.MirStorageLoader( - sandbox_root=viz_settings.BACKEND_SANDBOX_ROOT, - user_id=user_id, - repo_id=repo_id, - branch_id=branch_id, - task_id=branch_id, - ).get_model_info() - - resp = utils.suss_resp() - resp.update({ - "result": - dict( - model_id=model_info["model_hash"], - model_mAP=model_info["mean_average_precision"], - task_parameters=model_info["task_parameters"], - executor_config=model_info["executor_config"], - ) - }) - logging.info(f"get_model_info: {resp}") - - return ModelResult(**resp) diff --git a/ymir/backend/src/ymir_viz/src/encoder.py b/ymir/backend/src/ymir_viz/src/encoder.py deleted file mode 100644 index 8ab4241dbe..0000000000 --- a/ymir/backend/src/ymir_viz/src/encoder.py +++ /dev/null @@ -1,20 +0,0 @@ -import six -from connexion.apps.flask_app import FlaskJSONEncoder - -from src.swagger_models.base_model_ import Model - - -class JSONEncoder(FlaskJSONEncoder): - include_nulls = False - - def default(self, o): - if isinstance(o, Model): - dikt = {} - for attr, _ in six.iteritems(o.swagger_types): - value = getattr(o, attr) - if value is None and not self.include_nulls: - continue - attr = o.attribute_map[attr] - dikt[attr] = value - return dikt - return FlaskJSONEncoder.default(self, o) diff --git a/ymir/backend/src/ymir_viz/src/libs/cache.py b/ymir/backend/src/ymir_viz/src/libs/cache.py deleted file mode 100644 index 73851d9ad6..0000000000 --- a/ymir/backend/src/ymir_viz/src/libs/cache.py +++ /dev/null 
@@ -1,70 +0,0 @@ -import logging -from typing import Any, Dict, List - -import redis -import yaml -from werkzeug.local import LocalProxy - -from src.config import viz_settings - - -class RedisCache: - def __init__(self, rds_client: redis.Redis): - self._client = rds_client - - def get(self, key: str) -> Dict: - try: - raw_value = self._client.get(key) - except Exception as e: - logging.exception(f"{e}") - return dict() - if raw_value is None: - return dict() - content = yaml.safe_load(str(raw_value)) - - return content - - def set(self, key: str, value: Any, timeout: int = None) -> None: - if isinstance(value, dict): - value = yaml.safe_dump(value) - elif isinstance(value, str): - value = value - else: - raise ValueError(f"Invalid redis value type: {type(value)}") - self._client.set(key, value, timeout) - - def hmget(self, name: str, keys: List) -> List: - return self._client.hmget(name, keys) - - def lrange(self, name: str, start: int, end: int) -> List: - return self._client.lrange(name, start, end) - - def exists(self, names: str) -> int: - try: - return self._client.exists(names) - except Exception as e: - logging.exception(f"{e}") - return False - - def pipeline(self) -> Any: - return self._client.pipeline(transaction=False) - - def llen(self, name: str) -> int: - return self._client.llen(name) - - def hget(self, name: str, key: str) -> Dict: - res = self._client.hget(name, key) - return yaml.safe_load(str(res)) - - -def get_connect() -> redis.Redis: - if viz_settings.REDIS_TESTING: - import redislite - redis_con = redislite.StrictRedis("/tmp/redis.db") - else: - redis_con = redis.StrictRedis.from_url(str(viz_settings.VIZ_REDIS_URI), encoding="utf8", decode_responses=True) - return redis_con - - -proxy_rds_con = LocalProxy(get_connect) -redis_cache = RedisCache(proxy_rds_con) # type: ignore diff --git a/ymir/backend/src/ymir_viz/src/libs/exceptions.py b/ymir/backend/src/ymir_viz/src/libs/exceptions.py deleted file mode 100644 index 5a84bd5c90..0000000000 --- a/ymir/backend/src/ymir_viz/src/libs/exceptions.py +++ /dev/null @@ -1,45 +0,0 @@ -from typing import Optional, Dict - -from id_definition.error_codes import VizErrorCode - - -class VizException(Exception): - status_code = 400 - code = VizErrorCode.GENERAL_ERROR - message = "Exception Occured" - - def __init__( - self, - message: Optional[str] = None, - status_code: Optional[int] = None, - code: Optional[int] = None, - ): - super().__init__() - self.status_code = status_code or self.status_code - self.code = code or self.code - self.message = message or self.message - - def __str__(self) -> str: - return f"error_code: {self.code}, message: {self.message}" - - def to_dict(self) -> Dict: - return { - "status_code": self.status_code, - "code": self.code, - "message": self.message, - } - - -class BranchNotExists(VizException): - code = VizErrorCode.BRANCH_NOT_EXISTS - message = "branch not found" - - -class ModelNotExists(VizException): - code = VizErrorCode.MODEL_NOT_EXISTS - message = "model not found" - - -class DatasetEvaluationNotExists(VizException): - code = VizErrorCode.DATASET_EVALUATION_NOT_EXISTS - message = "dataset evaluation not found" diff --git a/ymir/backend/src/ymir_viz/src/libs/utils.py b/ymir/backend/src/ymir_viz/src/libs/utils.py deleted file mode 100644 index 0548afee36..0000000000 --- a/ymir/backend/src/ymir_viz/src/libs/utils.py +++ /dev/null @@ -1,29 +0,0 @@ -import logging -import time -from functools import wraps -from typing import Dict, Callable - -from flask import request - - -def suss_resp(code: int = 
0, message: str = "operation successful", result: Dict = {}) -> Dict: - resp = dict( - code=code, - message=message, - request_id=request.headers.get("request_id"), - result=result, - ) - - return resp - - -def time_it(f: Callable) -> Callable: - @wraps(f) - def wrapper(*args: tuple, **kwargs: Dict) -> Callable: - _start = time.time() - _ret = f(*args, **kwargs) - _cost = time.time() - _start - logging.info(f"|-{f.__name__} costs {_cost:.2f}s({_cost / 60:.2f}m).") - return _ret - - return wrapper diff --git a/ymir/backend/src/ymir_viz/src/swagger/swagger.yaml b/ymir/backend/src/ymir_viz/src/swagger/swagger.yaml deleted file mode 100644 index 5a2e630020..0000000000 --- a/ymir/backend/src/ymir_viz/src/swagger/swagger.yaml +++ /dev/null @@ -1,490 +0,0 @@ -openapi: 3.0.1 -info: - title: Ymir-viz - description: Ymir-viz - contact: - email: test@ymir.ai - license: - name: Apache 2.0 - url: https://www.apache.org/licenses/LICENSE-2.0.html - version: 1.0.0 -servers: -- url: http://xxx.com/v1 -tags: -- name: asset - description: "Assets represent the smallest granular resources, like pictures" -- name: model - description: Training task produces model -paths: - /users/{user_id}/repositories/{repo_id}/branches/{branch_id}/datasets: - get: - tags: - - dataset - summary: get dataset info - description: get dataset info - operationId: get_dataset_info - parameters: - - name: user_id - in: path - description: user_id - required: true - style: simple - explode: false - schema: - type: string - - name: repo_id - in: path - description: repo_id - required: true - style: simple - explode: false - schema: - type: string - - name: branch_id - in: path - description: branch_id - required: true - style: simple - explode: false - schema: - type: string - responses: - "200": - description: successful operation - content: - application/json: - schema: - $ref: '#/components/schemas/DatasetResult' - "400": - description: Dataset not exists - x-openapi-router-controller: src.controllers.dataset_controller - /users/{user_id}/repositories/{repo_id}/branches/{branch_id}/assets: - get: - tags: - - asset - summary: "get assets info from special user_id,repo_id,branch_id" - description: "get assets info from special user_id,repo_id,branch_id, sort by\ - \ asset_id" - operationId: get_asserts_info - parameters: - - name: user_id - in: path - description: user_id - required: true - style: simple - explode: false - schema: - type: string - - name: repo_id - in: path - description: repo_id - required: true - style: simple - explode: false - schema: - type: string - - name: branch_id - in: path - description: branch_id - required: true - style: simple - explode: false - schema: - type: string - - name: offset - in: query - description: offset - required: false - style: form - explode: true - schema: - type: integer - default: 0 - - name: limit - in: query - description: limit - required: false - style: form - explode: true - schema: - maximum: 100 - minimum: 0 - type: integer - default: 20 - - name: class_id - in: query - description: single class id - required: false - style: form - explode: true - schema: - type: integer - responses: - "200": - description: successful operation - content: - application/json: - schema: - $ref: '#/components/schemas/AssetsResult' - "400": - description: Branch not exists - content: {} - x-openapi-router-controller: src.controllers.asset_controller - /users/{user_id}/repositories/{repo_id}/branches/{branch_id}/assets/{asset_id}: - get: - tags: - - asset - summary: get asset_id info - 
description: "" - operationId: get_assert_id_info - parameters: - - name: user_id - in: path - description: user_id - required: true - style: simple - explode: false - schema: - type: string - - name: repo_id - in: path - description: repo_id - required: true - style: simple - explode: false - schema: - type: string - - name: branch_id - in: path - description: branch_id - required: true - style: simple - explode: false - schema: - type: string - - name: asset_id - in: path - description: asset_id - required: true - style: simple - explode: false - schema: - type: string - responses: - "200": - description: successful operation - content: - application/json: - schema: - $ref: '#/components/schemas/AssetMetaResult' - "400": - description: Branch not exists - content: {} - x-openapi-router-controller: src.controllers.asset_controller - /users/{user_id}/repositories/{repo_id}/branches/{branch_id}/models: - get: - tags: - - model - summary: get model info - description: get model info - operationId: get_model_info - parameters: - - name: user_id - in: path - description: user_id - required: true - style: simple - explode: false - schema: - type: string - - name: repo_id - in: path - description: repo_id - required: true - style: simple - explode: false - schema: - type: string - - name: branch_id - in: path - description: branch_id - required: true - style: simple - explode: false - schema: - type: string - responses: - "200": - description: successful operation - content: - application/json: - schema: - $ref: '#/components/schemas/ModelResult' - "400": - description: Task not exists - x-openapi-router-controller: src.controllers.model_controller - /users/{user_id}/repositories/{repo_id}/branches/{branch_id}/evaluations: - get: - tags: - - evaluation - summary: get dataset evaluation result - description: get dataset evaluation result - operationId: get_dataset_evaluations - parameters: - - name: user_id - in: path - description: user_id - required: true - style: simple - explode: false - schema: - type: string - - name: repo_id - in: path - description: repo_id - required: true - style: simple - explode: false - schema: - type: string - - name: branch_id - in: path - description: branch_id - required: true - style: simple - explode: false - schema: - type: string - responses: - "200": - description: successful operation - content: - application/json: - schema: - $ref: '#/components/schemas/DatasetEvaluationResult' - "400": - description: DatasetEvaluation not exists - x-openapi-router-controller: src.controllers.evaluation_controller -components: - schemas: - AssetInfo: - type: object - properties: - asset_id: - type: string - class_ids: - type: array - items: - type: integer - AssetsResult: - allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - result: - $ref: '#/components/schemas/AssetsResult_result' - AssetMetaResult: - allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - result: - $ref: '#/components/schemas/AssetsMetaInfo' - AssetsMetaInfo: - type: object - properties: - annotations: - $ref: '#/components/schemas/Annotations' - class_ids: - type: array - items: - type: integer - metadata: - $ref: '#/components/schemas/AssetsMetaInfo_metadata' - example: - annotations: - - box: - x: 121 - "y": 1 - w: 33 - h: 44 - class_id: 4 - score: 0.33 - class_ids: - - 4 - - 6 - metadata: - asset_type: pic - width: 100 - height: 100 - image_channels: 1 - timestamp: - start: 45645 - ModelResult: - allOf: - - $ref: 
'#/components/schemas/ApiResponse' - - type: object - properties: - result: - $ref: '#/components/schemas/ModelResult_result' - DatasetResult: - allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - result: - $ref: '#/components/schemas/DatasetResult_result' - DatasetEvaluationResult: - allOf: - - $ref: '#/components/schemas/ApiResponse' - - type: object - properties: - result: - type: object - additionalProperties: - $ref: '#/components/schemas/DatasetEvaluationResult_result' - ApiResponse: - type: object - properties: - code: - type: integer - format: int32 - request_id: - type: string - message: - type: string - Annotations: - type: array - items: - $ref: '#/components/schemas/Annotations_inner' - DatasetEvaluation: - type: object - properties: - ci_evaluations: - type: object - additionalProperties: - $ref: '#/components/schemas/DatasetEvaluationElement' - topic_evaluations: - type: object - additionalProperties: - $ref: '#/components/schemas/DatasetEvaluationElement' - ci_averaged_evaluation: - $ref: '#/components/schemas/DatasetEvaluationElement' - DatasetEvaluationElement: - type: object - properties: - ap: - type: number - ar: - type: number - tp: - type: integer - fp: - type: integer - fn: - type: integer - AssetsResult_result: - type: object - properties: - elements: - type: array - items: - $ref: '#/components/schemas/AssetInfo' - offset: - type: integer - limit: - type: integer - tatal: - type: integer - example: null - AssetsMetaInfo_metadata_timestamp: - type: object - properties: - start: - type: integer - example: null - AssetsMetaInfo_metadata: - type: object - properties: - asset_type: - type: string - width: - type: integer - height: - type: integer - image_channels: - type: integer - timestamp: - $ref: '#/components/schemas/AssetsMetaInfo_metadata_timestamp' - example: null - ModelResult_result: - type: object - properties: - model_id: - type: string - model_mAP: - maximum: 1 - minimum: 0 - type: integer - description: mean average precision of the model - task_parameters: - type: string - executor_config: - type: string - example: null - DatasetResult_result_negative_info: - type: object - properties: - negative_images_cnt: - type: integer - project_negative_images_cnt: - type: integer - example: null - DatasetResult_result: - type: object - properties: - total_images_cnt: - type: integer - class_ids_count: - type: object - example: "{3:8, 4:2}" - class_names_count: - type: object - example: "{'cat':8, 'dog':2}" - ignored_labels: - type: object - example: "{'cat':8}" - negative_info: - $ref: '#/components/schemas/DatasetResult_result_negative_info' - example: null - DatasetEvaluationResult_result: - type: object - properties: - conf_thr: - type: number - iou_evaluations: - type: object - additionalProperties: - $ref: '#/components/schemas/DatasetEvaluation' - iou_averaged_evaluation: - $ref: '#/components/schemas/DatasetEvaluation' - example: null - Annotations_inner: - type: object - properties: - box: - type: object - properties: - x: - type: integer - "y": - type: integer - w: - type: integer - h: - type: integer - class_id: - type: integer - score: - type: integer - diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/__init__.py b/ymir/backend/src/ymir_viz/src/swagger_models/__init__.py deleted file mode 100644 index 0156addaa7..0000000000 --- a/ymir/backend/src/ymir_viz/src/swagger_models/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -# coding: utf-8 - -# flake8: noqa -from __future__ import absolute_import -# import models into 
model package -from src.swagger_models.annotations import Annotations -from src.swagger_models.annotations_inner import AnnotationsInner -from src.swagger_models.api_response import ApiResponse -from src.swagger_models.asset_info import AssetInfo -from src.swagger_models.asset_meta_result import AssetMetaResult -from src.swagger_models.assets_meta_info import AssetsMetaInfo -from src.swagger_models.assets_meta_info_metadata import AssetsMetaInfoMetadata -from src.swagger_models.assets_meta_info_metadata_timestamp import AssetsMetaInfoMetadataTimestamp -from src.swagger_models.assets_result import AssetsResult -from src.swagger_models.assets_result_result import AssetsResultResult -from src.swagger_models.dataset_evaluation import DatasetEvaluation -from src.swagger_models.dataset_evaluation_element import DatasetEvaluationElement -from src.swagger_models.dataset_evaluation_result import DatasetEvaluationResult -from src.swagger_models.dataset_evaluation_result_result import DatasetEvaluationResultResult -from src.swagger_models.dataset_result import DatasetResult -from src.swagger_models.dataset_result_result import DatasetResultResult -from src.swagger_models.dataset_result_result_negative_info import DatasetResultResultNegativeInfo -from src.swagger_models.model_result import ModelResult -from src.swagger_models.model_result_result import ModelResultResult diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/annotations.py b/ymir/backend/src/ymir_viz/src/swagger_models/annotations.py deleted file mode 100644 index aa4ad614f2..0000000000 --- a/ymir/backend/src/ymir_viz/src/swagger_models/annotations.py +++ /dev/null @@ -1,37 +0,0 @@ -# coding: utf-8 - -from __future__ import absolute_import -from datetime import date, datetime # noqa: F401 - -from typing import List, Dict # noqa: F401 - -from src.swagger_models.base_model_ import Model -from src.swagger_models.annotations_inner import AnnotationsInner # noqa: F401,E501 -from src import util - - -class Annotations(Model): - """NOTE: This class is auto generated by the swagger code generator program. - - Do not edit the class manually. - """ - def __init__(self): # noqa: E501 - """Annotations - a model defined in Swagger - - """ - self.swagger_types = { - } - - self.attribute_map = { - } - - @classmethod - def from_dict(cls, dikt) -> 'Annotations': - """Returns the dict as a model - - :param dikt: A dict. - :type: dict - :return: The Annotations of this Annotations. # noqa: E501 - :rtype: Annotations - """ - return util.deserialize_model(dikt, cls) diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/annotations_inner.py b/ymir/backend/src/ymir_viz/src/swagger_models/annotations_inner.py deleted file mode 100644 index 3fad26815c..0000000000 --- a/ymir/backend/src/ymir_viz/src/swagger_models/annotations_inner.py +++ /dev/null @@ -1,114 +0,0 @@ -# coding: utf-8 - -from __future__ import absolute_import -from datetime import date, datetime # noqa: F401 - -from typing import List, Dict # noqa: F401 - -from src.swagger_models.base_model_ import Model -from src import util - - -class AnnotationsInner(Model): - """NOTE: This class is auto generated by the swagger code generator program. - - Do not edit the class manually. - """ - def __init__(self, box: object=None, class_id: int=None, score: int=None): # noqa: E501 - """AnnotationsInner - a model defined in Swagger - - :param box: The box of this AnnotationsInner. # noqa: E501 - :type box: object - :param class_id: The class_id of this AnnotationsInner. 
# noqa: E501 - :type class_id: int - :param score: The score of this AnnotationsInner. # noqa: E501 - :type score: int - """ - self.swagger_types = { - 'box': object, - 'class_id': int, - 'score': int - } - - self.attribute_map = { - 'box': 'box', - 'class_id': 'class_id', - 'score': 'score' - } - self._box = box - self._class_id = class_id - self._score = score - - @classmethod - def from_dict(cls, dikt) -> 'AnnotationsInner': - """Returns the dict as a model - - :param dikt: A dict. - :type: dict - :return: The Annotations_inner of this AnnotationsInner. # noqa: E501 - :rtype: AnnotationsInner - """ - return util.deserialize_model(dikt, cls) - - @property - def box(self) -> object: - """Gets the box of this AnnotationsInner. - - - :return: The box of this AnnotationsInner. - :rtype: object - """ - return self._box - - @box.setter - def box(self, box: object): - """Sets the box of this AnnotationsInner. - - - :param box: The box of this AnnotationsInner. - :type box: object - """ - - self._box = box - - @property - def class_id(self) -> int: - """Gets the class_id of this AnnotationsInner. - - - :return: The class_id of this AnnotationsInner. - :rtype: int - """ - return self._class_id - - @class_id.setter - def class_id(self, class_id: int): - """Sets the class_id of this AnnotationsInner. - - - :param class_id: The class_id of this AnnotationsInner. - :type class_id: int - """ - - self._class_id = class_id - - @property - def score(self) -> int: - """Gets the score of this AnnotationsInner. - - - :return: The score of this AnnotationsInner. - :rtype: int - """ - return self._score - - @score.setter - def score(self, score: int): - """Sets the score of this AnnotationsInner. - - - :param score: The score of this AnnotationsInner. - :type score: int - """ - - self._score = score diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/api_response.py b/ymir/backend/src/ymir_viz/src/swagger_models/api_response.py deleted file mode 100644 index 6093786f0d..0000000000 --- a/ymir/backend/src/ymir_viz/src/swagger_models/api_response.py +++ /dev/null @@ -1,114 +0,0 @@ -# coding: utf-8 - -from __future__ import absolute_import -from datetime import date, datetime # noqa: F401 - -from typing import List, Dict # noqa: F401 - -from src.swagger_models.base_model_ import Model -from src import util - - -class ApiResponse(Model): - """NOTE: This class is auto generated by the swagger code generator program. - - Do not edit the class manually. - """ - def __init__(self, code: int=None, request_id: str=None, message: str=None): # noqa: E501 - """ApiResponse - a model defined in Swagger - - :param code: The code of this ApiResponse. # noqa: E501 - :type code: int - :param request_id: The request_id of this ApiResponse. # noqa: E501 - :type request_id: str - :param message: The message of this ApiResponse. # noqa: E501 - :type message: str - """ - self.swagger_types = { - 'code': int, - 'request_id': str, - 'message': str - } - - self.attribute_map = { - 'code': 'code', - 'request_id': 'request_id', - 'message': 'message' - } - self._code = code - self._request_id = request_id - self._message = message - - @classmethod - def from_dict(cls, dikt) -> 'ApiResponse': - """Returns the dict as a model - - :param dikt: A dict. - :type: dict - :return: The ApiResponse of this ApiResponse. # noqa: E501 - :rtype: ApiResponse - """ - return util.deserialize_model(dikt, cls) - - @property - def code(self) -> int: - """Gets the code of this ApiResponse. - - - :return: The code of this ApiResponse. 
- :rtype: int - """ - return self._code - - @code.setter - def code(self, code: int): - """Sets the code of this ApiResponse. - - - :param code: The code of this ApiResponse. - :type code: int - """ - - self._code = code - - @property - def request_id(self) -> str: - """Gets the request_id of this ApiResponse. - - - :return: The request_id of this ApiResponse. - :rtype: str - """ - return self._request_id - - @request_id.setter - def request_id(self, request_id: str): - """Sets the request_id of this ApiResponse. - - - :param request_id: The request_id of this ApiResponse. - :type request_id: str - """ - - self._request_id = request_id - - @property - def message(self) -> str: - """Gets the message of this ApiResponse. - - - :return: The message of this ApiResponse. - :rtype: str - """ - return self._message - - @message.setter - def message(self, message: str): - """Sets the message of this ApiResponse. - - - :param message: The message of this ApiResponse. - :type message: str - """ - - self._message = message diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/asset_info.py b/ymir/backend/src/ymir_viz/src/swagger_models/asset_info.py deleted file mode 100644 index 5c5ec1c360..0000000000 --- a/ymir/backend/src/ymir_viz/src/swagger_models/asset_info.py +++ /dev/null @@ -1,88 +0,0 @@ -# coding: utf-8 - -from __future__ import absolute_import -from datetime import date, datetime # noqa: F401 - -from typing import List, Dict # noqa: F401 - -from src.swagger_models.base_model_ import Model -from src import util - - -class AssetInfo(Model): - """NOTE: This class is auto generated by the swagger code generator program. - - Do not edit the class manually. - """ - def __init__(self, asset_id: str=None, class_ids: List[int]=None): # noqa: E501 - """AssetInfo - a model defined in Swagger - - :param asset_id: The asset_id of this AssetInfo. # noqa: E501 - :type asset_id: str - :param class_ids: The class_ids of this AssetInfo. # noqa: E501 - :type class_ids: List[int] - """ - self.swagger_types = { - 'asset_id': str, - 'class_ids': List[int] - } - - self.attribute_map = { - 'asset_id': 'asset_id', - 'class_ids': 'class_ids' - } - self._asset_id = asset_id - self._class_ids = class_ids - - @classmethod - def from_dict(cls, dikt) -> 'AssetInfo': - """Returns the dict as a model - - :param dikt: A dict. - :type: dict - :return: The AssetInfo of this AssetInfo. # noqa: E501 - :rtype: AssetInfo - """ - return util.deserialize_model(dikt, cls) - - @property - def asset_id(self) -> str: - """Gets the asset_id of this AssetInfo. - - - :return: The asset_id of this AssetInfo. - :rtype: str - """ - return self._asset_id - - @asset_id.setter - def asset_id(self, asset_id: str): - """Sets the asset_id of this AssetInfo. - - - :param asset_id: The asset_id of this AssetInfo. - :type asset_id: str - """ - - self._asset_id = asset_id - - @property - def class_ids(self) -> List[int]: - """Gets the class_ids of this AssetInfo. - - - :return: The class_ids of this AssetInfo. - :rtype: List[int] - """ - return self._class_ids - - @class_ids.setter - def class_ids(self, class_ids: List[int]): - """Sets the class_ids of this AssetInfo. - - - :param class_ids: The class_ids of this AssetInfo. 
- :type class_ids: List[int] - """ - - self._class_ids = class_ids diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/asset_meta_result.py b/ymir/backend/src/ymir_viz/src/swagger_models/asset_meta_result.py deleted file mode 100644 index 1976a127a5..0000000000 --- a/ymir/backend/src/ymir_viz/src/swagger_models/asset_meta_result.py +++ /dev/null @@ -1,142 +0,0 @@ -# coding: utf-8 - -from __future__ import absolute_import -from datetime import date, datetime # noqa: F401 - -from typing import List, Dict # noqa: F401 - -from src.swagger_models.base_model_ import Model -from src.swagger_models.api_response import ApiResponse # noqa: F401,E501 -from src.swagger_models.assets_meta_info import AssetsMetaInfo # noqa: F401,E501 -from src import util - - -class AssetMetaResult(Model): - """NOTE: This class is auto generated by the swagger code generator program. - - Do not edit the class manually. - """ - def __init__(self, code: int=None, request_id: str=None, message: str=None, result: AssetsMetaInfo=None): # noqa: E501 - """AssetMetaResult - a model defined in Swagger - - :param code: The code of this AssetMetaResult. # noqa: E501 - :type code: int - :param request_id: The request_id of this AssetMetaResult. # noqa: E501 - :type request_id: str - :param message: The message of this AssetMetaResult. # noqa: E501 - :type message: str - :param result: The result of this AssetMetaResult. # noqa: E501 - :type result: AssetsMetaInfo - """ - self.swagger_types = { - 'code': int, - 'request_id': str, - 'message': str, - 'result': AssetsMetaInfo - } - - self.attribute_map = { - 'code': 'code', - 'request_id': 'request_id', - 'message': 'message', - 'result': 'result' - } - self._code = code - self._request_id = request_id - self._message = message - self._result = result - - @classmethod - def from_dict(cls, dikt) -> 'AssetMetaResult': - """Returns the dict as a model - - :param dikt: A dict. - :type: dict - :return: The AssetMetaResult of this AssetMetaResult. # noqa: E501 - :rtype: AssetMetaResult - """ - return util.deserialize_model(dikt, cls) - - @property - def code(self) -> int: - """Gets the code of this AssetMetaResult. - - - :return: The code of this AssetMetaResult. - :rtype: int - """ - return self._code - - @code.setter - def code(self, code: int): - """Sets the code of this AssetMetaResult. - - - :param code: The code of this AssetMetaResult. - :type code: int - """ - - self._code = code - - @property - def request_id(self) -> str: - """Gets the request_id of this AssetMetaResult. - - - :return: The request_id of this AssetMetaResult. - :rtype: str - """ - return self._request_id - - @request_id.setter - def request_id(self, request_id: str): - """Sets the request_id of this AssetMetaResult. - - - :param request_id: The request_id of this AssetMetaResult. - :type request_id: str - """ - - self._request_id = request_id - - @property - def message(self) -> str: - """Gets the message of this AssetMetaResult. - - - :return: The message of this AssetMetaResult. - :rtype: str - """ - return self._message - - @message.setter - def message(self, message: str): - """Sets the message of this AssetMetaResult. - - - :param message: The message of this AssetMetaResult. - :type message: str - """ - - self._message = message - - @property - def result(self) -> AssetsMetaInfo: - """Gets the result of this AssetMetaResult. - - - :return: The result of this AssetMetaResult. 
- :rtype: AssetsMetaInfo - """ - return self._result - - @result.setter - def result(self, result: AssetsMetaInfo): - """Sets the result of this AssetMetaResult. - - - :param result: The result of this AssetMetaResult. - :type result: AssetsMetaInfo - """ - - self._result = result diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/assets_meta_info.py b/ymir/backend/src/ymir_viz/src/swagger_models/assets_meta_info.py deleted file mode 100644 index 95ed3d1f08..0000000000 --- a/ymir/backend/src/ymir_viz/src/swagger_models/assets_meta_info.py +++ /dev/null @@ -1,116 +0,0 @@ -# coding: utf-8 - -from __future__ import absolute_import -from datetime import date, datetime # noqa: F401 - -from typing import List, Dict # noqa: F401 - -from src.swagger_models.base_model_ import Model -from src.swagger_models.annotations import Annotations # noqa: F401,E501 -from src.swagger_models.assets_meta_info_metadata import AssetsMetaInfoMetadata # noqa: F401,E501 -from src import util - - -class AssetsMetaInfo(Model): - """NOTE: This class is auto generated by the swagger code generator program. - - Do not edit the class manually. - """ - def __init__(self, annotations: Annotations=None, class_ids: List[int]=None, metadata: AssetsMetaInfoMetadata=None): # noqa: E501 - """AssetsMetaInfo - a model defined in Swagger - - :param annotations: The annotations of this AssetsMetaInfo. # noqa: E501 - :type annotations: Annotations - :param class_ids: The class_ids of this AssetsMetaInfo. # noqa: E501 - :type class_ids: List[int] - :param metadata: The metadata of this AssetsMetaInfo. # noqa: E501 - :type metadata: AssetsMetaInfoMetadata - """ - self.swagger_types = { - 'annotations': Annotations, - 'class_ids': List[int], - 'metadata': AssetsMetaInfoMetadata - } - - self.attribute_map = { - 'annotations': 'annotations', - 'class_ids': 'class_ids', - 'metadata': 'metadata' - } - self._annotations = annotations - self._class_ids = class_ids - self._metadata = metadata - - @classmethod - def from_dict(cls, dikt) -> 'AssetsMetaInfo': - """Returns the dict as a model - - :param dikt: A dict. - :type: dict - :return: The AssetsMetaInfo of this AssetsMetaInfo. # noqa: E501 - :rtype: AssetsMetaInfo - """ - return util.deserialize_model(dikt, cls) - - @property - def annotations(self) -> Annotations: - """Gets the annotations of this AssetsMetaInfo. - - - :return: The annotations of this AssetsMetaInfo. - :rtype: Annotations - """ - return self._annotations - - @annotations.setter - def annotations(self, annotations: Annotations): - """Sets the annotations of this AssetsMetaInfo. - - - :param annotations: The annotations of this AssetsMetaInfo. - :type annotations: Annotations - """ - - self._annotations = annotations - - @property - def class_ids(self) -> List[int]: - """Gets the class_ids of this AssetsMetaInfo. - - - :return: The class_ids of this AssetsMetaInfo. - :rtype: List[int] - """ - return self._class_ids - - @class_ids.setter - def class_ids(self, class_ids: List[int]): - """Sets the class_ids of this AssetsMetaInfo. - - - :param class_ids: The class_ids of this AssetsMetaInfo. - :type class_ids: List[int] - """ - - self._class_ids = class_ids - - @property - def metadata(self) -> AssetsMetaInfoMetadata: - """Gets the metadata of this AssetsMetaInfo. - - - :return: The metadata of this AssetsMetaInfo. - :rtype: AssetsMetaInfoMetadata - """ - return self._metadata - - @metadata.setter - def metadata(self, metadata: AssetsMetaInfoMetadata): - """Sets the metadata of this AssetsMetaInfo. 
- - - :param metadata: The metadata of this AssetsMetaInfo. - :type metadata: AssetsMetaInfoMetadata - """ - - self._metadata = metadata diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/assets_meta_info_metadata.py b/ymir/backend/src/ymir_viz/src/swagger_models/assets_meta_info_metadata.py deleted file mode 100644 index f8fc21bc16..0000000000 --- a/ymir/backend/src/ymir_viz/src/swagger_models/assets_meta_info_metadata.py +++ /dev/null @@ -1,167 +0,0 @@ -# coding: utf-8 - -from __future__ import absolute_import -from datetime import date, datetime # noqa: F401 - -from typing import List, Dict # noqa: F401 - -from src.swagger_models.base_model_ import Model -from src.swagger_models.assets_meta_info_metadata_timestamp import AssetsMetaInfoMetadataTimestamp # noqa: F401,E501 -from src import util - - -class AssetsMetaInfoMetadata(Model): - """NOTE: This class is auto generated by the swagger code generator program. - - Do not edit the class manually. - """ - def __init__(self, asset_type: str=None, width: int=None, height: int=None, image_channels: int=None, timestamp: AssetsMetaInfoMetadataTimestamp=None): # noqa: E501 - """AssetsMetaInfoMetadata - a model defined in Swagger - - :param asset_type: The asset_type of this AssetsMetaInfoMetadata. # noqa: E501 - :type asset_type: str - :param width: The width of this AssetsMetaInfoMetadata. # noqa: E501 - :type width: int - :param height: The height of this AssetsMetaInfoMetadata. # noqa: E501 - :type height: int - :param image_channels: The image_channels of this AssetsMetaInfoMetadata. # noqa: E501 - :type image_channels: int - :param timestamp: The timestamp of this AssetsMetaInfoMetadata. # noqa: E501 - :type timestamp: AssetsMetaInfoMetadataTimestamp - """ - self.swagger_types = { - 'asset_type': str, - 'width': int, - 'height': int, - 'image_channels': int, - 'timestamp': AssetsMetaInfoMetadataTimestamp - } - - self.attribute_map = { - 'asset_type': 'asset_type', - 'width': 'width', - 'height': 'height', - 'image_channels': 'image_channels', - 'timestamp': 'timestamp' - } - self._asset_type = asset_type - self._width = width - self._height = height - self._image_channels = image_channels - self._timestamp = timestamp - - @classmethod - def from_dict(cls, dikt) -> 'AssetsMetaInfoMetadata': - """Returns the dict as a model - - :param dikt: A dict. - :type: dict - :return: The AssetsMetaInfo_metadata of this AssetsMetaInfoMetadata. # noqa: E501 - :rtype: AssetsMetaInfoMetadata - """ - return util.deserialize_model(dikt, cls) - - @property - def asset_type(self) -> str: - """Gets the asset_type of this AssetsMetaInfoMetadata. - - - :return: The asset_type of this AssetsMetaInfoMetadata. - :rtype: str - """ - return self._asset_type - - @asset_type.setter - def asset_type(self, asset_type: str): - """Sets the asset_type of this AssetsMetaInfoMetadata. - - - :param asset_type: The asset_type of this AssetsMetaInfoMetadata. - :type asset_type: str - """ - - self._asset_type = asset_type - - @property - def width(self) -> int: - """Gets the width of this AssetsMetaInfoMetadata. - - - :return: The width of this AssetsMetaInfoMetadata. - :rtype: int - """ - return self._width - - @width.setter - def width(self, width: int): - """Sets the width of this AssetsMetaInfoMetadata. - - - :param width: The width of this AssetsMetaInfoMetadata. - :type width: int - """ - - self._width = width - - @property - def height(self) -> int: - """Gets the height of this AssetsMetaInfoMetadata. - - - :return: The height of this AssetsMetaInfoMetadata. 
- :rtype: int - """ - return self._height - - @height.setter - def height(self, height: int): - """Sets the height of this AssetsMetaInfoMetadata. - - - :param height: The height of this AssetsMetaInfoMetadata. - :type height: int - """ - - self._height = height - - @property - def image_channels(self) -> int: - """Gets the image_channels of this AssetsMetaInfoMetadata. - - - :return: The image_channels of this AssetsMetaInfoMetadata. - :rtype: int - """ - return self._image_channels - - @image_channels.setter - def image_channels(self, image_channels: int): - """Sets the image_channels of this AssetsMetaInfoMetadata. - - - :param image_channels: The image_channels of this AssetsMetaInfoMetadata. - :type image_channels: int - """ - - self._image_channels = image_channels - - @property - def timestamp(self) -> AssetsMetaInfoMetadataTimestamp: - """Gets the timestamp of this AssetsMetaInfoMetadata. - - - :return: The timestamp of this AssetsMetaInfoMetadata. - :rtype: AssetsMetaInfoMetadataTimestamp - """ - return self._timestamp - - @timestamp.setter - def timestamp(self, timestamp: AssetsMetaInfoMetadataTimestamp): - """Sets the timestamp of this AssetsMetaInfoMetadata. - - - :param timestamp: The timestamp of this AssetsMetaInfoMetadata. - :type timestamp: AssetsMetaInfoMetadataTimestamp - """ - - self._timestamp = timestamp diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/assets_meta_info_metadata_timestamp.py b/ymir/backend/src/ymir_viz/src/swagger_models/assets_meta_info_metadata_timestamp.py deleted file mode 100644 index 1353e18775..0000000000 --- a/ymir/backend/src/ymir_viz/src/swagger_models/assets_meta_info_metadata_timestamp.py +++ /dev/null @@ -1,62 +0,0 @@ -# coding: utf-8 - -from __future__ import absolute_import -from datetime import date, datetime # noqa: F401 - -from typing import List, Dict # noqa: F401 - -from src.swagger_models.base_model_ import Model -from src import util - - -class AssetsMetaInfoMetadataTimestamp(Model): - """NOTE: This class is auto generated by the swagger code generator program. - - Do not edit the class manually. - """ - def __init__(self, start: int=None): # noqa: E501 - """AssetsMetaInfoMetadataTimestamp - a model defined in Swagger - - :param start: The start of this AssetsMetaInfoMetadataTimestamp. # noqa: E501 - :type start: int - """ - self.swagger_types = { - 'start': int - } - - self.attribute_map = { - 'start': 'start' - } - self._start = start - - @classmethod - def from_dict(cls, dikt) -> 'AssetsMetaInfoMetadataTimestamp': - """Returns the dict as a model - - :param dikt: A dict. - :type: dict - :return: The AssetsMetaInfo_metadata_timestamp of this AssetsMetaInfoMetadataTimestamp. # noqa: E501 - :rtype: AssetsMetaInfoMetadataTimestamp - """ - return util.deserialize_model(dikt, cls) - - @property - def start(self) -> int: - """Gets the start of this AssetsMetaInfoMetadataTimestamp. - - - :return: The start of this AssetsMetaInfoMetadataTimestamp. - :rtype: int - """ - return self._start - - @start.setter - def start(self, start: int): - """Sets the start of this AssetsMetaInfoMetadataTimestamp. - - - :param start: The start of this AssetsMetaInfoMetadataTimestamp. 
- :type start: int - """ - - self._start = start diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/assets_result.py b/ymir/backend/src/ymir_viz/src/swagger_models/assets_result.py deleted file mode 100644 index affa53503d..0000000000 --- a/ymir/backend/src/ymir_viz/src/swagger_models/assets_result.py +++ /dev/null @@ -1,142 +0,0 @@ -# coding: utf-8 - -from __future__ import absolute_import -from datetime import date, datetime # noqa: F401 - -from typing import List, Dict # noqa: F401 - -from src.swagger_models.base_model_ import Model -from src.swagger_models.api_response import ApiResponse # noqa: F401,E501 -from src.swagger_models.assets_result_result import AssetsResultResult # noqa: F401,E501 -from src import util - - -class AssetsResult(Model): - """NOTE: This class is auto generated by the swagger code generator program. - - Do not edit the class manually. - """ - def __init__(self, code: int=None, request_id: str=None, message: str=None, result: AssetsResultResult=None): # noqa: E501 - """AssetsResult - a model defined in Swagger - - :param code: The code of this AssetsResult. # noqa: E501 - :type code: int - :param request_id: The request_id of this AssetsResult. # noqa: E501 - :type request_id: str - :param message: The message of this AssetsResult. # noqa: E501 - :type message: str - :param result: The result of this AssetsResult. # noqa: E501 - :type result: AssetsResultResult - """ - self.swagger_types = { - 'code': int, - 'request_id': str, - 'message': str, - 'result': AssetsResultResult - } - - self.attribute_map = { - 'code': 'code', - 'request_id': 'request_id', - 'message': 'message', - 'result': 'result' - } - self._code = code - self._request_id = request_id - self._message = message - self._result = result - - @classmethod - def from_dict(cls, dikt) -> 'AssetsResult': - """Returns the dict as a model - - :param dikt: A dict. - :type: dict - :return: The AssetsResult of this AssetsResult. # noqa: E501 - :rtype: AssetsResult - """ - return util.deserialize_model(dikt, cls) - - @property - def code(self) -> int: - """Gets the code of this AssetsResult. - - - :return: The code of this AssetsResult. - :rtype: int - """ - return self._code - - @code.setter - def code(self, code: int): - """Sets the code of this AssetsResult. - - - :param code: The code of this AssetsResult. - :type code: int - """ - - self._code = code - - @property - def request_id(self) -> str: - """Gets the request_id of this AssetsResult. - - - :return: The request_id of this AssetsResult. - :rtype: str - """ - return self._request_id - - @request_id.setter - def request_id(self, request_id: str): - """Sets the request_id of this AssetsResult. - - - :param request_id: The request_id of this AssetsResult. - :type request_id: str - """ - - self._request_id = request_id - - @property - def message(self) -> str: - """Gets the message of this AssetsResult. - - - :return: The message of this AssetsResult. - :rtype: str - """ - return self._message - - @message.setter - def message(self, message: str): - """Sets the message of this AssetsResult. - - - :param message: The message of this AssetsResult. - :type message: str - """ - - self._message = message - - @property - def result(self) -> AssetsResultResult: - """Gets the result of this AssetsResult. - - - :return: The result of this AssetsResult. - :rtype: AssetsResultResult - """ - return self._result - - @result.setter - def result(self, result: AssetsResultResult): - """Sets the result of this AssetsResult. 
- - - :param result: The result of this AssetsResult. - :type result: AssetsResultResult - """ - - self._result = result diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/assets_result_result.py b/ymir/backend/src/ymir_viz/src/swagger_models/assets_result_result.py deleted file mode 100644 index 9d3879320d..0000000000 --- a/ymir/backend/src/ymir_viz/src/swagger_models/assets_result_result.py +++ /dev/null @@ -1,141 +0,0 @@ -# coding: utf-8 - -from __future__ import absolute_import -from datetime import date, datetime # noqa: F401 - -from typing import List, Dict # noqa: F401 - -from src.swagger_models.base_model_ import Model -from src.swagger_models.asset_info import AssetInfo # noqa: F401,E501 -from src import util - - -class AssetsResultResult(Model): - """NOTE: This class is auto generated by the swagger code generator program. - - Do not edit the class manually. - """ - def __init__(self, elements: List[AssetInfo]=None, offset: int=None, limit: int=None, tatal: int=None): # noqa: E501 - """AssetsResultResult - a model defined in Swagger - - :param elements: The elements of this AssetsResultResult. # noqa: E501 - :type elements: List[AssetInfo] - :param offset: The offset of this AssetsResultResult. # noqa: E501 - :type offset: int - :param limit: The limit of this AssetsResultResult. # noqa: E501 - :type limit: int - :param tatal: The tatal of this AssetsResultResult. # noqa: E501 - :type tatal: int - """ - self.swagger_types = { - 'elements': List[AssetInfo], - 'offset': int, - 'limit': int, - 'tatal': int - } - - self.attribute_map = { - 'elements': 'elements', - 'offset': 'offset', - 'limit': 'limit', - 'tatal': 'tatal' - } - self._elements = elements - self._offset = offset - self._limit = limit - self._tatal = tatal - - @classmethod - def from_dict(cls, dikt) -> 'AssetsResultResult': - """Returns the dict as a model - - :param dikt: A dict. - :type: dict - :return: The AssetsResult_result of this AssetsResultResult. # noqa: E501 - :rtype: AssetsResultResult - """ - return util.deserialize_model(dikt, cls) - - @property - def elements(self) -> List[AssetInfo]: - """Gets the elements of this AssetsResultResult. - - - :return: The elements of this AssetsResultResult. - :rtype: List[AssetInfo] - """ - return self._elements - - @elements.setter - def elements(self, elements: List[AssetInfo]): - """Sets the elements of this AssetsResultResult. - - - :param elements: The elements of this AssetsResultResult. - :type elements: List[AssetInfo] - """ - - self._elements = elements - - @property - def offset(self) -> int: - """Gets the offset of this AssetsResultResult. - - - :return: The offset of this AssetsResultResult. - :rtype: int - """ - return self._offset - - @offset.setter - def offset(self, offset: int): - """Sets the offset of this AssetsResultResult. - - - :param offset: The offset of this AssetsResultResult. - :type offset: int - """ - - self._offset = offset - - @property - def limit(self) -> int: - """Gets the limit of this AssetsResultResult. - - - :return: The limit of this AssetsResultResult. - :rtype: int - """ - return self._limit - - @limit.setter - def limit(self, limit: int): - """Sets the limit of this AssetsResultResult. - - - :param limit: The limit of this AssetsResultResult. - :type limit: int - """ - - self._limit = limit - - @property - def tatal(self) -> int: - """Gets the tatal of this AssetsResultResult. - - - :return: The tatal of this AssetsResultResult. 
- :rtype: int - """ - return self._tatal - - @tatal.setter - def tatal(self, tatal: int): - """Sets the tatal of this AssetsResultResult. - - - :param tatal: The tatal of this AssetsResultResult. - :type tatal: int - """ - - self._tatal = tatal diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/assets_result_result_negative_info.py b/ymir/backend/src/ymir_viz/src/swagger_models/assets_result_result_negative_info.py deleted file mode 100644 index cd4426cc86..0000000000 --- a/ymir/backend/src/ymir_viz/src/swagger_models/assets_result_result_negative_info.py +++ /dev/null @@ -1,88 +0,0 @@ -# coding: utf-8 - -from __future__ import absolute_import - -from datetime import date, datetime # noqa: F401 -from typing import List, Dict # noqa: F401 - -from src import util -from src.swagger_models.base_model_ import Model - - -class AssetsResultResultNegativeInfo(Model): - """NOTE: This class is auto generated by the swagger code generator program. - - Do not edit the class manually. - """ - def __init__(self, negative_images_cnt: int=None, project_negative_images_cnt: int=None): # noqa: E501 - """AssetsResultResultNegativeInfo - a model defined in Swagger - - :param negative_images_cnt: The negative_images_cnt of this AssetsResultResultNegativeInfo. # noqa: E501 - :type negative_images_cnt: int - :param project_negative_images_cnt: The project_negative_images_cnt of this AssetsResultResultNegativeInfo. # noqa: E501 - :type project_negative_images_cnt: int - """ - self.swagger_types = { - 'negative_images_cnt': int, - 'project_negative_images_cnt': int - } - - self.attribute_map = { - 'negative_images_cnt': 'negative_images_cnt', - 'project_negative_images_cnt': 'project_negative_images_cnt' - } - self._negative_images_cnt = negative_images_cnt - self._project_negative_images_cnt = project_negative_images_cnt - - @classmethod - def from_dict(cls, dikt) -> 'AssetsResultResultNegativeInfo': - """Returns the dict as a model - - :param dikt: A dict. - :type: dict - :return: The AssetsResult_result_negative_info of this AssetsResultResultNegativeInfo. # noqa: E501 - :rtype: AssetsResultResultNegativeInfo - """ - return util.deserialize_model(dikt, cls) - - @property - def negative_images_cnt(self) -> int: - """Gets the negative_images_cnt of this AssetsResultResultNegativeInfo. - - - :return: The negative_images_cnt of this AssetsResultResultNegativeInfo. - :rtype: int - """ - return self._negative_images_cnt - - @negative_images_cnt.setter - def negative_images_cnt(self, negative_images_cnt: int): - """Sets the negative_images_cnt of this AssetsResultResultNegativeInfo. - - - :param negative_images_cnt: The negative_images_cnt of this AssetsResultResultNegativeInfo. - :type negative_images_cnt: int - """ - - self._negative_images_cnt = negative_images_cnt - - @property - def project_negative_images_cnt(self) -> int: - """Gets the project_negative_images_cnt of this AssetsResultResultNegativeInfo. - - - :return: The project_negative_images_cnt of this AssetsResultResultNegativeInfo. - :rtype: int - """ - return self._project_negative_images_cnt - - @project_negative_images_cnt.setter - def project_negative_images_cnt(self, project_negative_images_cnt: int): - """Sets the project_negative_images_cnt of this AssetsResultResultNegativeInfo. - - - :param project_negative_images_cnt: The project_negative_images_cnt of this AssetsResultResultNegativeInfo. 
- :type project_negative_images_cnt: int - """ - - self._project_negative_images_cnt = project_negative_images_cnt diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/base_model_.py b/ymir/backend/src/ymir_viz/src/swagger_models/base_model_.py deleted file mode 100644 index f168c28658..0000000000 --- a/ymir/backend/src/ymir_viz/src/swagger_models/base_model_.py +++ /dev/null @@ -1,69 +0,0 @@ -import pprint - -import six -import typing - -from src import util - -T = typing.TypeVar('T') - - -class Model(object): - # swaggerTypes: The key is attribute name and the - # value is attribute type. - swagger_types = {} - - # attributeMap: The key is attribute name and the - # value is json key in definition. - attribute_map = {} - - @classmethod - def from_dict(cls: typing.Type[T], dikt) -> T: - """Returns the dict as a model""" - return util.deserialize_model(dikt, cls) - - def to_dict(self): - """Returns the model properties as a dict - - :rtype: dict - """ - result = {} - - for attr, _ in six.iteritems(self.swagger_types): - value = getattr(self, attr) - if isinstance(value, list): - result[attr] = list(map( - lambda x: x.to_dict() if hasattr(x, "to_dict") else x, - value - )) - elif hasattr(value, "to_dict"): - result[attr] = value.to_dict() - elif isinstance(value, dict): - result[attr] = dict(map( - lambda item: (item[0], item[1].to_dict()) - if hasattr(item[1], "to_dict") else item, - value.items() - )) - else: - result[attr] = value - - return result - - def to_str(self): - """Returns the string representation of the model - - :rtype: str - """ - return pprint.pformat(self.to_dict()) - - def __repr__(self): - """For `print` and `pprint`""" - return self.to_str() - - def __eq__(self, other): - """Returns true if both objects are equal""" - return self.__dict__ == other.__dict__ - - def __ne__(self, other): - """Returns true if both objects are not equal""" - return not self == other diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/dataset_evaluation.py b/ymir/backend/src/ymir_viz/src/swagger_models/dataset_evaluation.py deleted file mode 100644 index 8e03515da8..0000000000 --- a/ymir/backend/src/ymir_viz/src/swagger_models/dataset_evaluation.py +++ /dev/null @@ -1,115 +0,0 @@ -# coding: utf-8 - -from __future__ import absolute_import -from datetime import date, datetime # noqa: F401 - -from typing import List, Dict # noqa: F401 - -from src.swagger_models.base_model_ import Model -from src.swagger_models.dataset_evaluation_element import DatasetEvaluationElement # noqa: F401,E501 -from src import util - - -class DatasetEvaluation(Model): - """NOTE: This class is auto generated by the swagger code generator program. - - Do not edit the class manually. - """ - def __init__(self, ci_evaluations: Dict[str, DatasetEvaluationElement]=None, topic_evaluations: Dict[str, DatasetEvaluationElement]=None, ci_averaged_evaluation: DatasetEvaluationElement=None): # noqa: E501 - """DatasetEvaluation - a model defined in Swagger - - :param ci_evaluations: The ci_evaluations of this DatasetEvaluation. # noqa: E501 - :type ci_evaluations: Dict[str, DatasetEvaluationElement] - :param topic_evaluations: The topic_evaluations of this DatasetEvaluation. # noqa: E501 - :type topic_evaluations: Dict[str, DatasetEvaluationElement] - :param ci_averaged_evaluation: The ci_averaged_evaluation of this DatasetEvaluation. 
# noqa: E501 - :type ci_averaged_evaluation: DatasetEvaluationElement - """ - self.swagger_types = { - 'ci_evaluations': Dict[str, DatasetEvaluationElement], - 'topic_evaluations': Dict[str, DatasetEvaluationElement], - 'ci_averaged_evaluation': DatasetEvaluationElement - } - - self.attribute_map = { - 'ci_evaluations': 'ci_evaluations', - 'topic_evaluations': 'topic_evaluations', - 'ci_averaged_evaluation': 'ci_averaged_evaluation' - } - self._ci_evaluations = ci_evaluations - self._topic_evaluations = topic_evaluations - self._ci_averaged_evaluation = ci_averaged_evaluation - - @classmethod - def from_dict(cls, dikt) -> 'DatasetEvaluation': - """Returns the dict as a model - - :param dikt: A dict. - :type: dict - :return: The DatasetEvaluation of this DatasetEvaluation. # noqa: E501 - :rtype: DatasetEvaluation - """ - return util.deserialize_model(dikt, cls) - - @property - def ci_evaluations(self) -> Dict[str, DatasetEvaluationElement]: - """Gets the ci_evaluations of this DatasetEvaluation. - - - :return: The ci_evaluations of this DatasetEvaluation. - :rtype: Dict[str, DatasetEvaluationElement] - """ - return self._ci_evaluations - - @ci_evaluations.setter - def ci_evaluations(self, ci_evaluations: Dict[str, DatasetEvaluationElement]): - """Sets the ci_evaluations of this DatasetEvaluation. - - - :param ci_evaluations: The ci_evaluations of this DatasetEvaluation. - :type ci_evaluations: Dict[str, DatasetEvaluationElement] - """ - - self._ci_evaluations = ci_evaluations - - @property - def topic_evaluations(self) -> Dict[str, DatasetEvaluationElement]: - """Gets the topic_evaluations of this DatasetEvaluation. - - - :return: The topic_evaluations of this DatasetEvaluation. - :rtype: Dict[str, DatasetEvaluationElement] - """ - return self._topic_evaluations - - @topic_evaluations.setter - def topic_evaluations(self, topic_evaluations: Dict[str, DatasetEvaluationElement]): - """Sets the topic_evaluations of this DatasetEvaluation. - - - :param topic_evaluations: The topic_evaluations of this DatasetEvaluation. - :type topic_evaluations: Dict[str, DatasetEvaluationElement] - """ - - self._topic_evaluations = topic_evaluations - - @property - def ci_averaged_evaluation(self) -> DatasetEvaluationElement: - """Gets the ci_averaged_evaluation of this DatasetEvaluation. - - - :return: The ci_averaged_evaluation of this DatasetEvaluation. - :rtype: DatasetEvaluationElement - """ - return self._ci_averaged_evaluation - - @ci_averaged_evaluation.setter - def ci_averaged_evaluation(self, ci_averaged_evaluation: DatasetEvaluationElement): - """Sets the ci_averaged_evaluation of this DatasetEvaluation. - - - :param ci_averaged_evaluation: The ci_averaged_evaluation of this DatasetEvaluation. - :type ci_averaged_evaluation: DatasetEvaluationElement - """ - - self._ci_averaged_evaluation = ci_averaged_evaluation diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/dataset_evaluation_element.py b/ymir/backend/src/ymir_viz/src/swagger_models/dataset_evaluation_element.py deleted file mode 100644 index c03a697f2f..0000000000 --- a/ymir/backend/src/ymir_viz/src/swagger_models/dataset_evaluation_element.py +++ /dev/null @@ -1,166 +0,0 @@ -# coding: utf-8 - -from __future__ import absolute_import -from datetime import date, datetime # noqa: F401 - -from typing import List, Dict # noqa: F401 - -from src.swagger_models.base_model_ import Model -from src import util - - -class DatasetEvaluationElement(Model): - """NOTE: This class is auto generated by the swagger code generator program. 
- - Do not edit the class manually. - """ - def __init__(self, ap: float=None, ar: float=None, tp: int=None, fp: int=None, fn: int=None): # noqa: E501 - """DatasetEvaluationElement - a model defined in Swagger - - :param ap: The ap of this DatasetEvaluationElement. # noqa: E501 - :type ap: float - :param ar: The ar of this DatasetEvaluationElement. # noqa: E501 - :type ar: float - :param tp: The tp of this DatasetEvaluationElement. # noqa: E501 - :type tp: int - :param fp: The fp of this DatasetEvaluationElement. # noqa: E501 - :type fp: int - :param fn: The fn of this DatasetEvaluationElement. # noqa: E501 - :type fn: int - """ - self.swagger_types = { - 'ap': float, - 'ar': float, - 'tp': int, - 'fp': int, - 'fn': int - } - - self.attribute_map = { - 'ap': 'ap', - 'ar': 'ar', - 'tp': 'tp', - 'fp': 'fp', - 'fn': 'fn' - } - self._ap = ap - self._ar = ar - self._tp = tp - self._fp = fp - self._fn = fn - - @classmethod - def from_dict(cls, dikt) -> 'DatasetEvaluationElement': - """Returns the dict as a model - - :param dikt: A dict. - :type: dict - :return: The DatasetEvaluationElement of this DatasetEvaluationElement. # noqa: E501 - :rtype: DatasetEvaluationElement - """ - return util.deserialize_model(dikt, cls) - - @property - def ap(self) -> float: - """Gets the ap of this DatasetEvaluationElement. - - - :return: The ap of this DatasetEvaluationElement. - :rtype: float - """ - return self._ap - - @ap.setter - def ap(self, ap: float): - """Sets the ap of this DatasetEvaluationElement. - - - :param ap: The ap of this DatasetEvaluationElement. - :type ap: float - """ - - self._ap = ap - - @property - def ar(self) -> float: - """Gets the ar of this DatasetEvaluationElement. - - - :return: The ar of this DatasetEvaluationElement. - :rtype: float - """ - return self._ar - - @ar.setter - def ar(self, ar: float): - """Sets the ar of this DatasetEvaluationElement. - - - :param ar: The ar of this DatasetEvaluationElement. - :type ar: float - """ - - self._ar = ar - - @property - def tp(self) -> int: - """Gets the tp of this DatasetEvaluationElement. - - - :return: The tp of this DatasetEvaluationElement. - :rtype: int - """ - return self._tp - - @tp.setter - def tp(self, tp: int): - """Sets the tp of this DatasetEvaluationElement. - - - :param tp: The tp of this DatasetEvaluationElement. - :type tp: int - """ - - self._tp = tp - - @property - def fp(self) -> int: - """Gets the fp of this DatasetEvaluationElement. - - - :return: The fp of this DatasetEvaluationElement. - :rtype: int - """ - return self._fp - - @fp.setter - def fp(self, fp: int): - """Sets the fp of this DatasetEvaluationElement. - - - :param fp: The fp of this DatasetEvaluationElement. - :type fp: int - """ - - self._fp = fp - - @property - def fn(self) -> int: - """Gets the fn of this DatasetEvaluationElement. - - - :return: The fn of this DatasetEvaluationElement. - :rtype: int - """ - return self._fn - - @fn.setter - def fn(self, fn: int): - """Sets the fn of this DatasetEvaluationElement. - - - :param fn: The fn of this DatasetEvaluationElement. 
- :type fn: int - """ - - self._fn = fn diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/dataset_evaluation_result.py b/ymir/backend/src/ymir_viz/src/swagger_models/dataset_evaluation_result.py deleted file mode 100644 index 50cf8b4b58..0000000000 --- a/ymir/backend/src/ymir_viz/src/swagger_models/dataset_evaluation_result.py +++ /dev/null @@ -1,142 +0,0 @@ -# coding: utf-8 - -from __future__ import absolute_import -from datetime import date, datetime # noqa: F401 - -from typing import List, Dict # noqa: F401 - -from src.swagger_models.base_model_ import Model -from src.swagger_models.api_response import ApiResponse # noqa: F401,E501 -from src.swagger_models.dataset_evaluation_result_result import DatasetEvaluationResultResult # noqa: F401,E501 -from src import util - - -class DatasetEvaluationResult(Model): - """NOTE: This class is auto generated by the swagger code generator program. - - Do not edit the class manually. - """ - def __init__(self, code: int=None, request_id: str=None, message: str=None, result: Dict[str, DatasetEvaluationResultResult]=None): # noqa: E501 - """DatasetEvaluationResult - a model defined in Swagger - - :param code: The code of this DatasetEvaluationResult. # noqa: E501 - :type code: int - :param request_id: The request_id of this DatasetEvaluationResult. # noqa: E501 - :type request_id: str - :param message: The message of this DatasetEvaluationResult. # noqa: E501 - :type message: str - :param result: The result of this DatasetEvaluationResult. # noqa: E501 - :type result: Dict[str, DatasetEvaluationResultResult] - """ - self.swagger_types = { - 'code': int, - 'request_id': str, - 'message': str, - 'result': Dict[str, DatasetEvaluationResultResult] - } - - self.attribute_map = { - 'code': 'code', - 'request_id': 'request_id', - 'message': 'message', - 'result': 'result' - } - self._code = code - self._request_id = request_id - self._message = message - self._result = result - - @classmethod - def from_dict(cls, dikt) -> 'DatasetEvaluationResult': - """Returns the dict as a model - - :param dikt: A dict. - :type: dict - :return: The DatasetEvaluationResult of this DatasetEvaluationResult. # noqa: E501 - :rtype: DatasetEvaluationResult - """ - return util.deserialize_model(dikt, cls) - - @property - def code(self) -> int: - """Gets the code of this DatasetEvaluationResult. - - - :return: The code of this DatasetEvaluationResult. - :rtype: int - """ - return self._code - - @code.setter - def code(self, code: int): - """Sets the code of this DatasetEvaluationResult. - - - :param code: The code of this DatasetEvaluationResult. - :type code: int - """ - - self._code = code - - @property - def request_id(self) -> str: - """Gets the request_id of this DatasetEvaluationResult. - - - :return: The request_id of this DatasetEvaluationResult. - :rtype: str - """ - return self._request_id - - @request_id.setter - def request_id(self, request_id: str): - """Sets the request_id of this DatasetEvaluationResult. - - - :param request_id: The request_id of this DatasetEvaluationResult. - :type request_id: str - """ - - self._request_id = request_id - - @property - def message(self) -> str: - """Gets the message of this DatasetEvaluationResult. - - - :return: The message of this DatasetEvaluationResult. - :rtype: str - """ - return self._message - - @message.setter - def message(self, message: str): - """Sets the message of this DatasetEvaluationResult. - - - :param message: The message of this DatasetEvaluationResult. 
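A note on the evaluation models being deleted here: `DatasetEvaluationElement` carries the raw detection counts (`tp`, `fp`, `fn`) together with the derived `ap` and `ar`. The spec does not say how YMIR averages those counts into ap/ar, so the sketch below shows only the standard precision/recall relationship the counts imply, not YMIR's own evaluator:

```python
# Standard precision/recall from detection counts. This mirrors the tp/fp/fn
# fields of DatasetEvaluationElement but is a textbook illustration, not code
# taken from YMIR's evaluation pipeline.
def precision(tp: int, fp: int) -> float:
    return tp / (tp + fp) if (tp + fp) else 0.0


def recall(tp: int, fn: int) -> float:
    return tp / (tp + fn) if (tp + fn) else 0.0


# e.g. tp=80, fp=20, fn=40 -> precision 0.80, recall ~0.67
assert abs(precision(80, 20) - 0.80) < 1e-9
assert abs(recall(80, 40) - 2 / 3) < 1e-9
```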
- :type message: str - """ - - self._message = message - - @property - def result(self) -> Dict[str, DatasetEvaluationResultResult]: - """Gets the result of this DatasetEvaluationResult. - - - :return: The result of this DatasetEvaluationResult. - :rtype: Dict[str, DatasetEvaluationResultResult] - """ - return self._result - - @result.setter - def result(self, result: Dict[str, DatasetEvaluationResultResult]): - """Sets the result of this DatasetEvaluationResult. - - - :param result: The result of this DatasetEvaluationResult. - :type result: Dict[str, DatasetEvaluationResultResult] - """ - - self._result = result diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/dataset_evaluation_result_result.py b/ymir/backend/src/ymir_viz/src/swagger_models/dataset_evaluation_result_result.py deleted file mode 100644 index 48cff51cf7..0000000000 --- a/ymir/backend/src/ymir_viz/src/swagger_models/dataset_evaluation_result_result.py +++ /dev/null @@ -1,115 +0,0 @@ -# coding: utf-8 - -from __future__ import absolute_import -from datetime import date, datetime # noqa: F401 - -from typing import List, Dict # noqa: F401 - -from src.swagger_models.base_model_ import Model -from src.swagger_models.dataset_evaluation import DatasetEvaluation # noqa: F401,E501 -from src import util - - -class DatasetEvaluationResultResult(Model): - """NOTE: This class is auto generated by the swagger code generator program. - - Do not edit the class manually. - """ - def __init__(self, conf_thr: float=None, iou_evaluations: Dict[str, DatasetEvaluation]=None, iou_averaged_evaluation: DatasetEvaluation=None): # noqa: E501 - """DatasetEvaluationResultResult - a model defined in Swagger - - :param conf_thr: The conf_thr of this DatasetEvaluationResultResult. # noqa: E501 - :type conf_thr: float - :param iou_evaluations: The iou_evaluations of this DatasetEvaluationResultResult. # noqa: E501 - :type iou_evaluations: Dict[str, DatasetEvaluation] - :param iou_averaged_evaluation: The iou_averaged_evaluation of this DatasetEvaluationResultResult. # noqa: E501 - :type iou_averaged_evaluation: DatasetEvaluation - """ - self.swagger_types = { - 'conf_thr': float, - 'iou_evaluations': Dict[str, DatasetEvaluation], - 'iou_averaged_evaluation': DatasetEvaluation - } - - self.attribute_map = { - 'conf_thr': 'conf_thr', - 'iou_evaluations': 'iou_evaluations', - 'iou_averaged_evaluation': 'iou_averaged_evaluation' - } - self._conf_thr = conf_thr - self._iou_evaluations = iou_evaluations - self._iou_averaged_evaluation = iou_averaged_evaluation - - @classmethod - def from_dict(cls, dikt) -> 'DatasetEvaluationResultResult': - """Returns the dict as a model - - :param dikt: A dict. - :type: dict - :return: The DatasetEvaluationResult_result of this DatasetEvaluationResultResult. # noqa: E501 - :rtype: DatasetEvaluationResultResult - """ - return util.deserialize_model(dikt, cls) - - @property - def conf_thr(self) -> float: - """Gets the conf_thr of this DatasetEvaluationResultResult. - - - :return: The conf_thr of this DatasetEvaluationResultResult. - :rtype: float - """ - return self._conf_thr - - @conf_thr.setter - def conf_thr(self, conf_thr: float): - """Sets the conf_thr of this DatasetEvaluationResultResult. - - - :param conf_thr: The conf_thr of this DatasetEvaluationResultResult. - :type conf_thr: float - """ - - self._conf_thr = conf_thr - - @property - def iou_evaluations(self) -> Dict[str, DatasetEvaluation]: - """Gets the iou_evaluations of this DatasetEvaluationResultResult. 
- - - :return: The iou_evaluations of this DatasetEvaluationResultResult. - :rtype: Dict[str, DatasetEvaluation] - """ - return self._iou_evaluations - - @iou_evaluations.setter - def iou_evaluations(self, iou_evaluations: Dict[str, DatasetEvaluation]): - """Sets the iou_evaluations of this DatasetEvaluationResultResult. - - - :param iou_evaluations: The iou_evaluations of this DatasetEvaluationResultResult. - :type iou_evaluations: Dict[str, DatasetEvaluation] - """ - - self._iou_evaluations = iou_evaluations - - @property - def iou_averaged_evaluation(self) -> DatasetEvaluation: - """Gets the iou_averaged_evaluation of this DatasetEvaluationResultResult. - - - :return: The iou_averaged_evaluation of this DatasetEvaluationResultResult. - :rtype: DatasetEvaluation - """ - return self._iou_averaged_evaluation - - @iou_averaged_evaluation.setter - def iou_averaged_evaluation(self, iou_averaged_evaluation: DatasetEvaluation): - """Sets the iou_averaged_evaluation of this DatasetEvaluationResultResult. - - - :param iou_averaged_evaluation: The iou_averaged_evaluation of this DatasetEvaluationResultResult. - :type iou_averaged_evaluation: DatasetEvaluation - """ - - self._iou_averaged_evaluation = iou_averaged_evaluation diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/dataset_result.py b/ymir/backend/src/ymir_viz/src/swagger_models/dataset_result.py deleted file mode 100644 index aa8a36798f..0000000000 --- a/ymir/backend/src/ymir_viz/src/swagger_models/dataset_result.py +++ /dev/null @@ -1,142 +0,0 @@ -# coding: utf-8 - -from __future__ import absolute_import -from datetime import date, datetime # noqa: F401 - -from typing import List, Dict # noqa: F401 - -from src.swagger_models.base_model_ import Model -from src.swagger_models.api_response import ApiResponse # noqa: F401,E501 -from src.swagger_models.dataset_result_result import DatasetResultResult # noqa: F401,E501 -from src import util - - -class DatasetResult(Model): - """NOTE: This class is auto generated by the swagger code generator program. - - Do not edit the class manually. - """ - def __init__(self, code: int=None, request_id: str=None, message: str=None, result: DatasetResultResult=None): # noqa: E501 - """DatasetResult - a model defined in Swagger - - :param code: The code of this DatasetResult. # noqa: E501 - :type code: int - :param request_id: The request_id of this DatasetResult. # noqa: E501 - :type request_id: str - :param message: The message of this DatasetResult. # noqa: E501 - :type message: str - :param result: The result of this DatasetResult. # noqa: E501 - :type result: DatasetResultResult - """ - self.swagger_types = { - 'code': int, - 'request_id': str, - 'message': str, - 'result': DatasetResultResult - } - - self.attribute_map = { - 'code': 'code', - 'request_id': 'request_id', - 'message': 'message', - 'result': 'result' - } - self._code = code - self._request_id = request_id - self._message = message - self._result = result - - @classmethod - def from_dict(cls, dikt) -> 'DatasetResult': - """Returns the dict as a model - - :param dikt: A dict. - :type: dict - :return: The DatasetResult of this DatasetResult. # noqa: E501 - :rtype: DatasetResult - """ - return util.deserialize_model(dikt, cls) - - @property - def code(self) -> int: - """Gets the code of this DatasetResult. - - - :return: The code of this DatasetResult. - :rtype: int - """ - return self._code - - @code.setter - def code(self, code: int): - """Sets the code of this DatasetResult. - - - :param code: The code of this DatasetResult. 
- :type code: int - """ - - self._code = code - - @property - def request_id(self) -> str: - """Gets the request_id of this DatasetResult. - - - :return: The request_id of this DatasetResult. - :rtype: str - """ - return self._request_id - - @request_id.setter - def request_id(self, request_id: str): - """Sets the request_id of this DatasetResult. - - - :param request_id: The request_id of this DatasetResult. - :type request_id: str - """ - - self._request_id = request_id - - @property - def message(self) -> str: - """Gets the message of this DatasetResult. - - - :return: The message of this DatasetResult. - :rtype: str - """ - return self._message - - @message.setter - def message(self, message: str): - """Sets the message of this DatasetResult. - - - :param message: The message of this DatasetResult. - :type message: str - """ - - self._message = message - - @property - def result(self) -> DatasetResultResult: - """Gets the result of this DatasetResult. - - - :return: The result of this DatasetResult. - :rtype: DatasetResultResult - """ - return self._result - - @result.setter - def result(self, result: DatasetResultResult): - """Sets the result of this DatasetResult. - - - :param result: The result of this DatasetResult. - :type result: DatasetResultResult - """ - - self._result = result diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/dataset_result_result.py b/ymir/backend/src/ymir_viz/src/swagger_models/dataset_result_result.py deleted file mode 100644 index e808429957..0000000000 --- a/ymir/backend/src/ymir_viz/src/swagger_models/dataset_result_result.py +++ /dev/null @@ -1,167 +0,0 @@ -# coding: utf-8 - -from __future__ import absolute_import -from datetime import date, datetime # noqa: F401 - -from typing import List, Dict # noqa: F401 - -from src.swagger_models.base_model_ import Model -from src.swagger_models.dataset_result_result_negative_info import DatasetResultResultNegativeInfo # noqa: F401,E501 -from src import util - - -class DatasetResultResult(Model): - """NOTE: This class is auto generated by the swagger code generator program. - - Do not edit the class manually. - """ - def __init__(self, total_images_cnt: int=None, class_ids_count: object=None, class_names_count: object=None, ignored_labels: object=None, negative_info: DatasetResultResultNegativeInfo=None): # noqa: E501 - """DatasetResultResult - a model defined in Swagger - - :param total_images_cnt: The total_images_cnt of this DatasetResultResult. # noqa: E501 - :type total_images_cnt: int - :param class_ids_count: The class_ids_count of this DatasetResultResult. # noqa: E501 - :type class_ids_count: object - :param class_names_count: The class_names_count of this DatasetResultResult. # noqa: E501 - :type class_names_count: object - :param ignored_labels: The ignored_labels of this DatasetResultResult. # noqa: E501 - :type ignored_labels: object - :param negative_info: The negative_info of this DatasetResultResult. 
# noqa: E501 - :type negative_info: DatasetResultResultNegativeInfo - """ - self.swagger_types = { - 'total_images_cnt': int, - 'class_ids_count': object, - 'class_names_count': object, - 'ignored_labels': object, - 'negative_info': DatasetResultResultNegativeInfo - } - - self.attribute_map = { - 'total_images_cnt': 'total_images_cnt', - 'class_ids_count': 'class_ids_count', - 'class_names_count': 'class_names_count', - 'ignored_labels': 'ignored_labels', - 'negative_info': 'negative_info' - } - self._total_images_cnt = total_images_cnt - self._class_ids_count = class_ids_count - self._class_names_count = class_names_count - self._ignored_labels = ignored_labels - self._negative_info = negative_info - - @classmethod - def from_dict(cls, dikt) -> 'DatasetResultResult': - """Returns the dict as a model - - :param dikt: A dict. - :type: dict - :return: The DatasetResult_result of this DatasetResultResult. # noqa: E501 - :rtype: DatasetResultResult - """ - return util.deserialize_model(dikt, cls) - - @property - def total_images_cnt(self) -> int: - """Gets the total_images_cnt of this DatasetResultResult. - - - :return: The total_images_cnt of this DatasetResultResult. - :rtype: int - """ - return self._total_images_cnt - - @total_images_cnt.setter - def total_images_cnt(self, total_images_cnt: int): - """Sets the total_images_cnt of this DatasetResultResult. - - - :param total_images_cnt: The total_images_cnt of this DatasetResultResult. - :type total_images_cnt: int - """ - - self._total_images_cnt = total_images_cnt - - @property - def class_ids_count(self) -> object: - """Gets the class_ids_count of this DatasetResultResult. - - - :return: The class_ids_count of this DatasetResultResult. - :rtype: object - """ - return self._class_ids_count - - @class_ids_count.setter - def class_ids_count(self, class_ids_count: object): - """Sets the class_ids_count of this DatasetResultResult. - - - :param class_ids_count: The class_ids_count of this DatasetResultResult. - :type class_ids_count: object - """ - - self._class_ids_count = class_ids_count - - @property - def class_names_count(self) -> object: - """Gets the class_names_count of this DatasetResultResult. - - - :return: The class_names_count of this DatasetResultResult. - :rtype: object - """ - return self._class_names_count - - @class_names_count.setter - def class_names_count(self, class_names_count: object): - """Sets the class_names_count of this DatasetResultResult. - - - :param class_names_count: The class_names_count of this DatasetResultResult. - :type class_names_count: object - """ - - self._class_names_count = class_names_count - - @property - def ignored_labels(self) -> object: - """Gets the ignored_labels of this DatasetResultResult. - - - :return: The ignored_labels of this DatasetResultResult. - :rtype: object - """ - return self._ignored_labels - - @ignored_labels.setter - def ignored_labels(self, ignored_labels: object): - """Sets the ignored_labels of this DatasetResultResult. - - - :param ignored_labels: The ignored_labels of this DatasetResultResult. - :type ignored_labels: object - """ - - self._ignored_labels = ignored_labels - - @property - def negative_info(self) -> DatasetResultResultNegativeInfo: - """Gets the negative_info of this DatasetResultResult. - - - :return: The negative_info of this DatasetResultResult. 
- :rtype: DatasetResultResultNegativeInfo - """ - return self._negative_info - - @negative_info.setter - def negative_info(self, negative_info: DatasetResultResultNegativeInfo): - """Sets the negative_info of this DatasetResultResult. - - - :param negative_info: The negative_info of this DatasetResultResult. - :type negative_info: DatasetResultResultNegativeInfo - """ - - self._negative_info = negative_info diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/dataset_result_result_negative_info.py b/ymir/backend/src/ymir_viz/src/swagger_models/dataset_result_result_negative_info.py deleted file mode 100644 index e5fc22e3dc..0000000000 --- a/ymir/backend/src/ymir_viz/src/swagger_models/dataset_result_result_negative_info.py +++ /dev/null @@ -1,88 +0,0 @@ -# coding: utf-8 - -from __future__ import absolute_import -from datetime import date, datetime # noqa: F401 - -from typing import List, Dict # noqa: F401 - -from src.swagger_models.base_model_ import Model -from src import util - - -class DatasetResultResultNegativeInfo(Model): - """NOTE: This class is auto generated by the swagger code generator program. - - Do not edit the class manually. - """ - def __init__(self, negative_images_cnt: int=None, project_negative_images_cnt: int=None): # noqa: E501 - """DatasetResultResultNegativeInfo - a model defined in Swagger - - :param negative_images_cnt: The negative_images_cnt of this DatasetResultResultNegativeInfo. # noqa: E501 - :type negative_images_cnt: int - :param project_negative_images_cnt: The project_negative_images_cnt of this DatasetResultResultNegativeInfo. # noqa: E501 - :type project_negative_images_cnt: int - """ - self.swagger_types = { - 'negative_images_cnt': int, - 'project_negative_images_cnt': int - } - - self.attribute_map = { - 'negative_images_cnt': 'negative_images_cnt', - 'project_negative_images_cnt': 'project_negative_images_cnt' - } - self._negative_images_cnt = negative_images_cnt - self._project_negative_images_cnt = project_negative_images_cnt - - @classmethod - def from_dict(cls, dikt) -> 'DatasetResultResultNegativeInfo': - """Returns the dict as a model - - :param dikt: A dict. - :type: dict - :return: The DatasetResult_result_negative_info of this DatasetResultResultNegativeInfo. # noqa: E501 - :rtype: DatasetResultResultNegativeInfo - """ - return util.deserialize_model(dikt, cls) - - @property - def negative_images_cnt(self) -> int: - """Gets the negative_images_cnt of this DatasetResultResultNegativeInfo. - - - :return: The negative_images_cnt of this DatasetResultResultNegativeInfo. - :rtype: int - """ - return self._negative_images_cnt - - @negative_images_cnt.setter - def negative_images_cnt(self, negative_images_cnt: int): - """Sets the negative_images_cnt of this DatasetResultResultNegativeInfo. - - - :param negative_images_cnt: The negative_images_cnt of this DatasetResultResultNegativeInfo. - :type negative_images_cnt: int - """ - - self._negative_images_cnt = negative_images_cnt - - @property - def project_negative_images_cnt(self) -> int: - """Gets the project_negative_images_cnt of this DatasetResultResultNegativeInfo. - - - :return: The project_negative_images_cnt of this DatasetResultResultNegativeInfo. - :rtype: int - """ - return self._project_negative_images_cnt - - @project_negative_images_cnt.setter - def project_negative_images_cnt(self, project_negative_images_cnt: int): - """Sets the project_negative_images_cnt of this DatasetResultResultNegativeInfo. 
- - - :param project_negative_images_cnt: The project_negative_images_cnt of this DatasetResultResultNegativeInfo. - :type project_negative_images_cnt: int - """ - - self._project_negative_images_cnt = project_negative_images_cnt diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/model_result.py b/ymir/backend/src/ymir_viz/src/swagger_models/model_result.py deleted file mode 100644 index ae39409b13..0000000000 --- a/ymir/backend/src/ymir_viz/src/swagger_models/model_result.py +++ /dev/null @@ -1,142 +0,0 @@ -# coding: utf-8 - -from __future__ import absolute_import -from datetime import date, datetime # noqa: F401 - -from typing import List, Dict # noqa: F401 - -from src.swagger_models.base_model_ import Model -from src.swagger_models.api_response import ApiResponse # noqa: F401,E501 -from src.swagger_models.model_result_result import ModelResultResult # noqa: F401,E501 -from src import util - - -class ModelResult(Model): - """NOTE: This class is auto generated by the swagger code generator program. - - Do not edit the class manually. - """ - def __init__(self, code: int=None, request_id: str=None, message: str=None, result: ModelResultResult=None): # noqa: E501 - """ModelResult - a model defined in Swagger - - :param code: The code of this ModelResult. # noqa: E501 - :type code: int - :param request_id: The request_id of this ModelResult. # noqa: E501 - :type request_id: str - :param message: The message of this ModelResult. # noqa: E501 - :type message: str - :param result: The result of this ModelResult. # noqa: E501 - :type result: ModelResultResult - """ - self.swagger_types = { - 'code': int, - 'request_id': str, - 'message': str, - 'result': ModelResultResult - } - - self.attribute_map = { - 'code': 'code', - 'request_id': 'request_id', - 'message': 'message', - 'result': 'result' - } - self._code = code - self._request_id = request_id - self._message = message - self._result = result - - @classmethod - def from_dict(cls, dikt) -> 'ModelResult': - """Returns the dict as a model - - :param dikt: A dict. - :type: dict - :return: The ModelResult of this ModelResult. # noqa: E501 - :rtype: ModelResult - """ - return util.deserialize_model(dikt, cls) - - @property - def code(self) -> int: - """Gets the code of this ModelResult. - - - :return: The code of this ModelResult. - :rtype: int - """ - return self._code - - @code.setter - def code(self, code: int): - """Sets the code of this ModelResult. - - - :param code: The code of this ModelResult. - :type code: int - """ - - self._code = code - - @property - def request_id(self) -> str: - """Gets the request_id of this ModelResult. - - - :return: The request_id of this ModelResult. - :rtype: str - """ - return self._request_id - - @request_id.setter - def request_id(self, request_id: str): - """Sets the request_id of this ModelResult. - - - :param request_id: The request_id of this ModelResult. - :type request_id: str - """ - - self._request_id = request_id - - @property - def message(self) -> str: - """Gets the message of this ModelResult. - - - :return: The message of this ModelResult. - :rtype: str - """ - return self._message - - @message.setter - def message(self, message: str): - """Sets the message of this ModelResult. - - - :param message: The message of this ModelResult. - :type message: str - """ - - self._message = message - - @property - def result(self) -> ModelResultResult: - """Gets the result of this ModelResult. - - - :return: The result of this ModelResult. 
- :rtype: ModelResultResult - """ - return self._result - - @result.setter - def result(self, result: ModelResultResult): - """Sets the result of this ModelResult. - - - :param result: The result of this ModelResult. - :type result: ModelResultResult - """ - - self._result = result diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/model_result_result.py b/ymir/backend/src/ymir_viz/src/swagger_models/model_result_result.py deleted file mode 100644 index 486c8c55f5..0000000000 --- a/ymir/backend/src/ymir_viz/src/swagger_models/model_result_result.py +++ /dev/null @@ -1,142 +0,0 @@ -# coding: utf-8 - -from __future__ import absolute_import -from datetime import date, datetime # noqa: F401 - -from typing import List, Dict # noqa: F401 - -from src.swagger_models.base_model_ import Model -from src import util - - -class ModelResultResult(Model): - """NOTE: This class is auto generated by the swagger code generator program. - - Do not edit the class manually. - """ - def __init__(self, model_id: str=None, model_m_ap: int=None, task_parameters: str=None, executor_config: str=None): # noqa: E501 - """ModelResultResult - a model defined in Swagger - - :param model_id: The model_id of this ModelResultResult. # noqa: E501 - :type model_id: str - :param model_m_ap: The model_m_ap of this ModelResultResult. # noqa: E501 - :type model_m_ap: int - :param task_parameters: The task_parameters of this ModelResultResult. # noqa: E501 - :type task_parameters: str - :param executor_config: The executor_config of this ModelResultResult. # noqa: E501 - :type executor_config: str - """ - self.swagger_types = { - 'model_id': str, - 'model_m_ap': int, - 'task_parameters': str, - 'executor_config': str - } - - self.attribute_map = { - 'model_id': 'model_id', - 'model_m_ap': 'model_mAP', - 'task_parameters': 'task_parameters', - 'executor_config': 'executor_config' - } - self._model_id = model_id - self._model_m_ap = model_m_ap - self._task_parameters = task_parameters - self._executor_config = executor_config - - @classmethod - def from_dict(cls, dikt) -> 'ModelResultResult': - """Returns the dict as a model - - :param dikt: A dict. - :type: dict - :return: The ModelResult_result of this ModelResultResult. # noqa: E501 - :rtype: ModelResultResult - """ - return util.deserialize_model(dikt, cls) - - @property - def model_id(self) -> str: - """Gets the model_id of this ModelResultResult. - - - :return: The model_id of this ModelResultResult. - :rtype: str - """ - return self._model_id - - @model_id.setter - def model_id(self, model_id: str): - """Sets the model_id of this ModelResultResult. - - - :param model_id: The model_id of this ModelResultResult. - :type model_id: str - """ - - self._model_id = model_id - - @property - def model_m_ap(self) -> int: - """Gets the model_m_ap of this ModelResultResult. - - mean average precision of the model # noqa: E501 - - :return: The model_m_ap of this ModelResultResult. - :rtype: int - """ - return self._model_m_ap - - @model_m_ap.setter - def model_m_ap(self, model_m_ap: int): - """Sets the model_m_ap of this ModelResultResult. - - mean average precision of the model # noqa: E501 - - :param model_m_ap: The model_m_ap of this ModelResultResult. - :type model_m_ap: int - """ - - self._model_m_ap = model_m_ap - - @property - def task_parameters(self) -> str: - """Gets the task_parameters of this ModelResultResult. - - - :return: The task_parameters of this ModelResultResult. 
- :rtype: str - """ - return self._task_parameters - - @task_parameters.setter - def task_parameters(self, task_parameters: str): - """Sets the task_parameters of this ModelResultResult. - - - :param task_parameters: The task_parameters of this ModelResultResult. - :type task_parameters: str - """ - - self._task_parameters = task_parameters - - @property - def executor_config(self) -> str: - """Gets the executor_config of this ModelResultResult. - - - :return: The executor_config of this ModelResultResult. - :rtype: str - """ - return self._executor_config - - @executor_config.setter - def executor_config(self, executor_config: str): - """Sets the executor_config of this ModelResultResult. - - - :param executor_config: The executor_config of this ModelResultResult. - :type executor_config: str - """ - - self._executor_config = executor_config diff --git a/ymir/backend/src/ymir_viz/src/type_util.py b/ymir/backend/src/ymir_viz/src/type_util.py deleted file mode 100644 index 0563f81fd5..0000000000 --- a/ymir/backend/src/ymir_viz/src/type_util.py +++ /dev/null @@ -1,32 +0,0 @@ -# coding: utf-8 - -import sys - -if sys.version_info < (3, 7): - import typing - - def is_generic(klass): - """ Determine whether klass is a generic class """ - return type(klass) == typing.GenericMeta - - def is_dict(klass): - """ Determine whether klass is a Dict """ - return klass.__extra__ == dict - - def is_list(klass): - """ Determine whether klass is a List """ - return klass.__extra__ == list - -else: - - def is_generic(klass): - """ Determine whether klass is a generic class """ - return hasattr(klass, '__origin__') - - def is_dict(klass): - """ Determine whether klass is a Dict """ - return klass.__origin__ == dict - - def is_list(klass): - """ Determine whether klass is a List """ - return klass.__origin__ == list diff --git a/ymir/backend/src/ymir_viz/src/util.py b/ymir/backend/src/ymir_viz/src/util.py deleted file mode 100644 index 2f918de9f8..0000000000 --- a/ymir/backend/src/ymir_viz/src/util.py +++ /dev/null @@ -1,140 +0,0 @@ -import datetime - -import six - -from src import type_util - - -def _deserialize(data, klass): - """Deserializes dict, list, str into an object. - - :param data: dict, list or str. - :param klass: class literal, or string of class name. - - :return: object. - """ - if data is None: - return None - - if klass in six.integer_types or klass in (float, str, bool, bytearray): - return _deserialize_primitive(data, klass) - elif klass == object: - return _deserialize_object(data) - elif klass == datetime.date: - return deserialize_date(data) - elif klass == datetime.datetime: - return deserialize_datetime(data) - elif type_util.is_generic(klass): - if type_util.is_list(klass): - return _deserialize_list(data, klass.__args__[0]) - if type_util.is_dict(klass): - return _deserialize_dict(data, klass.__args__[1]) - else: - return deserialize_model(data, klass) - - -def _deserialize_primitive(data, klass): - """Deserializes to primitive type. - - :param data: data to deserialize. - :param klass: class literal. - - :return: int, long, float, str, bool. - :rtype: int | long | float | str | bool - """ - try: - value = klass(data) - except UnicodeEncodeError: - value = six.u(data) - except TypeError: - value = data - return value - - -def _deserialize_object(value): - """Return an original value. - - :return: object. - """ - return value - - -def deserialize_date(string): - """Deserializes string to date. - - :param string: str. - :type string: str - :return: date. 
- :rtype: date - """ - try: - from dateutil.parser import parse - return parse(string).date() - except ImportError: - return string - - -def deserialize_datetime(string): - """Deserializes string to datetime. - - The string should be in iso8601 datetime format. - - :param string: str. - :type string: str - :return: datetime. - :rtype: datetime - """ - try: - from dateutil.parser import parse - return parse(string) - except ImportError: - return string - - -def deserialize_model(data, klass): - """Deserializes list or dict to model. - - :param data: dict, list. - :type data: dict | list - :param klass: class literal. - :return: model object. - """ - instance = klass() - - if not instance.swagger_types: - return data - - for attr, attr_type in six.iteritems(instance.swagger_types): - if data is not None \ - and instance.attribute_map[attr] in data \ - and isinstance(data, (list, dict)): - value = data[instance.attribute_map[attr]] - setattr(instance, attr, _deserialize(value, attr_type)) - - return instance - - -def _deserialize_list(data, boxed_type): - """Deserializes a list and its elements. - - :param data: list to deserialize. - :type data: list - :param boxed_type: class literal. - - :return: deserialized list. - :rtype: list - """ - return [_deserialize(sub_data, boxed_type) for sub_data in data] - - -def _deserialize_dict(data, boxed_type): - """Deserializes a dict and its elements. - - :param data: dict to deserialize. - :type data: dict - :param boxed_type: class literal. - - :return: deserialized dict. - :rtype: dict - """ - return {k: _deserialize(v, boxed_type) for k, v in six.iteritems(data)} diff --git a/ymir/backend/src/ymir_viz/src/viz_models/__init__.py b/ymir/backend/src/ymir_viz/src/viz_models/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/ymir/backend/src/ymir_viz/src/viz_models/asset.py b/ymir/backend/src/ymir_viz/src/viz_models/asset.py deleted file mode 100644 index d9fc985765..0000000000 --- a/ymir/backend/src/ymir_viz/src/viz_models/asset.py +++ /dev/null @@ -1,195 +0,0 @@ -import logging -import threading -from typing import Dict, Union - -import yaml - -from src.config import viz_settings -from src.libs import utils -from src.libs.cache import redis_cache -from src.viz_models import pb_reader - - -class AssetsModel: - def __init__(self, user_id: str, repo_id: str, branch_id: str): - self.user_id = user_id - self.repo_id = repo_id - self.branch_id = branch_id - - self.redis_key_prefix = f"{user_id}_{repo_id}_{branch_id}:{viz_settings.VIZ_MIDDLE_VERSION}" - self.key_asset_detail = f"{self.redis_key_prefix}:{viz_settings.VIZ_KEY_ASSET_DETAIL}" - self.key_asset_index = f"{self.redis_key_prefix}:{viz_settings.VIZ_KEY_ASSET_INDEX}" - self.key_cache_status = f"{self.redis_key_prefix}:{viz_settings.VIZ_CACHE_STATUS}" - - def check_cache_existence(self) -> int: - detail_existence = redis_cache.exists(self.key_asset_detail) - all_index_key = f"{self.key_asset_index}:{viz_settings.VIZ_ALL_INDEX_CLASSIDS}" - index_existence = redis_cache.exists(all_index_key) - cache_status = redis_cache.get(self.key_cache_status) - if cache_status.get("flag"): - cache_flag = True - else: - cache_flag = False - - flag = detail_existence and index_existence and cache_flag - - return flag - - @classmethod - @utils.time_it - def set_asset_content_cache( - cls, - asset_content: Dict, - key_asset_detail: str, - key_asset_index: str, - key_cache_status: str, - ) -> None: - """ - set cache to Redis - hash xxx:detail {'asset_id': {'metadata': xxx, 'annotations': xxx, 
'class_ids': xx}} - list xxx:class_id ['asset_id',] - str xxx:class_ids_count "{3:44, }" - str xxx:class_names_count "{'cat':44, }" - str xxx:ignored_labels "{'cat':5, }" - str xxx:negative_info "{ - "negative_images_cnt": 0, - "project_negative_images_cnt": 0}" - str xxx:total_images_cnt "1" - """ - if redis_cache.get(key_cache_status): - logging.info(f"Skip setting cache {key_asset_detail}, The other thread is writing cache now") - return - - logging.info(f"start setting cache {key_asset_detail}") - redis_cache.set(key_cache_status, {"flag": 0}) - with redis_cache.pipeline() as pipe: - for asset_id, asset_id_detail in asset_content["asset_ids_detail"].items(): - pipe.hset(name=key_asset_detail, mapping={asset_id: yaml.safe_dump(asset_id_detail)}) - pipe.execute() - - with redis_cache.pipeline() as pipe: - for class_id, assets_list in asset_content["class_ids_index"].items(): - if assets_list: - pipe.rpush(f"{key_asset_index}:{class_id}", *assets_list) - pipe.execute() - - redis_cache.set(key_cache_status, {"flag": 1}) - logging.info("finish setting cache!!!") - - def trigger_cache_generator(self, asset_content: Dict) -> None: - # async generate middle structure content cache - consumer_task = threading.Thread( - target=self.set_asset_content_cache, - args=( - asset_content, - self.key_asset_detail, - self.key_asset_index, - self.key_cache_status, - )) - consumer_task.start() - - @classmethod - def format_assets_info(cls, assets_content: Dict, offset: int, limit: int, class_id: int) -> Dict: - """ - return structure like this: - { - 'elements': [{'asset_id':xxx, 'class_ids':[2,3]},], - 'limit': 3, - 'offset': 1, - 'total': 234 - } - """ - asset_ids = assets_content["class_ids_index"][class_id][offset:limit + offset] - elements = [ - dict(asset_id=asset_id, class_ids=assets_content["asset_ids_detail"][asset_id]["class_ids"]) - for asset_id in asset_ids - ] - - result = dict(elements=elements, - limit=limit, - offset=offset, - total=len(assets_content["class_ids_index"][class_id]),) - - return result - - def get_assets_info_from_cache(self, offset: int, limit: int, class_id: int) -> Dict: - """ - return structure like this: - { - 'elements': [{'asset_id':xxx, 'class_ids':[2,3]},], - 'limit': 3, - 'offset': 1, - 'total': 234 - } - """ - asset_ids = redis_cache.lrange(f"{self.key_asset_index}:{class_id}", offset, offset + limit - 1) - assets_detail = redis_cache.hmget(self.key_asset_detail, asset_ids) - - elements = [] - for asset_id, asset_detail in zip(asset_ids, assets_detail): - elements.append(dict(asset_id=asset_id, class_ids=yaml.safe_load(asset_detail)["class_ids"])) - total = redis_cache.llen(f"{self.key_asset_index}:{class_id}") - result = dict(elements=elements, limit=limit, offset=offset, total=total) - - return result - - @utils.time_it - def get_assets_info(self, offset: int, limit: int, class_id: int) -> Dict: - """ - example return data: - [{ - 'annotations': [{'box': {'h': 329, 'w': 118, 'x': 1, 'y': 47}, 'class_id': 2}], - 'class_ids': [2, 30], - 'metadata': {'asset_type': 1, 'height': 375, 'image_channels': 3, 'timestamp': {'start': 123}, 'width': 500} - }] - """ - class_id = class_id if class_id is not None else viz_settings.VIZ_ALL_INDEX_CLASSIDS - - if self.check_cache_existence(): - result = self.get_assets_info_from_cache(offset=offset, limit=limit, class_id=class_id) - logging.info("get_assets_info from cache") - else: - assets_content = pb_reader.MirStorageLoader( - sandbox_root=viz_settings.BACKEND_SANDBOX_ROOT, - user_id=self.user_id, - repo_id=self.repo_id, - 
branch_id=self.branch_id, - task_id=self.branch_id, - ).get_assets_content() - result = self.format_assets_info(assets_content=assets_content, - offset=offset, - limit=limit, - class_id=class_id) - - # asynchronous generate cache content,and we can add some policy to trigger it later - self.trigger_cache_generator(assets_content) - - return result - - @utils.time_it - def get_asset_id_info(self, asset_id: str) -> Union[Dict, None]: - """ - example return data: - { - 'annotations': [{'box': {'h': 329, 'w': 118, 'x': 1, 'y': 47}, 'class_id': 2}], - 'class_ids': [2, 30], - 'metadata': {'asset_type': 1, 'height': 375, 'image_channels': 3, 'timestamp': {'start': 123}, 'width': 500} - } - """ - if self.check_cache_existence(): - result = redis_cache.hget(self.key_asset_detail, asset_id) - logging.info(f"get_asset_id: {asset_id} from cache") - else: - assets_content = pb_reader.MirStorageLoader( - sandbox_root=viz_settings.BACKEND_SANDBOX_ROOT, - user_id=self.user_id, - repo_id=self.repo_id, - branch_id=self.branch_id, - task_id=self.branch_id, - ).get_assets_content() - result = assets_content["asset_ids_detail"][asset_id] - - # asynchronous generate cache content,and we can add some policy to trigger it later - self.trigger_cache_generator(assets_content) - - return result diff --git a/ymir/backend/src/ymir_viz/src/viz_models/pb_reader.py b/ymir/backend/src/ymir_viz/src/viz_models/pb_reader.py deleted file mode 100644 index 0178a58452..0000000000 --- a/ymir/backend/src/ymir_viz/src/viz_models/pb_reader.py +++ /dev/null @@ -1,144 +0,0 @@ -import logging -import os -from typing import Dict - -from mir.tools import mir_storage_ops, errors - -from src.config import viz_settings -from src.libs import exceptions - - -class MirStorageLoader: - def __init__(self, sandbox_root: str, user_id: str, repo_id: str, branch_id: str, task_id: str): - self.mir_root = os.path.join(sandbox_root, user_id, repo_id) - self.branch_id = branch_id - if not task_id: - task_id = branch_id - self.task_id = task_id - - def get_model_info(self) -> Dict: - try: - model_info = mir_storage_ops.MirStorageOps.load_single_model( - mir_root=self.mir_root, - mir_branch=self.branch_id, - mir_task_id=self.task_id, - ) - except errors.MirError: - raise exceptions.ModelNotExists(f"model {self.branch_id} not found") - - return model_info - - def get_dataset_info(self) -> Dict: - """ - return value example: - { - "class_ids_count": {3: 34}, - "class_names_count": {'cat': 34}, - "ignored_labels": {'cat':5, }, - "negative_info": { - "negative_images_cnt": 0, - "project_negative_images_cnt": 0, - }, - "total_images_cnt": 1, - } - """ - try: - dataset_info = mir_storage_ops.MirStorageOps.load_single_dataset( - mir_root=self.mir_root, - mir_branch=self.branch_id, - mir_task_id=self.task_id, - ) - except ValueError as e: - logging.error(e) - raise exceptions.BranchNotExists(f"dataset {self.branch_id} not exist from ymir command") - - return dataset_info - - def get_assets_content(self) -> Dict: - """ - return value example: - { - "all_asset_ids": ["asset_id"], - "asset_ids_detail": { - "asset_id": { - "metadata": {"asset_type": 2, "width": 1080, "height": 1620,}, - "annotations": [{"box": {"x": 26, "y": 189, "w": 19, "h": 50}, "class_id": 2}], - "class_ids": [2, 3], - } - }, - "class_ids_index": {3: ["asset_id",], - } - """ - try: - assets_info = mir_storage_ops.MirStorageOps.load_assets_content( - mir_root=self.mir_root, - mir_branch=self.branch_id, - mir_task_id=self.task_id, - ) - except ValueError as e: - logging.error(e) - raise 
exceptions.BranchNotExists(f"branch {self.branch_id} not exist from ymir command") - assets_info["class_ids_index"][viz_settings.VIZ_ALL_INDEX_CLASSIDS] = assets_info["all_asset_ids"] - - return assets_info - - def get_dataset_evaluations(self) -> Dict: - """ - return value example: - { - "dataset_hash":{ - "iou_averaged_evaluation":{ - "ci_averaged_evaluation":{ - "ap":1.0, - "ar":1.0, - "fn":0, - "fp":0, - "tp":4329 - }, - "ci_evaluations":{ - "4":{ - "ap":1.0, - "ar":1.0, - "fn":0, - "fp":0, - "tp":91 - } - }, - "topic_evaluations":{} - }, - "iou_evaluations":{ - "0.50":{ - "ci_averaged_evaluation":{ - "ap":1.0, - "ar":1.0, - "fn":0, - "fp":0, - "tp":4329 - }, - "ci_evaluations":{ - "2":{ - "ap":1.0, - "ar":1.0, - "fn":0, - "fp":0, - "tp":4238 - } - }, - "topic_evaluations":{} - }, - "topic_evaluations":{} - } - } - } - """ - try: - evaluation = mir_storage_ops.MirStorageOps.load_dataset_evaluations( - mir_root=self.mir_root, - mir_branch=self.branch_id, - mir_task_id=self.task_id, - ) - except errors.MirError: - logging.exception("evaluation %s not found", self.branch_id) - raise exceptions.DatasetEvaluationNotExists(f"evaluation {self.branch_id} not found") - - return evaluation diff --git a/ymir/backend/src/ymir_viz/tests/__init__.py b/ymir/backend/src/ymir_viz/tests/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/ymir/backend/src/ymir_viz/tests/conftest.py b/ymir/backend/src/ymir_viz/tests/conftest.py deleted file mode 100644 index bdde841cf8..0000000000 --- a/ymir/backend/src/ymir_viz/tests/conftest.py +++ /dev/null @@ -1,28 +0,0 @@ -# -*- coding: utf-8 -*- -import pytest - -from src.app import create_connexion_app - -connexion_app_cache = None - - -def get_app(): - global connexion_app_cache - if not connexion_app_cache: - config = dict() - connexion_app_cache = create_connexion_app(config) - return connexion_app_cache - - -@pytest.fixture(autouse=True) -def core_app(): - connexion_app = get_app() - app = connexion_app.app - - context = app.app_context() - context.push() - - try: - yield app - finally: - context.pop() diff --git a/ymir/backend/src/ymir_viz/tests/controllers/__init__.py b/ymir/backend/src/ymir_viz/tests/controllers/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/ymir/backend/src/ymir_viz/tests/controllers/conftest.py b/ymir/backend/src/ymir_viz/tests/controllers/conftest.py deleted file mode 100644 index 0e11da2836..0000000000 --- a/ymir/backend/src/ymir_viz/tests/controllers/conftest.py +++ /dev/null @@ -1,17 +0,0 @@ -import json - -import pytest -from flask import Response -from flask.testing import FlaskClient - - -class APIResponse(Response): - def json(self): - return json.loads(self.data) - - -@pytest.fixture() -def test_client(core_app): - core_app.test_client_class = FlaskClient - core_app.response_class = APIResponse - return core_app.test_client() diff --git a/ymir/backend/src/ymir_viz/tests/controllers/test_asset_controller.py b/ymir/backend/src/ymir_viz/tests/controllers/test_asset_controller.py deleted file mode 100644 index cd1f73cdb3..0000000000 --- a/ymir/backend/src/ymir_viz/tests/controllers/test_asset_controller.py +++ /dev/null @@ -1,79 +0,0 @@ -import pytest -from mir.tools.mir_storage_ops import MirStorageOps - - -@pytest.fixture() -def mock_mir_content(mocker): - mocker.patch.object(MirStorageOps, - "load_assets_content", - return_value={ - "all_asset_ids": ["asset_id"], - "asset_ids_detail": { - "asset_id": { - "metadata": { - "asset_type": 2, - "width": 1080, - "height": 1620 - 
}, - "annotations": [{ - "box": { - "x": 26, - "y": 189, - "w": 19, - "h": 50 - }, - "class_id": 2 - }], - "class_ids": [2], - } - }, - "class_ids_index": { - 2: ["asset_id"], - }, - }) - - -class TestAssetController: - def test_get_asserts_info(self, test_client, mock_mir_content): - user_id = "user_id" - repo_id = "repo_id" - branch_id = "branch_id" - expect_data = {"elements": [{"asset_id": "asset_id", "class_ids": [2]}], "limit": 20, "offset": 0, 'total': 1} - resp = test_client.get(f"/v1/users/{user_id}/repositories/{repo_id}/branches/{branch_id}/assets") - assert resp.status_code == 200 - assert resp.json()["result"] == expect_data - - expect_data = {'elements': [{'asset_id': 'asset_id', 'class_ids': [2]}], 'limit': 20, 'offset': 0, 'total': 1} - filter_class_id = "class_id=2" - resp = test_client.get( - f"/v1/users/{user_id}/repositories/{repo_id}/branches/{branch_id}/assets?{filter_class_id}") - assert resp.status_code == 200 - assert resp.json()["result"] == expect_data - - def test_get_assert_id_info(self, test_client, mock_mir_content): - user_id = "user_id" - repo_id = "repo_id" - branch_id = "branch_id" - asset_id = "asset_id" - - expect_data = { - 'annotations': [{ - 'box': { - 'h': 50, - 'w': 19, - 'x': 26, - 'y': 189 - }, - 'class_id': 2 - }], - 'class_ids': [2], - 'metadata': { - 'asset_type': 2, - 'height': 1620, - 'width': 1080 - } - } - resp = test_client.get(f"/v1/users/{user_id}/repositories/{repo_id}/branches/{branch_id}/assets/{asset_id}") - - assert resp.status_code == 200 - assert resp.json()["result"] == expect_data diff --git a/ymir/backend/src/ymir_viz/tests/controllers/test_dataset_controller.py b/ymir/backend/src/ymir_viz/tests/controllers/test_dataset_controller.py deleted file mode 100644 index 8ff94ac073..0000000000 --- a/ymir/backend/src/ymir_viz/tests/controllers/test_dataset_controller.py +++ /dev/null @@ -1,46 +0,0 @@ -from mir.tools.mir_storage_ops import MirStorageOps - - -class TestDatasetController: - def test_get_dataset_info(self, test_client, mocker): - user_id = "user_id" - repo_id = "repo_id" - branch_id = "branch_id" - - mir_dataset_content = { - "class_names_count": { - 'cat': 34 - }, - "class_ids_count": { - 3: 34 - }, - "ignored_labels": { - 'cat': 5, - }, - "negative_info": { - "negative_images_cnt": 0, - "project_negative_images_cnt": 0, - }, - "total_images_cnt": 1, - } - - mocker.patch.object(MirStorageOps, "load_single_dataset", return_value=mir_dataset_content) - resp = test_client.get(f"/v1/users/{user_id}/repositories/{repo_id}/branches/{branch_id}/datasets") - - assert resp.status_code == 200 - assert resp.json()["result"] == { - 'class_ids_count': { - '3': 34 # int is converted to str in json.dumps. 
- }, - 'class_names_count': { - 'cat': 34 - }, - 'ignored_labels': { - 'cat': 5 - }, - 'negative_info': { - 'negative_images_cnt': 0, - 'project_negative_images_cnt': 0 - }, - 'total_images_cnt': 1 - } diff --git a/ymir/backend/src/ymir_viz/tests/controllers/test_model_controller.py b/ymir/backend/src/ymir_viz/tests/controllers/test_model_controller.py deleted file mode 100644 index a77b450517..0000000000 --- a/ymir/backend/src/ymir_viz/tests/controllers/test_model_controller.py +++ /dev/null @@ -1,26 +0,0 @@ -from mir.tools.mir_storage_ops import MirStorageOps - - -class TestModelController: - def test_get_model_info(self, test_client, mocker): - user_id = "user_id" - repo_id = "repo_id" - branch_id = "branch_id" - - mir_tasks_content = { - "model_hash": "model_hash", - "mean_average_precision": 0.88, - "task_parameters": "mock_task_parameters", - "executor_config": "mock_executor_config", - } - - mocker.patch.object(MirStorageOps, "load_single_model", return_value=mir_tasks_content) - resp = test_client.get(f"/v1/users/{user_id}/repositories/{repo_id}/branches/{branch_id}/models") - - assert resp.status_code == 200 - assert resp.json()["result"] == { - "model_id": "model_hash", - "model_mAP": 0.88, - "task_parameters": "mock_task_parameters", - "executor_config": "mock_executor_config", - } diff --git a/ymir/backend/src/ymir_viz/tests/utils/__init__.py b/ymir/backend/src/ymir_viz/tests/utils/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/ymir/backend/src/ymir_viz/tests/utils/test_encoder.py b/ymir/backend/src/ymir_viz/tests/utils/test_encoder.py deleted file mode 100644 index 768489a36e..0000000000 --- a/ymir/backend/src/ymir_viz/tests/utils/test_encoder.py +++ /dev/null @@ -1,12 +0,0 @@ -from src.encoder import JSONEncoder -from src.swagger_models.asset_info import AssetInfo - - -class TestJSONEncoder: - def test_default(self): - data = AssetInfo(asset_id="mock_asset_id", class_ids=[1, 2, 3]) - rep = JSONEncoder().default(data) - - expected_data = {"asset_id": "mock_asset_id", "class_ids": [1, 2, 3]} - - assert expected_data == rep diff --git a/ymir/backend/src/ymir_viz/wsgi.py b/ymir/backend/src/ymir_viz/wsgi.py deleted file mode 100644 index b5e95863e5..0000000000 --- a/ymir/backend/src/ymir_viz/wsgi.py +++ /dev/null @@ -1,9 +0,0 @@ -import sys - -from src.app import create_connexion_app - -connexion_app = create_connexion_app() - -if __name__ == "__main__": - port = 9099 if len(sys.argv) <= 1 else int(sys.argv[1]) - connexion_app.run(host="0.0.0.0", port=port, debug=True) diff --git a/ymir/backend/tox.ini b/ymir/backend/tox.ini index 89182fddfa..2ab8484bd0 100644 --- a/ymir/backend/tox.ini +++ b/ymir/backend/tox.ini @@ -14,8 +14,9 @@ allowlist_externals = cp mkdir git + time setenv = - PYTHONPATH = {toxinidir}/src/ymir_app:{toxinidir}/src/ymir_viz:{toxinidir}/src/ymir_controller:{toxinidir}/src/common:{toxinidir}/src/ymir_monitor + PYTHONPATH = {toxinidir}/src/ymir_app:{toxinidir}/src/ymir_controller:{toxinidir}/src/common:{toxinidir}/src/ymir_monitor PIP_DEFAULT_TIMEOUT = 100 FIRST_ADMIN = admin@example.com FIRST_ADMIN_PASSWORD = fakepasswd @@ -32,38 +33,31 @@ deps = commands = git config --global user.name 'ci' git config --global user.email 'ci-test@ymir-team' + pip3 uninstall -y ymir-cmd pip3 install {toxinidir}/../command coverage erase - pytest -vv -s -n=4 --cov={toxinidir}/src/ymir_monitor/monitor --cov-config={toxinidir}/src/ymir_monitor/.coveragerc -sx {toxinidir}/src/ymir_monitor/tests + time pytest --durations=0 -v -sx -n=4 
--cov={toxinidir}/src/ymir_monitor/monitor --cov-config={toxinidir}/src/ymir_monitor/.coveragerc {toxinidir}/src/ymir_monitor/tests + rm -rf app.db {toxinidir}/src/ymir_app/tmp {toxinidir}/{static,alembic} mkdir -p {toxinidir}/src/ymir_app/tmp cp -rf {toxinidir}/src/ymir_app/alembic {toxinidir}/alembic python {toxinidir}/src/ymir_app/app/backend_pre_start.py alembic -c {toxinidir}/src/ymir_app/alembic.ini -x sqlite=True upgrade head python {toxinidir}/src/ymir_app/app/initial_data.py cp -rf {toxinidir}/src/ymir_app/static {toxinidir}/static - coverage erase - pytest --cov={toxinidir}/src/ymir_app/app --cov-config={toxinidir}/src/ymir_app/.coveragerc -sx {toxinidir}/src/ymir_app/tests - rm -r app.db {toxinidir}/src/ymir_app/tmp - rm -rf {toxinidir}/static - rm -rf {toxinidir}/alembic + time pytest --durations=0 -v -sx --cov={toxinidir}/src/ymir_app/app --cov-config={toxinidir}/src/ymir_app/.coveragerc {toxinidir}/src/ymir_app/tests + rm -rf app.db {toxinidir}/src/ymir_app/tmp {toxinidir}/{static,alembic} coverage erase - pytest -v -x --durations=0 -n=4 --cov={toxinidir}/src/ymir_controller/controller/invoker \ + time pytest -v -xs --durations=0 -n=4 --cov={toxinidir}/src/ymir_controller/controller/invoker \ --cov={toxinidir}/src/ymir_controller/controller/label_model --cov={toxinidir}/src/ymir_controller/controller/utils \ -sx {toxinidir}/src/ymir_controller/tests --log-level=INFO - rm -rf {toxinidir}/.local/ - rm -r {toxinidir}/.gitconfig - rm -rf {toxinidir}/.config/ - - coverage erase - pytest -n=4 --cov={toxinidir}/src/ymir_viz/src --cov-config={toxinidir}/src/ymir_viz/.coveragerc -sx {toxinidir}/src/ymir_viz/tests + rm -rf {toxinidir}/{.local,.gitconfig,.config} - flake8 src - mypy src/ymir_app - mypy src/ymir_controller - mypy src/ymir_monitor - mypy src/ymir_viz + time flake8 src + time mypy src/ymir_app + time mypy src/ymir_controller + time mypy src/ymir_monitor diff --git a/ymir/command/mir/__init__.py b/ymir/command/mir/__init__.py index d576129df6..9d80294107 100644 --- a/ymir/command/mir/__init__.py +++ b/ymir/command/mir/__init__.py @@ -1,4 +1,7 @@ import logging import sys -logging.basicConfig(stream=sys.stdout, format='%(message)s', level=logging.INFO) +logging.basicConfig(stream=sys.stdout, + format='%(levelname)-4s: [%(asctime)s] %(filename)s:%(lineno)-03s: %(message)s', + datefmt='%Y%m%d-%H:%M:%S', + level=logging.INFO) diff --git a/ymir/command/mir/cli.py b/ymir/command/mir/cli.py index 680567f711..c0b8767f1f 100644 --- a/ymir/command/mir/cli.py +++ b/ymir/command/mir/cli.py @@ -6,12 +6,12 @@ from typing import Any, cast, Protocol from mir import version -from mir.commands import (init, branch, checkout, commit, copy, evaluate, exporting, filter, log, merge, reset, - sampling, show, status, training, mining, importing, infer, model_importing) +from mir.commands import (init, checkout, commit, copy, export, filter, merge, + sampling, show, status, training, mining, import_dataset, import_model, infer) _COMMANDS_ = [ - init, branch, checkout, commit, copy, evaluate, exporting, filter, log, merge, reset, sampling, show, status, - training, mining, importing, infer, model_importing + init, checkout, commit, copy, export, filter, merge, sampling, show, status, training, mining, import_dataset, + import_model, infer ] @@ -41,7 +41,7 @@ def parse_args(self, args: Any = None, namespace: Any = None) -> Any: class VersionAction(argparse.Action): """Show mir version and exits""" def __call__(self, parser: Any, namespace: Any, values: Any, option_string: Any = None) -> None: - 
logging.info("mir version: {0}".format(version.__version__)) + logging.info(f"mir version: {version.YMIR_VERSION}") sys.exit(0) @@ -52,7 +52,7 @@ def __call__(self, parser: Any, namespace: Any, values: Any, option_string: Any logging.root.removeHandler(handler) logging.basicConfig(stream=sys.stdout, - format='%(levelname)-8s: [%(asctime)s] %(filename)s:%(lineno)s:%(funcName)s(): %(message)s', + format='%(levelname)-8s: [%(asctime)s] %(filename)s:%(lineno)-03s: %(message)s', datefmt='%Y%m%d-%H:%M:%S', level=logging.DEBUG) logging.debug("in debug mode") diff --git a/ymir/command/mir/commands/base.py b/ymir/command/mir/commands/base.py index 551f76051e..08f3c588b9 100644 --- a/ymir/command/mir/commands/base.py +++ b/ymir/command/mir/commands/base.py @@ -1,8 +1,7 @@ from abc import ABC, abstractmethod import logging from typing import Any - -from mir.tools import utils +from mir.tools.code import time_it class BaseCommand(ABC): @@ -12,7 +11,7 @@ class BaseCommand(ABC): def __init__(self, args: Any): self.args = args - @utils.time_it + @time_it def cmd_run(self) -> int: return self.run() diff --git a/ymir/command/mir/commands/branch.py b/ymir/command/mir/commands/branch.py deleted file mode 100644 index 397c401fba..0000000000 --- a/ymir/command/mir/commands/branch.py +++ /dev/null @@ -1,48 +0,0 @@ -import argparse -import logging - -from mir import scm -from mir.commands import base -from mir.tools import checker -from mir.tools.code import MirCode - - -class CmdBranch(base.BaseCommand): - @staticmethod - def run_with_args(mir_root: str, force_delete: str) -> int: - return_code = checker.check(mir_root, [checker.Prerequisites.IS_INSIDE_MIR_REPO]) - if return_code != MirCode.RC_OK: - return return_code - - # can not delete master branch - if force_delete == "master": - logging.info("can not delete master branch") - return MirCode.RC_CMD_INVALID_BRANCH_OR_TAG - - cmd_opts = [] - if force_delete: - cmd_opts.extend(["-D", force_delete]) - - repo_git = scm.Scm(mir_root, scm_executable="git") - output_str = repo_git.branch(cmd_opts) - if output_str: - logging.info("\n%s" % output_str) - - return MirCode.RC_OK - - def run(self) -> int: - logging.debug("command branch: %s" % self.args) - - return CmdBranch.run_with_args(mir_root=self.args.mir_root, force_delete=self.args.force_delete) - - -def bind_to_subparsers(subparsers: argparse._SubParsersAction, - parent_parser: argparse.ArgumentParser) -> None: - branch_arg_parser = subparsers.add_parser("branch", - parents=[parent_parser], - description="use this command to show mir repo branches", - help="show mir repo branches") - delete_group = branch_arg_parser.add_mutually_exclusive_group() - group = delete_group.add_mutually_exclusive_group() - group.add_argument("-D", dest="force_delete", type=str, help="delete branch, even if branch not merged") - branch_arg_parser.set_defaults(func=CmdBranch) diff --git a/ymir/command/mir/commands/commit.py b/ymir/command/mir/commands/commit.py index 0d9b49d898..06cbb8f97a 100644 --- a/ymir/command/mir/commands/commit.py +++ b/ymir/command/mir/commands/commit.py @@ -1,5 +1,6 @@ import argparse import logging +import os from mir import scm from mir.commands import base @@ -21,6 +22,10 @@ def run_with_args(mir_root: str, msg: str) -> int: return MirCode.RC_CMD_INVALID_MIR_REPO repo_git = scm.Scm(root_dir=mir_root, scm_executable='git') + git_attr_path = os.path.join(mir_root, '.gitattributes') + if not os.path.isfile(git_attr_path): + with open(git_attr_path, 'w') as f: + f.write('*.mir binary\n') repo_git.add('.') 
output_str = repo_git.commit(["-m", msg]) logging.info("\n%s" % output_str) diff --git a/ymir/command/mir/commands/copy.py b/ymir/command/mir/commands/copy.py index 442e984bc6..efcf0f6d4a 100644 --- a/ymir/command/mir/commands/copy.py +++ b/ymir/command/mir/commands/copy.py @@ -1,11 +1,10 @@ import argparse -from collections import defaultdict import logging -from typing import Dict, List, Set, Tuple +from typing import Dict, List from mir.commands import base from mir.protos import mir_command_pb2 as mirpb -from mir.tools import checker, class_ids, revs_parser, mir_repo_utils, mir_storage, mir_storage_ops +from mir.tools import checker, class_ids, revs_parser, mir_repo_utils, mir_storage_ops from mir.tools.code import MirCode from mir.tools.command_run_in_out import command_run_in_out from mir.tools.errors import MirRuntimeError @@ -61,34 +60,52 @@ def run_with_args(mir_root: str, if check_code != MirCode.RC_OK: return check_code + PhaseLoggerCenter.update_phase(phase="copy.init") + # read from src mir root - [mir_metadatas, mir_annotations, mir_keywords, mir_tasks, - _] = mir_storage_ops.MirStorageOps.load_multiple_storages(mir_root=data_mir_root, - mir_branch=data_src_typ_rev_tid.rev, - mir_task_id=data_src_typ_rev_tid.tid, - ms_list=mir_storage.get_all_mir_storage(), - as_dict=False) + mir_metadatas: mirpb.MirMetadatas + mir_annotations: mirpb.MirAnnotations + mir_context: mirpb.MirContext + mir_tasks: mirpb.MirTasks + mir_metadatas, mir_annotations, mir_context, mir_tasks = mir_storage_ops.MirStorageOps.load_multiple_storages( + mir_root=data_mir_root, + mir_branch=data_src_typ_rev_tid.rev, + mir_task_id=data_src_typ_rev_tid.tid, + ms_list=[mirpb.MIR_METADATAS, mirpb.MIR_ANNOTATIONS, mirpb.MIR_CONTEXT, mirpb.MIR_TASKS], + as_dict=False) PhaseLoggerCenter.update_phase(phase='copy.read') - orig_head_task_id = mir_annotations.head_task_id - if not orig_head_task_id: - logging.error('bad annotations.mir: empty head task id') - return MirCode.RC_CMD_INVALID_MIR_REPO - if ((len(mir_annotations.task_annotations) > 0 and orig_head_task_id not in mir_annotations.task_annotations)): - logging.error(f"bad annotations.mir: can not find head task id: {orig_head_task_id}") - return MirCode.RC_CMD_INVALID_MIR_REPO - - single_task_annotations, unknown_types = CmdCopy._change_single_task_annotations( - data_mir_root=data_mir_root, - dst_mir_root=mir_root, - single_task_annotations=mir_annotations.task_annotations[orig_head_task_id], - ignore_unknown_types=ignore_unknown_types, - drop_annotations=drop_annotations) - - mir_annotations.task_annotations[dst_typ_rev_tid.tid].CopyFrom(single_task_annotations) - del mir_annotations.task_annotations[orig_head_task_id] - mir_annotations.head_task_id = dst_typ_rev_tid.tid + need_change_class_ids = True + unknown_names_and_count = {} + if drop_annotations: + mir_annotations.prediction.Clear() + mir_annotations.ground_truth.Clear() + need_change_class_ids = False + if data_mir_root == mir_root: + need_change_class_ids = False + + if need_change_class_ids: + src_class_id_mgr = class_ids.load_or_create_userlabels(mir_root=data_mir_root) + dst_class_id_mgr = class_ids.load_or_create_userlabels(mir_root=mir_root) + src_to_dst_ids = { + src_class_id_mgr.id_and_main_name_for_name(n)[0]: dst_class_id_mgr.id_and_main_name_for_name(n)[0] + for n in src_class_id_mgr.all_main_names() + } + + CmdCopy._change_type_ids(single_task_annotations=mir_annotations.prediction, src_to_dst_ids=src_to_dst_ids) + 
CmdCopy._change_type_ids(single_task_annotations=mir_annotations.ground_truth, + src_to_dst_ids=src_to_dst_ids) + unknown_names_and_count = CmdCopy._gen_unknown_names_and_count(src_class_id_mgr=src_class_id_mgr, + mir_context=mir_context, + src_to_dst_ids=src_to_dst_ids) + + if unknown_names_and_count: + if ignore_unknown_types: + logging.warning(f"unknown types: {unknown_names_and_count}") + else: + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_MIR_REPO, + error_message=f"copy annotations error, unknown types: {unknown_names_and_count}") # tasks.mir: get necessary head task infos, remove others and change head task id orig_head_task_id = mir_tasks.head_task_id @@ -102,13 +119,12 @@ def run_with_args(mir_root: str, PhaseLoggerCenter.update_phase(phase='copy.change') # save and commit - orig_task = mir_tasks.tasks[orig_head_task_id] + orig_task: mirpb.Task = mir_tasks.tasks[orig_head_task_id] task = mir_storage_ops.create_task(task_type=mirpb.TaskType.TaskTypeCopyData, task_id=dst_typ_rev_tid.tid, message=f"copy from {data_mir_root}, src: {data_src_revs}, dst: {dst_rev}", - unknown_types=unknown_types, - model_hash=orig_task.model.model_hash, - model_mAP=orig_task.model.mean_average_precision, + new_types=unknown_names_and_count, + model_meta=orig_task.model, serialized_task_parameters=orig_task.serialized_task_parameters, serialized_executor_config=orig_task.serialized_executor_config, executor=orig_task.executor, @@ -128,67 +144,41 @@ def run_with_args(mir_root: str, @staticmethod def _change_type_ids( single_task_annotations: mirpb.SingleTaskAnnotations, - data_mir_root: str, - dst_mir_root: str, - ) -> Tuple[int, Dict[str, int]]: - src_to_dst_ids: Dict[int, int] = {} - unknown_types_and_count: Dict[str, int] = defaultdict(int) - dst_class_id_mgr = class_ids.ClassIdManager(mir_root=dst_mir_root) - src_class_id_mgr = class_ids.ClassIdManager(mir_root=data_mir_root) - - for asset_id, single_image_annotations in single_task_annotations.image_annotations.items(): - dst_keyids_set: Set[int] = set() - dst_image_annotations: List[mirpb.Annotation] = [] - for annotation in single_image_annotations.annotations: - src_type_id = annotation.class_id - if not src_class_id_mgr.has_id(src_type_id): - # if we can not find src type id in data_mir_root's labels.csv, this repo in invalid and cannot copy - logging.error(f"broken data_mir_root, unknown src id: {annotation.class_id}") - return MirCode.RC_CMD_INVALID_MIR_REPO, unknown_types_and_count - - if src_type_id in src_to_dst_ids: - # get mapping from cache - annotation.class_id = src_to_dst_ids[src_type_id] + src_to_dst_ids: Dict[int, int], + ) -> None: + for single_image_annotations in single_task_annotations.image_annotations.values(): + dst_image_annotations: List[mirpb.ObjectAnnotation] = [] + for annotation in single_image_annotations.boxes: + dst_class_id = src_to_dst_ids[annotation.class_id] + if dst_class_id >= 0: + annotation.class_id = dst_class_id dst_image_annotations.append(annotation) - else: - # if no cache, src_id -> src_type_name -> dst_id - src_type_name = src_class_id_mgr.main_name_for_id(src_type_id) or '' - if dst_class_id_mgr.has_name(src_type_name): - annotation.class_id = dst_class_id_mgr.id_and_main_name_for_name(src_type_name)[0] - dst_image_annotations.append(annotation) - - src_to_dst_ids[src_type_id] = annotation.class_id # save cache - dst_keyids_set.add(annotation.class_id) - else: - unknown_types_and_count[src_type_name] += 1 - - del single_image_annotations.annotations[:] - 
single_image_annotations.annotations.extend(dst_image_annotations) - return MirCode.RC_OK, unknown_types_and_count + del single_image_annotations.boxes[:] + single_image_annotations.boxes.extend(dst_image_annotations) - @staticmethod - def _change_single_task_annotations(data_mir_root: str, dst_mir_root: str, - single_task_annotations: mirpb.SingleTaskAnnotations, - ignore_unknown_types: bool, - drop_annotations: bool) -> Tuple[mirpb.SingleTaskAnnotations, Dict[str, int]]: - if drop_annotations: - return mirpb.SingleTaskAnnotations(), {} - - # if don't want to drop annotations - # annotations.mir and keywords.mir: change type ids - return_code, unknown_types = CmdCopy._change_type_ids(single_task_annotations=single_task_annotations, - data_mir_root=data_mir_root, - dst_mir_root=dst_mir_root) - if return_code != MirCode.RC_OK: - raise MirRuntimeError(error_code=return_code, error_message='change annotation type ids failed') - if unknown_types: - if ignore_unknown_types: - logging.warning(f"unknown types: {unknown_types}") - else: - raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_MIR_REPO, - error_message=f"copy annotations error, unknown types: {unknown_types}") + dst_eval_class_ids: List[int] = [] + for src_class_id in single_task_annotations.eval_class_ids: + dst_class_id = src_to_dst_ids[src_class_id] + if dst_class_id >= 0: + dst_eval_class_ids.append(dst_class_id) + single_task_annotations.eval_class_ids[:] = dst_eval_class_ids - return single_task_annotations, unknown_types + @staticmethod + def _gen_unknown_names_and_count(src_class_id_mgr: class_ids.UserLabels, mir_context: mirpb.MirContext, + src_to_dst_ids: Dict[int, int]) -> Dict[str, int]: + all_src_class_ids = set(mir_context.pred_stats.class_ids_cnt.keys()) | set( + mir_context.gt_stats.class_ids_cnt.keys()) + unknown_src_class_ids = {src_id for src_id in all_src_class_ids if src_to_dst_ids[src_id] == -1} + if not unknown_src_class_ids: + return {} + + unknown_names_and_count: Dict[str, int] = {} + for src_id in unknown_src_class_ids: + name = src_class_id_mgr.main_name_for_id(src_id) + cnt_pred: int = mir_context.pred_stats.class_ids_cnt[src_id] + cnt_gt: int = mir_context.gt_stats.class_ids_cnt[src_id] + unknown_names_and_count[name] = cnt_gt + cnt_pred + return unknown_names_and_count def bind_to_subparsers(subparsers: argparse._SubParsersAction, parent_parser: argparse.ArgumentParser) -> None: diff --git a/ymir/command/mir/commands/evaluate.py b/ymir/command/mir/commands/evaluate.py deleted file mode 100644 index 8ce0aa31ac..0000000000 --- a/ymir/command/mir/commands/evaluate.py +++ /dev/null @@ -1,103 +0,0 @@ -import argparse -import logging - -from mir.commands import base -from mir.tools import checker, det_eval, mir_storage_ops, revs_parser -from mir.tools.code import MirCode -from mir.tools.command_run_in_out import command_run_in_out -from mir.protos import mir_command_pb2 as mirpb - - -class CmdEvaluate(base.BaseCommand): - def run(self) -> int: - logging.info(f"command evaluate: {self.args}") - - return CmdEvaluate.run_with_args(work_dir=self.args.work_dir, - src_revs=self.args.src_revs, - dst_rev=self.args.dst_rev, - gt_rev=self.args.gt_rev, - mir_root=self.args.mir_root, - conf_thr=self.args.conf_thr, - iou_thrs=self.args.iou_thrs, - need_pr_curve=self.args.need_pr_curve) - - @staticmethod - @command_run_in_out - def run_with_args(work_dir: str, src_revs: str, dst_rev: str, gt_rev: str, mir_root: str, conf_thr: float, - iou_thrs: str, need_pr_curve: bool) -> int: - src_rev_tids =
revs_parser.parse_arg_revs(src_revs) - gt_rev_tid = revs_parser.parse_single_arg_rev(gt_rev, need_tid=False) - dst_rev_tid = revs_parser.parse_single_arg_rev(dst_rev, need_tid=True) - - return_code = checker.check(mir_root, - [checker.Prerequisites.IS_INSIDE_MIR_REPO, checker.Prerequisites.IS_CLEAN]) - if return_code != MirCode.RC_OK: - return return_code - - # read pred and gt - mir_gt = det_eval.MirCoco(mir_root=mir_root, rev_tid=gt_rev_tid, conf_thr=conf_thr) - mir_dts = mir_gt.load_dts_from_gt(mir_root=mir_root, rev_tids=src_rev_tids, conf_thr=conf_thr) - - # eval - evaluate_config = mirpb.EvaluateConfig() - evaluate_config.conf_thr = conf_thr - evaluate_config.iou_thrs_interval = iou_thrs - evaluate_config.need_pr_curve = need_pr_curve - evaluate_config.gt_dataset_id = mir_gt.dataset_id - evaluate_config.pred_dataset_ids.extend([mir_dt.dataset_id for mir_dt in mir_dts]) - evaluation = det_eval.det_evaluate(mir_dts=mir_dts, mir_gt=mir_gt, config=evaluate_config) - - _show_evaluation(evaluation=evaluation) - - # save and commit - task = mir_storage_ops.create_task(task_type=mirpb.TaskType.TaskTypeEvaluate, - task_id=dst_rev_tid.tid, - message='evaluate', - evaluation=evaluation, - src_revs=src_revs, - dst_rev=dst_rev) - mir_storage_ops.MirStorageOps.save_and_commit(mir_root=mir_root, - mir_branch=dst_rev_tid.rev, - his_branch=src_rev_tids[0].rev, - mir_datas={}, - task=task) - - return MirCode.RC_OK - - -def _show_evaluation(evaluation: mirpb.Evaluation) -> None: - for dataset_id, dataset_evaluation in evaluation.dataset_evaluations.items(): - cae = dataset_evaluation.iou_averaged_evaluation.ci_averaged_evaluation - logging.info(f"gt: {evaluation.config.gt_dataset_id} vs pred: {dataset_id}, mAP: {cae.ap}") - - -def bind_to_subparsers(subparsers: argparse._SubParsersAction, parent_parser: argparse.ArgumentParser) -> None: - evaluate_arg_parser = subparsers.add_parser('evaluate', - parents=[parent_parser], - description='use this command to evaluate model with ground truth', - help='evaluate model with ground truth') - evaluate_arg_parser.add_argument('-w', dest='work_dir', type=str, help='work place for training') - evaluate_arg_parser.add_argument("--src-revs", dest="src_revs", type=str, required=True, help="prediction rev@tid") - evaluate_arg_parser.add_argument("--gt-rev", dest="gt_rev", type=str, required=True, help="ground truth rev@tid") - evaluate_arg_parser.add_argument("--dst-rev", - dest="dst_rev", - type=str, - required=True, - help="rev@tid: destination branch name and task id") - evaluate_arg_parser.add_argument('--conf-thr', - dest='conf_thr', - type=float, - required=False, - default=0.3, - help='confidence threshold, default 0.3') - evaluate_arg_parser.add_argument('--iou-thrs', - dest='iou_thrs', - type=str, - required=False, - default='0.5:1.0:0.05', - help='iou thresholds, default 0.5:1.0:0.05, upper bound is excluded') - evaluate_arg_parser.add_argument('--need-pr-curve', - dest='need_pr_curve', - action='store_true', - help='also generates pr curve in evaluation result') - evaluate_arg_parser.set_defaults(func=CmdEvaluate) diff --git a/ymir/command/mir/commands/export.py b/ymir/command/mir/commands/export.py new file mode 100644 index 0000000000..ae1af03032 --- /dev/null +++ b/ymir/command/mir/commands/export.py @@ -0,0 +1,162 @@ +import argparse +import logging +import time +from typing import List + +from mir.commands import base +from mir.protos import mir_command_pb2 as mirpb +from mir.tools import annotations, checker, exporter, mir_repo_utils, mir_storage_ops, 
revs_parser +from mir.tools.class_ids import load_or_create_userlabels +from mir.tools.code import MirCode +from mir.tools.command_run_in_out import command_run_in_out +from mir.tools.errors import MirRuntimeError +from mir.tools.phase_logger import PhaseLoggerCenter + + +class CmdExport(base.BaseCommand): + def run(self) -> int: + logging.debug(f"command export: {self.args}") + + return CmdExport.run_with_args( + mir_root=self.args.mir_root, + asset_dir=self.args.asset_dir, + pred_dir=self.args.pred_dir, + gt_dir=self.args.gt_dir, + media_location=self.args.media_location, + src_revs=self.args.src_revs, + dst_rev=f"export-{self.args.src_revs}-{time.time()}", + asset_format=exporter.parse_asset_format(self.args.asset_format), + anno_format=annotations.parse_anno_format(self.args.anno_format), + class_names=self.args.class_names.split(';') if self.args.class_names else [], + work_dir=self.args.work_dir, + ) + + @staticmethod + @command_run_in_out + def run_with_args( + mir_root: str, + asset_dir: str, + pred_dir: str, + gt_dir: str, + media_location: str, + src_revs: str, + dst_rev: str, + asset_format: "mirpb.AssetFormat.V", + anno_format: "mirpb.AnnoFormat.V", + class_names: List[str], + work_dir: str, + ) -> int: + if not asset_dir or not media_location or not src_revs: + logging.error('empty --asset-dir, --media-location or --src-revs') + return MirCode.RC_CMD_INVALID_ARGS + + src_rev_tid = revs_parser.parse_single_arg_rev(src_revs, need_tid=False) + dst_rev_tid = revs_parser.parse_single_arg_rev(dst_rev, need_tid=False) + + PhaseLoggerCenter.create_phase_loggers(top_phase='export', + monitor_file=mir_repo_utils.work_dir_to_monitor_file(work_dir), + task_name='default-task') + + check_code = checker.check(mir_root, prerequisites=[checker.Prerequisites.IS_INSIDE_MIR_REPO]) + if check_code != MirCode.RC_OK: + return check_code + + # prepare + cls_mgr = load_or_create_userlabels(mir_root=mir_root) + class_ids_list, unknown_names = cls_mgr.id_for_names(class_names) + if unknown_names: + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, + error_message=f"unknown class names: {unknown_names}") + class_ids_mapping = {class_id: class_id for class_id in class_ids_list} + + mir_metadatas: mirpb.MirMetadatas + mir_annotations: mirpb.MirAnnotations + mir_metadatas, mir_annotations = mir_storage_ops.MirStorageOps.load_multiple_storages( + mir_root=mir_root, + mir_branch=src_rev_tid.rev, + mir_task_id=src_rev_tid.tid, + ms_list=[mirpb.MirStorage.MIR_METADATAS, mirpb.MirStorage.MIR_ANNOTATIONS]) + + ec = mirpb.ExportConfig(asset_format=asset_format, + asset_dir=asset_dir, + media_location=media_location, + need_sub_folder=True, + anno_format=anno_format, + gt_dir=gt_dir, + pred_dir=pred_dir,) + export_code = exporter.export_mirdatas_to_dir( + mir_metadatas=mir_metadatas, + ec=ec, + mir_annotations=mir_annotations, + class_ids_mapping=class_ids_mapping, + cls_id_mgr=cls_mgr, + ) + if export_code != MirCode.RC_OK: + return export_code + + # add task result commit + task = mir_storage_ops.create_task(task_type=mirpb.TaskType.TaskTypeExportData, + task_id=dst_rev_tid.tid, + message=f"export from {src_rev_tid.rev_tid}") + mir_storage_ops.MirStorageOps.save_and_commit(mir_root=mir_root, + mir_branch=dst_rev_tid.rev, + his_branch=src_rev_tid.rev, + mir_datas={ + mirpb.MirStorage.MIR_METADATAS: mirpb.MirMetadatas(), + mirpb.MirStorage.MIR_ANNOTATIONS: mirpb.MirAnnotations() + }, + task=task) + + return MirCode.RC_OK + + +def bind_to_subparsers(subparsers: argparse._SubParsersAction, parent_parser: 
argparse.ArgumentParser) -> None: + exporting_arg_parser = subparsers.add_parser('export', + parents=[parent_parser], + description='use this command to export data', + help='export data') + exporting_arg_parser.add_argument("--asset-dir", + required=True, + dest="asset_dir", + type=str, + help="export directory for assets") + exporting_arg_parser.add_argument("--pred-dir", + required=False, + dest="pred_dir", + type=str, + help="export directory for prediction") + exporting_arg_parser.add_argument("--gt-dir", + required=False, + dest="gt_dir", + type=str, + help="export directory for ground-truth") + exporting_arg_parser.add_argument('--media-location', + required=True, + dest='media_location', + type=str, + help='location of hashed assets') + exporting_arg_parser.add_argument('--src-revs', + required=True, + dest='src_revs', + type=str, + help='rev@bid: source rev and base task id') + exporting_arg_parser.add_argument('--anno-format', + dest='anno_format', + type=str, + default="none", + choices=["none", "det-ark", "det-voc", "det-ls-json", "seg-poly", "seg-mask"], + help='annotation format: ark / voc / none') + exporting_arg_parser.add_argument('--asset-format', + dest='asset_format', + type=str, + default='raw', + choices=['raw', 'lmdb'], + help='asset format: raw / lmdb') + exporting_arg_parser.add_argument('--class_names', + dest="class_names", + type=str, + required=False, + default='', + help="class names, do not set if you want to export all types") + exporting_arg_parser.add_argument('-w', dest='work_dir', type=str, required=False, help='working directory') + exporting_arg_parser.set_defaults(func=CmdExport) diff --git a/ymir/command/mir/commands/exporting.py b/ymir/command/mir/commands/exporting.py deleted file mode 100644 index f3a97e036a..0000000000 --- a/ymir/command/mir/commands/exporting.py +++ /dev/null @@ -1,145 +0,0 @@ -import argparse -import logging -import time - -from mir.commands import base -from mir.protos import mir_command_pb2 as mirpb -from mir.tools import checker, class_ids, data_exporter, mir_repo_utils, mir_storage_ops, revs_parser -from mir.tools.code import MirCode -from mir.tools.command_run_in_out import command_run_in_out -from mir.tools.errors import MirRuntimeError -from mir.tools.phase_logger import PhaseLoggerCenter - - -class CmdExport(base.BaseCommand): - def run(self) -> int: - logging.debug(f"command export: {self.args}") - - dst_rev = self.args.dst_rev - if not dst_rev: - task_id = f"exporting-task-{float(time.time())}" - dst_rev = f"{task_id}@{task_id}" - - return CmdExport.run_with_args(mir_root=self.args.mir_root, - asset_dir=self.args.asset_dir, - annotation_dir=self.args.annotation_dir, - media_location=self.args.media_location, - src_revs=self.args.src_revs, - dst_rev=dst_rev, - in_cis=self.args.in_cis, - work_dir=self.args.work_dir, - format=self.args.format) - - @staticmethod - @command_run_in_out - def run_with_args(mir_root: str, asset_dir: str, annotation_dir: str, media_location: str, src_revs: str, - format: str, in_cis: str, work_dir: str, dst_rev: str) -> int: - # check args - if not format: - format = 'none' - - if not asset_dir or not annotation_dir or not media_location or not src_revs: - logging.error('empty --asset-dir, --annotation-dir, --media-location or --src-revs') - return MirCode.RC_CMD_INVALID_ARGS - if format and (not data_exporter.check_support_format(format)): - logging.error(f"invalid --format: {format}") - return MirCode.RC_CMD_INVALID_ARGS - - src_rev_tid = revs_parser.parse_single_arg_rev(src_revs, 
need_tid=False) - dst_rev_tid = revs_parser.parse_single_arg_rev(dst_rev, need_tid=True) - - PhaseLoggerCenter.create_phase_loggers(top_phase='export', - monitor_file=mir_repo_utils.work_dir_to_monitor_file(work_dir), - task_name='default-task') - - check_code = checker.check(mir_root, prerequisites=[checker.Prerequisites.IS_INSIDE_MIR_REPO]) - if check_code != MirCode.RC_OK: - return check_code - - format_type = data_exporter.format_type_from_str(format) - - # asset ids - mir_metadatas: mirpb.MirMetadatas = mir_storage_ops.MirStorageOps.load_single_storage( - mir_root=mir_root, mir_branch=src_rev_tid.rev, mir_task_id=src_rev_tid.tid, ms=mirpb.MIR_METADATAS) - asset_ids = set() - for k in mir_metadatas.attributes.keys(): - asset_ids.add(str(k)) - if not asset_ids: - logging.error('nothing to export') - return MirCode.RC_CMD_INVALID_ARGS - - cls_mgr = class_ids.ClassIdManager(mir_root=mir_root) - class_names = in_cis.split(';') if in_cis else [] - type_ids_list, unknown_names = cls_mgr.id_for_names(class_names) - if unknown_names: - raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, - error_message=f"unknown class names: {unknown_names}") - - # export - data_exporter.export(mir_root=mir_root, - assets_location=media_location, - class_type_ids={type_id: type_id - for type_id in type_ids_list}, - asset_ids=asset_ids, - asset_dir=asset_dir, - annotation_dir=annotation_dir, - need_ext=True, - need_id_sub_folder=False, - base_branch=src_rev_tid.rev, - base_task_id=src_rev_tid.tid, - format_type=format_type) - - # add task result commit - task = mir_storage_ops.create_task(task_type=mirpb.TaskType.TaskTypeExportData, - task_id=dst_rev_tid.tid, - message=f"export from {src_rev_tid.rev_tid}") - mir_storage_ops.MirStorageOps.save_and_commit(mir_root=mir_root, - mir_branch=dst_rev_tid.rev, - his_branch=src_rev_tid.rev, - mir_datas={}, - task=task) - - return MirCode.RC_OK - - -def bind_to_subparsers(subparsers: argparse._SubParsersAction, parent_parser: argparse.ArgumentParser) -> None: - exporting_arg_parser = subparsers.add_parser('export', - parents=[parent_parser], - description='use this command to export data', - help='export data') - exporting_arg_parser.add_argument("--asset-dir", - required=True, - dest="asset_dir", - type=str, - help="export directory for assets") - exporting_arg_parser.add_argument("--annotation-dir", - required=True, - dest="annotation_dir", - type=str, - help="export directory for annotations") - exporting_arg_parser.add_argument('--media-location', - required=True, - dest='media_location', - type=str, - help='location of hashed assets') - exporting_arg_parser.add_argument('--src-revs', - required=True, - dest='src_revs', - type=str, - help='rev@bid: source rev and base task id') - exporting_arg_parser.add_argument("--dst-rev", required=False, dest="dst_rev", type=str, help="rev@tid") - exporting_arg_parser.add_argument('--format', - dest='format', - type=str, - default="none", - choices=data_exporter.support_format_type(), - help='annotation format: ark / voc / none') - exporting_arg_parser.add_argument("-p", - '--cis', - dest="in_cis", - type=str, - required=False, - default='', - help="type names, do not set if you want to export all types") - exporting_arg_parser.add_argument('-w', dest='work_dir', type=str, required=False, help='working directory') - exporting_arg_parser.set_defaults(func=CmdExport) diff --git a/ymir/command/mir/commands/filter.py b/ymir/command/mir/commands/filter.py index f37a28f249..aaff39fdda 100644 --- 
a/ymir/command/mir/commands/filter.py +++ b/ymir/command/mir/commands/filter.py @@ -1,23 +1,21 @@ import argparse import logging -from typing import Any, Callable, List, Tuple, Optional, Set, Union +from typing import Optional, Set from mir.commands import base from mir.protos import mir_command_pb2 as mirpb -from mir.tools import checker, class_ids, mir_repo_utils, mir_storage, mir_storage_ops, revs_parser +from mir.tools import annotations, checker, class_ids +from mir.tools import mir_repo_utils, mir_storage, mir_storage_ops, revs_parser from mir.tools.code import MirCode from mir.tools.command_run_in_out import command_run_in_out from mir.tools.errors import MirRuntimeError from mir.tools.phase_logger import PhaseLoggerCenter -# type for function `__include_match` and `__exclude_match` -__IncludeExcludeCallableType = Callable[[Set[str], mirpb.MirKeywords, str, Any], Set[str]] - class CmdFilter(base.BaseCommand): # private: misc @staticmethod - def __preds_set_from_str(preds_str: str, cls_mgr: class_ids.ClassIdManager) -> Set[int]: + def __class_ids_set_from_str(preds_str: str, cls_mgr: class_ids.UserLabels) -> Set[int]: if not preds_str: return set() @@ -29,52 +27,49 @@ def __preds_set_from_str(preds_str: str, cls_mgr: class_ids.ClassIdManager) -> S return set(class_ids) - @staticmethod - def __include_match(asset_ids_set: Set[str], mir_keywords: mirpb.MirKeywords, attr_name: str, - in_set: Set[Union[int, str]]) -> Set[str]: + @classmethod + def __include_match(cls, asset_ids_set: Set[str], mir_keywords: mirpb.MirKeywords, + in_cis_set: Set[int]) -> Set[str]: # if don't need include match, returns all - if not in_set: + if not in_cis_set: return asset_ids_set - matched_asset_ids_set = set() # type: Set[str] - for asset_id in asset_ids_set: - if asset_id not in mir_keywords.keywords: - continue - - keyids_set = set(getattr(mir_keywords.keywords[asset_id], attr_name)) - if not keyids_set: - continue + asset_ids_set = set() + for ci in in_cis_set: + if ci in mir_keywords.pred_idx.cis: + asset_ids_set.update(mir_keywords.pred_idx.cis[ci].key_ids.keys()) + if ci in mir_keywords.gt_idx.cis: + asset_ids_set.update(mir_keywords.gt_idx.cis[ci].key_ids.keys()) + return asset_ids_set + + @classmethod + def __exclude_match(cls, asset_ids_set: Set[str], mir_keywords: mirpb.MirKeywords, + ex_cis_set: Set[int]) -> Set[str]: + if not ex_cis_set: + return asset_ids_set - if keyids_set & in_set: - matched_asset_ids_set.add(asset_id) - return matched_asset_ids_set + for ci in ex_cis_set: + if ci in mir_keywords.pred_idx.cis: + asset_ids_set.difference_update(mir_keywords.pred_idx.cis[ci].key_ids.keys()) + if ci in mir_keywords.gt_idx.cis: + asset_ids_set.difference_update(mir_keywords.gt_idx.cis[ci].key_ids.keys()) + return asset_ids_set @staticmethod - def __exclude_match(asset_ids_set: Set[str], mir_keywords: mirpb.MirKeywords, attr_name: str, - ex_set: Set[Union[int, str]]) -> Set[str]: - # if don't need excludes filter, returns all - if not ex_set: - return asset_ids_set - - matched_asset_ids_set = set() # type: Set[str] - for asset_id in asset_ids_set: - if asset_id in mir_keywords.keywords: - keyids_set = set(getattr(mir_keywords.keywords[asset_id], attr_name)) - if keyids_set & ex_set: - continue - matched_asset_ids_set.add(asset_id) - return matched_asset_ids_set + def __gen_task_annotations(src_task_annotations: mirpb.SingleTaskAnnotations, + dst_task_annotations: mirpb.SingleTaskAnnotations, asset_ids: Set[str]) -> None: + joint_ids = asset_ids & src_task_annotations.image_annotations.keys() + 
for asset_id in joint_ids: + dst_task_annotations.image_annotations[asset_id].CopyFrom(src_task_annotations.image_annotations[asset_id]) # public: run cmd @staticmethod @command_run_in_out - def run_with_args(mir_root: str, in_cis: Optional[str], ex_cis: Optional[str], in_cks: Optional[str], - ex_cks: Optional[str], src_revs: str, dst_rev: str, work_dir: str) -> int: # type: ignore + def run_with_args(mir_root: str, in_cis: Optional[str], ex_cis: Optional[str], src_revs: str, dst_rev: str, + work_dir: str) -> int: # type: ignore # check args in_cis = in_cis.strip().lower() if in_cis else '' ex_cis = ex_cis.strip().lower() if ex_cis else '' - in_cks = in_cks.strip() if in_cks else '' - ex_cks = ex_cks.strip() if ex_cks else '' src_typ_rev_tid = revs_parser.parse_single_arg_rev(src_revs, need_tid=False) dst_typ_rev_tid = revs_parser.parse_single_arg_rev(dst_rev, need_tid=True) @@ -88,6 +83,8 @@ def run_with_args(mir_root: str, in_cis: Optional[str], ex_cis: Optional[str], i if return_code != MirCode.RC_OK: return return_code + PhaseLoggerCenter.update_phase(phase="filter.init") + [mir_metadatas, mir_annotations, mir_keywords, mir_tasks, _] = mir_storage_ops.MirStorageOps.load_multiple_storages(mir_root=mir_root, mir_branch=src_typ_rev_tid.rev, @@ -95,42 +92,25 @@ def run_with_args(mir_root: str, in_cis: Optional[str], ex_cis: Optional[str], i ms_list=mir_storage.get_all_mir_storage(), as_dict=False) task_id = dst_typ_rev_tid.tid - base_task_id = mir_annotations.head_task_id PhaseLoggerCenter.update_phase(phase='filter.read') - if task_id in mir_tasks.tasks: - raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_BRANCH_OR_TAG, - error_message=f"invalid args: task id already exists: {task_id}") - if not base_task_id: - raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_MIR_REPO, - error_message='no base task id in tasks.mir') - - assert len(mir_annotations.task_annotations.keys()) == 1 - base_task_annotations = mir_annotations.task_annotations[base_task_id] # type: mirpb.SingleTaskAnnotations - - class_manager = class_ids.ClassIdManager(mir_root=mir_root) - preds_set = CmdFilter.__preds_set_from_str(in_cis, class_manager) # type: Set[int] - excludes_set = CmdFilter.__preds_set_from_str(ex_cis, class_manager) # type: Set[int] - ck_preds_set = {ck.strip() for ck in in_cks.split(";")} if in_cks else set() - ck_excludes_set = {ck.strip() for ck in ex_cks.split(";")} if ex_cks else set() + class_manager = class_ids.load_or_create_userlabels(mir_root=mir_root) + in_cis_set: Set[int] = CmdFilter.__class_ids_set_from_str(in_cis, class_manager) + ex_cis_set: Set[int] = CmdFilter.__class_ids_set_from_str(ex_cis, class_manager) asset_ids_set = set(mir_metadatas.attributes.keys()) - match_functions: List[Tuple[__IncludeExcludeCallableType, Union[Set[str], Set[int]], str, str]] = [ - (CmdFilter.__include_match, preds_set, 'predifined_keyids', 'select cis'), - (CmdFilter.__exclude_match, excludes_set, 'predifined_keyids', 'exclude cis'), - (CmdFilter.__include_match, ck_preds_set, 'customized_keywords', 'select cks'), - (CmdFilter.__exclude_match, ck_excludes_set, 'customized_keywords', 'exclude cks') - ] - for match_func, ci_ck_conditions, attr_name, message in match_functions: - if ci_ck_conditions: - logging.info(f"assets count before {message}: {len(asset_ids_set)}") - asset_ids_set = match_func(asset_ids_set, mir_keywords, attr_name, ci_ck_conditions) - logging.info(f"assets count after {message}: {len(asset_ids_set)}") + asset_ids_set = CmdFilter.__include_match(asset_ids_set=asset_ids_set, + 
mir_keywords=mir_keywords, + in_cis_set=in_cis_set) + logging.info(f"assets count after include match: {len(asset_ids_set)}") + asset_ids_set = CmdFilter.__exclude_match(asset_ids_set=asset_ids_set, + mir_keywords=mir_keywords, + ex_cis_set=ex_cis_set) + logging.info(f"assets count after exclude match: {len(asset_ids_set)}") matched_mir_metadatas = mirpb.MirMetadatas() matched_mir_annotations = mirpb.MirAnnotations() - matched_mir_keywords = mirpb.MirKeywords() # generate matched metadatas, annotations and keywords for asset_id in asset_ids_set: @@ -138,16 +118,20 @@ def run_with_args(mir_root: str, in_cis: Optional[str], ex_cis: Optional[str], i asset_attr = mir_metadatas.attributes[asset_id] matched_mir_metadatas.attributes[asset_id].CopyFrom(asset_attr) - joint_ids = asset_ids_set & mir_keywords.keywords.keys() - for asset_id in joint_ids: - # generate `matched_mir_keywords` - matched_mir_keywords.keywords[asset_id].CopyFrom(mir_keywords.keywords[asset_id]) - # generate `matched_mir_annotations` - joint_ids = asset_ids_set & base_task_annotations.image_annotations.keys() - for asset_id in joint_ids: - matched_mir_annotations.task_annotations[task_id].image_annotations[asset_id].CopyFrom( - base_task_annotations.image_annotations[asset_id]) + CmdFilter.__gen_task_annotations(src_task_annotations=mir_annotations.ground_truth, + dst_task_annotations=matched_mir_annotations.ground_truth, + asset_ids=asset_ids_set) + CmdFilter.__gen_task_annotations(src_task_annotations=mir_annotations.prediction, + dst_task_annotations=matched_mir_annotations.prediction, + asset_ids=asset_ids_set) + + image_ck_asset_ids = asset_ids_set & set(mir_annotations.image_cks.keys()) + for asset_id in image_ck_asset_ids: + matched_mir_annotations.image_cks[asset_id].CopyFrom(mir_annotations.image_cks[asset_id]) + + annotations.copy_annotations_pred_meta(src_task_annotations=mir_annotations.prediction, + dst_task_annotations=matched_mir_annotations.prediction) logging.info("matched: %d, overriding current mir repo", len(matched_mir_metadatas.attributes)) @@ -177,8 +161,6 @@ def run(self) -> int: return CmdFilter.run_with_args(mir_root=self.args.mir_root, in_cis=self.args.in_cis, ex_cis=self.args.ex_cis, - in_cks=self.args.in_cks, - ex_cks=self.args.ex_cks, src_revs=self.args.src_revs, dst_rev=self.args.dst_rev, work_dir=self.args.work_dir) @@ -189,10 +171,8 @@ def bind_to_subparsers(subparsers: argparse._SubParsersAction, parent_parser: ar parents=[parent_parser], description="use this command to filter assets", help="filter assets") - filter_arg_parser.add_argument("-p", dest="in_cis", type=str, help="type names") - filter_arg_parser.add_argument("-P", dest="ex_cis", type=str, help="exclusive type names") - filter_arg_parser.add_argument("-c", dest="in_cks", type=str, help="customized keywords") - filter_arg_parser.add_argument("-C", dest="ex_cks", type=str, help="excludsive customized keywords") + filter_arg_parser.add_argument("-p", '--cis', dest="in_cis", type=str, help="type names") + filter_arg_parser.add_argument("-P", '--ex-cis', dest="ex_cis", type=str, help="exclusive type names") filter_arg_parser.add_argument("--src-revs", dest="src_revs", type=str, help="type:rev@bid") filter_arg_parser.add_argument("--dst-rev", dest="dst_rev", type=str, help="rev@tid") filter_arg_parser.add_argument('-w', dest='work_dir', type=str, required=False, help='working directory') diff --git a/ymir/command/mir/commands/import_dataset.py b/ymir/command/mir/commands/import_dataset.py new file mode 100644 index 
0000000000..73c6674311 --- /dev/null +++ b/ymir/command/mir/commands/import_dataset.py @@ -0,0 +1,208 @@ +import argparse +import logging +import os +import shutil +from typing import Dict + +from mir.commands import base +from mir.protos import mir_command_pb2 as mirpb +from mir.tools import annotations, checker, metadatas +from mir.tools import mir_repo_utils, mir_storage_ops, revs_parser, settings +from mir.tools.code import MirCode +from mir.tools.command_run_in_out import command_run_in_out +from mir.tools.phase_logger import PhaseLoggerCenter +from mir.tools.mir_storage import get_asset_storage_path, sha1sum_for_file + + +class CmdImport(base.BaseCommand): + def run(self) -> int: + logging.debug("command import: %s", self.args) + + return CmdImport.run_with_args(mir_root=self.args.mir_root, + index_file=self.args.index_file, + pred_abs=self.args.pred_dir, + gt_abs=self.args.gt_dir, + gen_abs=self.args.gen_abs, + dst_rev=self.args.dst_rev, + src_revs=self.args.src_revs or 'master', + work_dir=self.args.work_dir, + unknown_types_strategy=annotations.UnknownTypesStrategy( + self.args.unknown_types_strategy), + anno_type=annotations.parse_anno_type(self.args.anno_type)) + + @staticmethod + @command_run_in_out + def run_with_args(mir_root: str, index_file: str, pred_abs: str, gt_abs: str, gen_abs: str, + dst_rev: str, src_revs: str, work_dir: str, + unknown_types_strategy: annotations.UnknownTypesStrategy, anno_type: "mirpb.AnnoType.V") -> int: + # Step 1: check args and prepare environment. + if not index_file or not gen_abs or not os.path.isfile(index_file): + logging.error(f"invalid index_file: {index_file} or gen_abs: {gen_abs}") + return MirCode.RC_CMD_INVALID_ARGS + if pred_abs and not os.path.isdir(pred_abs): + logging.error(f"prediction dir invalid: {pred_abs}") + return MirCode.RC_CMD_INVALID_ARGS + if gt_abs and not os.path.isdir(gt_abs): + logging.error(f"groundtruth dir invalid: {gt_abs}") + return MirCode.RC_CMD_INVALID_ARGS + dst_typ_rev_tid = revs_parser.parse_single_arg_rev(dst_rev, need_tid=True) + src_typ_rev_tid = revs_parser.parse_single_arg_rev(src_revs, need_tid=False) + + PhaseLoggerCenter.create_phase_loggers(top_phase='import', + monitor_file=mir_repo_utils.work_dir_to_monitor_file(work_dir), + task_name=dst_typ_rev_tid.tid) + + check_code = checker.check(mir_root, + [checker.Prerequisites.IS_INSIDE_MIR_REPO, checker.Prerequisites.HAVE_LABELS]) + if check_code != MirCode.RC_OK: + return check_code + + PhaseLoggerCenter.update_phase(phase="import.init") + + # Step 2: generate sha1 file and rename images. + # sha1 file to be written. 
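+ # An illustrative (hypothetical) index_file: one asset per line, with the image path in
+ # the first tab-separated column; extra columns are ignored (only components[0] is read):
+ #   /data/images/0001.jpg
+ #   /data/images/0002.jpg<TAB>any-extra-column
+ # _generate_sha_and_copy below fills map_hashed_filename with sha1 -> original file stem
+ # (e.g. '0001') and copies each asset into the hashed storage layout under gen_abs.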
+ map_hashed_filename: Dict[str, str] = {} + ret = _generate_sha_and_copy(index_file, map_hashed_filename, gen_abs) + if ret != MirCode.RC_OK: + logging.error(f"generate hash error: {ret}") + return ret + + # Step 3: import metadata and annotations. + mir_metadatas = mirpb.MirMetadatas() + ret = metadatas.import_metadatas(mir_metadatas=mir_metadatas, + map_hashed_filename=map_hashed_filename, + hashed_asset_root=gen_abs, + phase='import.metadatas') + if ret != MirCode.RC_OK: + logging.error(f"import metadatas error: {ret}") + return ret + + mir_annotation = mirpb.MirAnnotations() + unknown_class_names = annotations.import_annotations(mir_annotation=mir_annotation, + mir_root=mir_root, + prediction_dir_path=pred_abs, + groundtruth_dir_path=gt_abs, + map_hashed_filename=map_hashed_filename, + unknown_types_strategy=unknown_types_strategy, + anno_type=anno_type, + phase='import.others') + + logging.info(f"pred / gt import unknown result: {unknown_class_names}") + + # create and write tasks + task = mir_storage_ops.create_task( + task_type=mirpb.TaskTypeImportData, + task_id=dst_typ_rev_tid.tid, + message=f"importing {index_file}-{pred_abs}-{gt_abs} to {dst_rev}, uts: {unknown_types_strategy}", + new_types=unknown_class_names, + new_types_added=(unknown_types_strategy == annotations.UnknownTypesStrategy.ADD), + src_revs=src_revs, + dst_rev=dst_rev, + ) + + mir_data = { + mirpb.MirStorage.MIR_METADATAS: mir_metadatas, + mirpb.MirStorage.MIR_ANNOTATIONS: mir_annotation, + } + mir_storage_ops.MirStorageOps.save_and_commit(mir_root=mir_root, + his_branch=src_typ_rev_tid.rev, + mir_branch=dst_typ_rev_tid.rev, + mir_datas=mir_data, + task=task) + + return MirCode.RC_OK + + +def _generate_sha_and_copy(index_file: str, map_hashed_filename: Dict[str, str], sha_folder: str) -> int: + hash_phase_name = 'import.hash' + os.makedirs(sha_folder, exist_ok=True) + + with open(index_file) as idx_f: + lines = idx_f.readlines() + total_count = len(lines) + if total_count > settings.ASSET_LIMIT_PER_DATASET: # large number of images may trigger redis timeout error.
+ logging.error(f'# of image {total_count} exceeds upper boundary {settings.ASSET_LIMIT_PER_DATASET}.') + return MirCode.RC_CMD_INVALID_ARGS + + idx = 0 + copied_assets = 0 + for line in lines: + components = line.strip().split('\t') + if not components: + continue + media_src = components[0] + if not os.path.isfile(media_src): + continue + + try: + sha1 = sha1sum_for_file(media_src) + except OSError: + logging.info(f"{media_src} is not accessible.") + continue + + if sha1 not in map_hashed_filename: + map_hashed_filename[sha1] = os.path.splitext(os.path.basename(media_src))[0] + media_dst = get_asset_storage_path(location=sha_folder, hash=sha1) + if not os.path.isfile(media_dst): + copied_assets += 1 + shutil.copyfile(media_src, media_dst) + + idx += 1 + if idx % 5000 == 0: + PhaseLoggerCenter.update_phase(phase=hash_phase_name, local_percent=(idx / total_count)) + logging.info(f"finished {idx} / {total_count} hashes") + + logging.info(f"skipped assets: {len(lines) - len(map_hashed_filename)}\ncopied assets: {copied_assets}") + PhaseLoggerCenter.update_phase(phase=hash_phase_name) + return MirCode.RC_OK + + +def bind_to_subparsers(subparsers: argparse._SubParsersAction, parent_parser: argparse.ArgumentParser) -> None: + import_dataset_arg_parser = subparsers.add_parser( + "import", + parents=[parent_parser], + description="use this command to import data from img/anno folder", + help="import raw data", + formatter_class=argparse.RawTextHelpFormatter) + import_dataset_arg_parser.add_argument("--index-file", + dest="index_file", + required=True, + type=str, + help="index of input media, one file per line") + import_dataset_arg_parser.add_argument("--pred-dir", + dest="pred_dir", + type=str, + required=False, + help="corresponding prediction folder") + import_dataset_arg_parser.add_argument("--gt-dir", + dest="gt_dir", + type=str, + required=False, + help="corresponding ground-truth folder") + import_dataset_arg_parser.add_argument("--gen-dir", + dest="gen_abs", + required=True, + type=str, + help="storage path of generated data files") + import_dataset_arg_parser.add_argument("--src-revs", dest="src_revs", type=str, help="rev: source rev") + import_dataset_arg_parser.add_argument("--dst-rev", + dest="dst_rev", + type=str, + required=True, + help="rev@tid: destination branch name and task id") + import_dataset_arg_parser.add_argument('-w', dest='work_dir', type=str, required=False, help='working directory') + import_dataset_arg_parser.add_argument('--unknown-types-strategy', + dest='unknown_types_strategy', + required=False, + choices=['stop', 'ignore', 'add'], + default='stop', + help='strategy for unknown class types in annotation files\n' + 'stop: stop on unknown class type names\n' + 'ignore: ignore unknown class type names\n' + 'add: add unknown class type names to labels.yaml') + import_dataset_arg_parser.add_argument('--anno-type', + dest='anno_type', + required=True, + choices=['det-box', 'seg-poly', 'seg-mask'], + help='annotation type\n') + import_dataset_arg_parser.set_defaults(func=CmdImport) diff --git a/ymir/command/mir/commands/model_importing.py b/ymir/command/mir/commands/import_model.py similarity index 76% rename from ymir/command/mir/commands/model_importing.py rename to ymir/command/mir/commands/import_model.py index 0f82fa97a8..90f9145558 100644 --- a/ymir/command/mir/commands/model_importing.py +++ b/ymir/command/mir/commands/import_model.py @@ -2,13 +2,15 @@ import logging import os import shutil +import tarfile +from mir.version import DEFAULT_YMIR_SRC_VERSION,
YMIR_VERSION, ymir_model_salient_version import yaml from mir.commands import base from mir.protos import mir_command_pb2 as mirpb -from mir.tools import checker, mir_storage_ops, revs_parser -from mir.tools import settings as mir_settings, utils as mir_utils +from mir.tools import checker, mir_storage_ops, models, revs_parser +from mir.tools import settings as mir_settings from mir.tools.code import MirCode from mir.tools.command_run_in_out import command_run_in_out from mir.tools.errors import MirRuntimeError @@ -48,22 +50,27 @@ def run_with_args(mir_root: str, dst_rev: str, src_revs: str, work_dir: str, pac # unpack extract_model_dir_path = os.path.join(work_dir, 'model') - model_storage = mir_utils.prepare_model(model_location=os.path.dirname(package_path), - model_hash=os.path.basename(package_path), - dst_model_path=extract_model_dir_path) + with tarfile.open(package_path, 'r') as tf: + tf.extractall(extract_model_dir_path) - logging.info(f"importing model with storage: {model_storage}") + with open(os.path.join(extract_model_dir_path, 'ymir-info.yaml'), 'r') as f: + ymir_info_dict = yaml.safe_load(f.read()) + + package_version = ymir_info_dict.get('package_version', DEFAULT_YMIR_SRC_VERSION) + if ymir_model_salient_version(package_version) != ymir_model_salient_version(YMIR_VERSION): + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_MODEL_PACKAGE_VERSION, + error_message=f"Invalid model package version: {package_version}") + model_storage = models.ModelStorage.parse_obj(ymir_info_dict) - # check - _check_model(model_storage=model_storage, mir_root=mir_root) + logging.info(f"importing model with storage: {model_storage}") # update model_storage and pack model_storage.task_context['src-revs'] = src_revs model_storage.task_context['dst_rev'] = dst_rev model_storage.task_context['type'] = mirpb.TaskType.TaskTypeImportModel - model_hash = mir_utils.pack_and_copy_models(model_storage=model_storage, - model_dir_path=extract_model_dir_path, - model_location=model_location) + models.pack_and_copy_models(model_storage=model_storage, + model_dir_path=extract_model_dir_path, + model_location=model_location) # remove tmp files shutil.rmtree(extract_model_dir_path) @@ -71,9 +78,8 @@ def run_with_args(mir_root: str, dst_rev: str, src_revs: str, work_dir: str, pac # create task and commit task = mir_storage_ops.create_task(task_type=mirpb.TaskType.TaskTypeImportModel, task_id=dst_typ_rev_tid.tid, - message=f"import model {package_path} as {model_hash}", - model_hash=model_hash, - model_mAP=float(model_storage.task_context.get('mAP', 0)), + message=f"import model {package_path} as {model_storage.model_hash}", + model_meta=model_storage.get_model_meta(), return_code=MirCode.RC_OK, return_msg='', src_revs=src_revs, @@ -85,22 +91,15 @@ def run_with_args(mir_root: str, dst_rev: str, src_revs: str, work_dir: str, pac mir_storage_ops.MirStorageOps.save_and_commit(mir_root=mir_root, mir_branch=dst_typ_rev_tid.rev, his_branch=src_typ_rev_tid.rev, - mir_datas={}, + mir_datas={ + mirpb.MirStorage.MIR_METADATAS: mirpb.MirMetadatas(), + mirpb.MirStorage.MIR_ANNOTATIONS: mirpb.MirAnnotations() + }, task=task) return MirCode.RC_OK -def _check_model(model_storage: mir_utils.ModelStorage, mir_root: str) -> int: - # check producer - producer = model_storage.task_context.get(mir_settings.PRODUCER_KEY, None) - if producer != mir_settings.PRODUCER_NAME: - raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_FILE, - error_message=f"can not import model, invalid producer: {producer}") - - return MirCode.RC_OK 
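A minimal sketch of the salient-version gate used in run_with_args above; the real ymir_model_salient_version lives in mir.version and is not shown in this diff, so the major.minor truncation here is an assumption for illustration only:

def ymir_model_salient_version(ver: str) -> str:  # hypothetical reimplementation
    # keep only major.minor, so e.g. packages built by 2.0.0 and 2.0.1 stay compatible
    return '.'.join(ver.split('.')[:2])

# a mismatch raises MirRuntimeError with RC_CMD_INVALID_MODEL_PACKAGE_VERSION above
assert ymir_model_salient_version('2.0.1') == ymir_model_salient_version('2.0.0')
assert ymir_model_salient_version('1.1.0') != ymir_model_salient_version('2.0.0')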
- - def bind_to_subparsers(subparsers: argparse._SubParsersAction, parent_parser: argparse.ArgumentParser) -> None: importing_arg_parser = subparsers.add_parser("models", parents=[parent_parser], diff --git a/ymir/command/mir/commands/importing.py b/ymir/command/mir/commands/importing.py deleted file mode 100644 index d1d9508c98..0000000000 --- a/ymir/command/mir/commands/importing.py +++ /dev/null @@ -1,187 +0,0 @@ -import argparse -import logging -import json -import os -import random -import shutil - -from mir.commands import base -from mir.protos import mir_command_pb2 as mirpb -from mir.tools import annotations, checker, hash_utils, metadatas, mir_repo_utils, mir_storage_ops, revs_parser -from mir.tools.code import MirCode -from mir.tools.command_run_in_out import command_run_in_out -from mir.tools.errors import MirRuntimeError -from mir.tools.phase_logger import PhaseLoggerCenter - - -class CmdImport(base.BaseCommand): - def run(self) -> int: - logging.debug("command import: %s", self.args) - - return CmdImport.run_with_args(mir_root=self.args.mir_root, - index_file=self.args.index_file, - anno_abs=self.args.anno, - gen_abs=self.args.gen, - dataset_name=self.args.dataset_name, - dst_rev=self.args.dst_rev, - src_revs=self.args.src_revs or 'master', - work_dir=self.args.work_dir, - ignore_unknown_types=self.args.ignore_unknown_types) - - @staticmethod - @command_run_in_out - def run_with_args(mir_root: str, index_file: str, anno_abs: str, gen_abs: str, dataset_name: str, - dst_rev: str, src_revs: str, work_dir: str, ignore_unknown_types: bool) -> int: - # Step 1: check args and prepare environment. - if not index_file or not gen_abs or not os.path.isfile(index_file): - logging.error(f"invalid index_file: {index_file} or gen_abs: {gen_abs}") - return MirCode.RC_CMD_INVALID_ARGS - if anno_abs and not os.path.isdir(anno_abs): - logging.error(f"annotations dir invalid: {anno_abs}") - return MirCode.RC_CMD_INVALID_ARGS - dst_typ_rev_tid = revs_parser.parse_single_arg_rev(dst_rev, need_tid=True) - src_typ_rev_tid = revs_parser.parse_single_arg_rev(src_revs, need_tid=False) - - if not dataset_name: - dataset_name = dst_typ_rev_tid.tid - - PhaseLoggerCenter.create_phase_loggers(top_phase='import', - monitor_file=mir_repo_utils.work_dir_to_monitor_file(work_dir), - task_name=dst_typ_rev_tid.tid) - - check_code = checker.check(mir_root, - [checker.Prerequisites.IS_INSIDE_MIR_REPO, checker.Prerequisites.HAVE_LABELS]) - if check_code != MirCode.RC_OK: - return check_code - - # Step 2: generate sha1 file and rename images. - # sha1 file to be written. 
- sha1_index_abs = os.path.join( - gen_abs, f"{os.path.basename(index_file)}-{dst_typ_rev_tid.rev}-{random.randint(0, 100)}.sha1") - ret = _generate_sha_and_copy(index_file, sha1_index_abs, gen_abs) - if ret != MirCode.RC_OK: - logging.error(f"generate hash error: {ret}") - return ret - - # Step 3 import metadat and annotations: - mir_metadatas = mirpb.MirMetadatas() - ret = metadatas.import_metadatas(mir_metadatas=mir_metadatas, - dataset_name=dataset_name, - in_sha1_path=sha1_index_abs, - hashed_asset_root=gen_abs, - phase='import.metadatas') - if ret != MirCode.RC_OK: - logging.error(f"import metadatas error: {ret}") - return ret - - mir_annotation = mirpb.MirAnnotations() - ret_code, unknown_types = annotations.import_annotations(mir_metadatas=mir_metadatas, - mir_annotation=mir_annotation, - in_sha1_file=sha1_index_abs, - mir_root=mir_root, - annotations_dir_path=anno_abs, - task_id=dst_typ_rev_tid.tid, - phase='import.others') - if ret_code != MirCode.RC_OK: - logging.error(f"import annotations error: {ret_code}") - return ret_code - if unknown_types: - if ignore_unknown_types: - logging.warning(f"unknown types: {unknown_types}") - else: - raise MirRuntimeError(MirCode.RC_CMD_UNKNOWN_TYPES, json.dumps(unknown_types)) - - # create and write tasks - task = mir_storage_ops.create_task(task_type=mirpb.TaskTypeImportData, - task_id=dst_typ_rev_tid.tid, - message=f"importing {index_file}-{anno_abs}-{gen_abs} as {dataset_name}", - unknown_types=unknown_types, - src_revs=src_revs, - dst_rev=dst_rev) - - mir_data = { - mirpb.MirStorage.MIR_METADATAS: mir_metadatas, - mirpb.MirStorage.MIR_ANNOTATIONS: mir_annotation, - } - mir_storage_ops.MirStorageOps.save_and_commit(mir_root=mir_root, - his_branch=src_typ_rev_tid.rev, - mir_branch=dst_typ_rev_tid.rev, - mir_datas=mir_data, - task=task) - - # cleanup - os.remove(sha1_index_abs) - - return MirCode.RC_OK - - -def _generate_sha_and_copy(index_file: str, sha_idx_file: str, sha_folder: str) -> int: - hash_phase_name = 'import.hash' - if not os.path.isfile(index_file): - logging.error('invalid index_file') - return MirCode.RC_CMD_INVALID_ARGS - - os.makedirs(sha_folder, exist_ok=True) - - with open(index_file) as idx_f, open(sha_idx_file, 'w') as sha_f: - lines = idx_f.readlines() - total_count = len(lines) - asset_count_limit = 1000000 - if total_count > asset_count_limit: # large number of images may trigger redis timeout error. 
- logging.error(f'# of image {total_count} exceeds upper boundary {asset_count_limit}.') - return MirCode.RC_CMD_INVALID_ARGS - - idx = 0 - for line in lines: - media_src = line.strip() - if not media_src or not os.path.isfile(media_src): - logging.warning("invalid file: ", media_src) - continue - sha1 = hash_utils.sha1sum_for_file(media_src) - sha_f.writelines("\t".join([sha1, media_src]) + '\n') - - media_dst = os.path.join(sha_folder, sha1) - if not os.path.isfile(media_dst): - shutil.copyfile(media_src, media_dst) - - idx += 1 - if idx % 5000 == 0: - PhaseLoggerCenter.update_phase(phase=hash_phase_name, local_percent=(idx / total_count)) - logging.info(f"finished {idx} / {total_count} hashes") - PhaseLoggerCenter.update_phase(phase=hash_phase_name) - return MirCode.RC_OK - - -def bind_to_subparsers(subparsers: argparse._SubParsersAction, parent_parser: argparse.ArgumentParser) -> None: - importing_arg_parser = subparsers.add_parser("import", - parents=[parent_parser], - description="use this command to import data from img/anno folder", - help="import raw data") - importing_arg_parser.add_argument("--index-file", - dest="index_file", - type=str, - help="index of input media, one file per line") - importing_arg_parser.add_argument("--annotation-dir", - dest="anno", - type=str, - required=False, - help="corresponding annotation folder") - importing_arg_parser.add_argument("--gen-dir", dest="gen", type=str, help="storage path of generated data files") - importing_arg_parser.add_argument("--dataset-name", - dest="dataset_name", - type=str, - required=False, - help="name of the dataset to be created, use tid if not set.") - importing_arg_parser.add_argument("--src-revs", dest="src_revs", type=str, help="rev: source rev") - importing_arg_parser.add_argument("--dst-rev", - dest="dst_rev", - type=str, - required=True, - help="rev@tid: destination branch name and task id") - importing_arg_parser.add_argument('-w', dest='work_dir', type=str, required=False, help='working directory') - importing_arg_parser.add_argument('--ignore-unknown-types', - dest='ignore_unknown_types', - required=False, - action='store_true', - help='ignore unknown type names in annotation files') - importing_arg_parser.set_defaults(func=CmdImport) diff --git a/ymir/command/mir/commands/infer.py b/ymir/command/mir/commands/infer.py index c033c4b679..9a4ba16cc2 100644 --- a/ymir/command/mir/commands/infer.py +++ b/ymir/command/mir/commands/infer.py @@ -2,16 +2,18 @@ import json import logging import os -import subprocess import time -from typing import Any, List, Tuple, Optional +from typing import Any import yaml from mir.commands import base -from mir.tools import checker, class_ids, settings as mir_settings, utils as mir_utils +from mir.tools import checker, class_ids, models +from mir.tools import settings as mir_settings +from mir.tools import env_config from mir.tools.code import MirCode from mir.tools.errors import MirRuntimeError +from mir.tools.executant import prepare_executant_env, run_docker_executant class CmdInfer(base.BaseCommand): @@ -23,7 +25,7 @@ class CmdInfer(base.BaseCommand): b. prepare_assets: copy assets in orig index.tsv into work_dir/in/candidate, and make candidate index.tsv c. prepare_model: copy model to work_dir/in/models and unpack d. prepare_config_file: generate work_dir/in/config.yaml - e. run_docker_cmd: bind paths and run docker cmd + e. _execute_locally/_execute_in_openpai: bind paths and run docker cmd About path bindings: a. 
work_dir/in/assets or cache -> /in/assets @@ -35,15 +37,22 @@ class CmdInfer(base.BaseCommand): def run(self) -> int: logging.debug("command infer: %s", self.args) + work_dir_in_model = os.path.join(self.args.work_dir, 'in', 'models') + model_hash, stage_name = models.parse_model_hash_stage(self.args.model_hash_stage) + model_storage = models.prepare_model(model_location=self.args.model_location, + model_hash=model_hash, + stage_name=stage_name, + dst_model_path=work_dir_in_model) + return CmdInfer.run_with_args(work_dir=self.args.work_dir, mir_root=self.args.mir_root, - media_path=self.args.work_dir, - model_location=self.args.model_location, - model_hash=self.args.model_hash, + media_path=os.path.join(self.args.work_dir, 'assets'), + model_storage=model_storage, index_file=self.args.index_file, config_file=self.args.config_file, executor=self.args.executor, executant_name=self.args.executant_name, + run_as_root=self.args.run_as_root, run_infer=True, run_mining=False) @@ -51,14 +60,13 @@ def run(self) -> int: def run_with_args(work_dir: str, mir_root: str, media_path: str, - model_location: str, - model_hash: str, + model_storage: models.ModelStorage, index_file: str, config_file: str, executor: str, executant_name: str, + run_as_root: bool, task_id: str = f"default-infer-{time.time()}", - shm_size: str = None, run_infer: bool = False, run_mining: bool = False) -> int: """run infer command @@ -70,7 +78,7 @@ def run_with_args(work_dir: str, media_path (str): media path, all medias in `index_file` should all in this `media_path` in cmd infer, set it to work_dir, in cmd mining, set it to media_cache or work_dir model_location (str): model location - model_hash (str): model package hash (or model package name) + model_hash_stage (str): model_hash@stage_name index_file (str): index file, each line means an image abs path config_file (str): configuration file passed to infer executor executor (str): docker image name used to infer @@ -87,118 +95,79 @@ def run_with_args(work_dir: str, if not mir_root: mir_root = '.' 
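# Path-binding recap, taken from the class docstring and the removed run_docker_cmd
# below (the actual mounts are now presumably set up inside
# mir.tools.executant.run_docker_executant):
#   work_dir/in/assets (or asset cache) -> /in/assets (read-only)
#   work_dir/in/models -> /in/models (read-only)
#   work_dir/in/candidate-index.tsv -> /in/candidate-index.tsv
#   work_dir/in/config.yaml -> /in/config.yaml, work_dir/in/env.yaml -> /in/env.yaml
#   work_dir/out -> /out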
if not work_dir: - logging.error('empty --work-dir, abort') - return MirCode.RC_CMD_INVALID_ARGS - if not model_location: - logging.error('empty --model-location, abort') - return MirCode.RC_CMD_INVALID_ARGS - if not model_hash: - logging.error('empty --model-hash, abort') - return MirCode.RC_CMD_INVALID_ARGS + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message='empty --work-dir') if not index_file or not os.path.isfile(index_file): - logging.error(f"invalid --index-file: {index_file}, abort") - return MirCode.RC_CMD_INVALID_ARGS - - if not config_file: - logging.error("empty --task-config-file") - return MirCode.RC_CMD_INVALID_ARGS - if not os.path.isfile(config_file): - logging.error(f"invalid --task-config-file {config_file}, not a file, abort") - return MirCode.RC_CMD_INVALID_ARGS - + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, + error_message=f"invalid --index-file: {index_file}") + if not config_file or not os.path.isfile(config_file): + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, + error_message=f"invalid --task-config-file: {config_file}") if not run_infer and not run_mining: - logging.warning('invalid run_infer and run_mining: both false') - return MirCode.RC_OK - + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, + error_message='invalid run_infer and run_mining: both false') if not executor: - logging.error('empty --executor, abort') - return MirCode.RC_CMD_INVALID_ARGS + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message='empty --executor') return_code = checker.check(mir_root, [checker.Prerequisites.IS_INSIDE_MIR_REPO]) if return_code != MirCode.RC_OK: - return return_code + raise MirRuntimeError(error_code=return_code, error_message=f"check failed: {return_code}") if not executant_name: executant_name = task_id - _, work_model_path, work_out_path = _prepare_env(work_dir) - work_index_file = os.path.join(work_dir, 'in', 'candidate-index.tsv') - work_config_file = os.path.join(work_dir, 'in', 'config.yaml') - work_env_config_file = os.path.join(work_dir, 'in', 'env.yaml') + work_dir_in = os.path.join(work_dir, "in") + work_dir_out = os.path.join(work_dir, "out") + prepare_executant_env(work_dir_in=work_dir_in, work_dir_out=work_dir_out, asset_cache_dir=media_path) + + work_index_file = os.path.join(work_dir_in, 'candidate-index.tsv') + work_config_file = os.path.join(work_dir_in, 'config.yaml') + work_env_config_file = os.path.join(work_dir_in, 'env.yaml') _prepare_assets(index_file=index_file, work_index_file=work_index_file, media_path=media_path) - model_storage = mir_utils.prepare_model(model_location=model_location, - model_hash=model_hash, - dst_model_path=work_model_path) - model_names = model_storage.models class_names = model_storage.class_names if not class_names: - raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_FILE, - error_message=f"empty class names in model: {model_hash}") + raise MirRuntimeError( + error_code=MirCode.RC_CMD_INVALID_FILE, + error_message=f"empty class names in model: {model_storage.model_hash}@{model_storage.stage_name}") + model_names = model_storage.stages[model_storage.stage_name].files with open(config_file, 'r') as f: config = yaml.safe_load(f) - - prepare_config_file(config=config, - dst_config_file=work_config_file, - class_names=class_names, - task_id=task_id, - model_params_path=[os.path.join('/in/models', name) for name in model_names], - run_infer=run_infer, - run_mining=run_mining) - - 
mir_utils.generate_mining_infer_env_config_file(task_id=task_id, - run_mining=run_mining, - run_infer=run_infer, - env_config_file_path=work_env_config_file) - - available_gpu_id: str = config.get(mir_settings.TASK_CONTEXT_KEY, {}).get('available_gpu_id', '') - - run_docker_cmd(asset_path=media_path, - index_file_path=work_index_file, - model_path=work_model_path, - config_file_path=work_config_file, - env_file_path=work_env_config_file, - out_path=work_out_path, - executor=executor, - executant_name=executant_name, - shm_size=shm_size, - task_type=task_id, - gpu_id=available_gpu_id) + prepare_config_file( + config=config, + dst_config_file=work_config_file, + class_names=class_names, + task_id=task_id, + model_params_path=[os.path.join('/in/models', model_storage.stage_name, name) for name in model_names], + run_infer=run_infer, + run_mining=run_mining) + + env_config.generate_mining_infer_env_config_file(task_id=task_id, + run_mining=run_mining, + run_infer=run_infer, + env_config_file_path=work_env_config_file) + + task_config = config.get(mir_settings.TASK_CONTEXT_KEY, {}) + run_docker_executant( + work_dir_in=work_dir_in, + work_dir_out=work_dir_out, + executor=executor, + executant_name=executant_name, + executor_config=config[mir_settings.EXECUTOR_CONFIG_KEY], + gpu_id=task_config.get('available_gpu_id', ""), + run_as_root=run_as_root, + task_config=task_config, + ) if run_infer: - _process_infer_results(infer_result_file=os.path.join(work_out_path, 'infer-result.json'), - max_boxes=_get_max_boxes(config_file), mir_root=mir_root) + _process_infer_results(infer_result_file=os.path.join(work_dir_out, 'infer-result.json'), + max_boxes=_get_max_boxes(config_file), + mir_root=mir_root) return MirCode.RC_OK -def _prepare_env(work_dir: str) -> Tuple[str, str, str]: - """ - make the following dir structures: - * work_dir - * in - * assets - * models - * out - - if work_dir already exists, do nothing - - Args: - work_dir (str): work dir root - """ - os.makedirs(os.path.join(work_dir, 'in'), exist_ok=True) - work_assets_path = os.path.join(work_dir, 'in', 'assets') - work_model_path = os.path.join(work_dir, 'in', 'models') - work_out_path = os.path.join(work_dir, 'out') - os.makedirs(work_assets_path, exist_ok=True) - os.makedirs(work_model_path, exist_ok=True) - os.makedirs(work_out_path, exist_ok=True) - - return work_assets_path, work_model_path, work_out_path - - def _prepare_assets(index_file: str, work_index_file: str, media_path: str) -> None: """ generates in container index file @@ -254,16 +223,17 @@ def _process_infer_results(infer_result_file: str, max_boxes: int, mir_root: str with open(infer_result_file, 'r') as f: results = json.loads(f.read()) - class_id_mgr = class_ids.ClassIdManager(mir_root=mir_root) + class_id_mgr = class_ids.load_or_create_userlabels(mir_root=mir_root) + + for _, annotations_dict in results.get('detection', {}).items(): + # Compatible with previous version of format. 
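+ # e.g. (hypothetical payloads) an old-format infer result file:
+ #   {"detection": {"a.jpg": {"annotations": [{"class_name": "cat", "score": 0.9}]}}}
+ # and the current format, keyed by 'boxes':
+ #   {"detection": {"a.jpg": {"boxes": [{"class_name": "cat", "score": 0.9}]}}}
+ # only 'class_name' and 'score' are consulted by the filtering below.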
+ annotations = annotations_dict.get('boxes') or annotations_dict.get('annotations') + if not isinstance(annotations, list): + continue - if 'detection' in results: - names_annotations_dict = results['detection'] - for _, annotations_dict in names_annotations_dict.items(): - if 'annotations' in annotations_dict and isinstance(annotations_dict['annotations'], list): - annotations_list: List[dict] = annotations_dict['annotations'] - annotations_list.sort(key=(lambda x: x['score']), reverse=True) - annotations_list = [a for a in annotations_list if class_id_mgr.has_name(a['class_name'])] - annotations_dict['annotations'] = annotations_list[:max_boxes] + annotations.sort(key=(lambda x: x['score']), reverse=True) + annotations = [a for a in annotations if class_id_mgr.has_name(a['class_name'])] + annotations_dict['boxes'] = annotations[:max_boxes] with open(infer_result_file, 'w') as f: f.write(json.dumps(results, indent=4)) @@ -294,36 +264,6 @@ def prepare_config_file(config: dict, dst_config_file: str, **kwargs: Any) -> No yaml.dump(executor_config, f) -def run_docker_cmd(asset_path: str, index_file_path: str, model_path: str, config_file_path: str, env_file_path: str, - out_path: str, executor: str, executant_name: str, shm_size: Optional[str], task_type: str, - gpu_id: str) -> int: - """ runs infer or mining docker container """ - cmd = ['nvidia-docker', 'run', '--rm'] - # path bindings - cmd.append(f"-v{asset_path}:/in/assets:ro") - cmd.append(f"-v{model_path}:/in/models:ro") - cmd.append(f"-v{index_file_path}:/in/candidate-index.tsv") - cmd.append(f"-v{config_file_path}:/in/config.yaml") - cmd.append(f"-v{env_file_path}:/in/env.yaml") - cmd.append(f"-v{out_path}:/out") - # permissions and shared memory - cmd.extend(['--user', f"{os.getuid()}:{os.getgid()}"]) - if gpu_id: - cmd.extend(['--gpus', f"\"device={gpu_id}\""]) - if shm_size: - cmd.append(f"--shm-size={shm_size}") - cmd.extend(['--name', executant_name]) - cmd.append(executor) - - out_log_path = os.path.join(out_path, mir_settings.EXECUTOR_OUTLOG_NAME) - logging.info(f"starting {task_type} docker container with cmd: {' '.join(cmd)}") - with open(out_log_path, 'a') as f: - # run and wait, if non-zero value returned, raise - subprocess.run(cmd, check=True, stdout=f, stderr=f, text=True) - - return MirCode.RC_OK - - # public: cli bind def bind_to_subparsers(subparsers: argparse._SubParsersAction, parent_parser: argparse.ArgumentParser) -> None: infer_arg_parser = subparsers.add_parser('infer', @@ -335,17 +275,17 @@ def bind_to_subparsers(subparsers: argparse._SubParsersAction, parent_parser: ar required=True, dest='work_dir', type=str, - help='work place for mining and monitoring') + help='work place for this command, all images should be put under /assets') infer_arg_parser.add_argument('--model-location', required=True, dest='model_location', type=str, help='model storage location for models') infer_arg_parser.add_argument('--model-hash', - dest='model_hash', + dest='model_hash_stage', type=str, required=True, - help='model hash to be used') + help='model hash@stage to be used') infer_arg_parser.add_argument('--task-config-file', dest='config_file', type=str, @@ -361,4 +301,8 @@ def bind_to_subparsers(subparsers: argparse._SubParsersAction, parent_parser: ar dest='executant_name', type=str, help='docker container name for infer or mining') + infer_arg_parser.add_argument("--run-as-root", + dest="run_as_root", + action='store_true', + help="run executor as root user") infer_arg_parser.set_defaults(func=CmdInfer) diff --git
a/ymir/command/mir/commands/init.py b/ymir/command/mir/commands/init.py index 0f14fa769a..a73baa82af 100644 --- a/ymir/command/mir/commands/init.py +++ b/ymir/command/mir/commands/init.py @@ -53,13 +53,18 @@ def run_with_args(mir_root: str, empty_rev: str) -> int: if return_code != MirCode.RC_OK: return return_code - class_ids.create_empty_if_not_exists(mir_root=mir_root) + class_ids.load_or_create_userlabels(mir_root=mir_root, create_ok=True) repo_git = scm.Scm(root_dir=mir_root, scm_executable='git') repo_git.init() repo_git.config(['core.fileMode', 'false']) CmdInit.__update_ignore(mir_root=mir_root, git=repo_git, ignored_items=['.mir_lock', '.mir']) + + with open(os.path.join(mir_root, '.gitattributes'), 'w') as f: + f.write('*.mir binary\n') + repo_git.add('.') + repo_git.commit(["-m", "first commit"]) # creates an empty dataset if empty_rev provided diff --git a/ymir/command/mir/commands/log.py b/ymir/command/mir/commands/log.py deleted file mode 100644 index 923f796b2d..0000000000 --- a/ymir/command/mir/commands/log.py +++ /dev/null @@ -1,60 +0,0 @@ -import argparse -import logging - -from mir import scm -from mir.commands import base -from mir.tools import checker -from mir.tools.code import MirCode - - -class CmdLog(base.BaseCommand): - @staticmethod - def run_with_args(mir_root: str, - decorate: bool, - oneline: bool, - graph: bool, - dog: bool, - with_stdout: bool = False) -> int: - return_code = checker.check(mir_root, [checker.Prerequisites.IS_INSIDE_MIR_REPO]) - if return_code != MirCode.RC_OK: - return return_code - - cmd_opts = [] - if dog: - decorate = True - oneline = True - graph = True - if decorate: - cmd_opts.append("--decorate") - if oneline: - cmd_opts.append("--oneline") - if graph: - cmd_opts.append("--graph") - - repo_git = scm.Scm(mir_root, scm_executable="git") - output_str = repo_git.log(cmd_opts, with_stdout=with_stdout) - if output_str: - logging.info("\n%s" % output_str) - - return MirCode.RC_OK - - def run(self) -> int: - logging.debug("command log: %s", self.args) - - return CmdLog.run_with_args(mir_root=self.args.mir_root, - decorate=self.args.decorate, - oneline=self.args.oneline, - graph=self.args.graph, - dog=self.args.dog) - - -def bind_to_subparsers(subparsers: argparse._SubParsersAction, parent_parser: argparse.ArgumentParser) -> None: - log_arg_parser = subparsers.add_parser("log", - parents=[parent_parser], - description="use this command to show mir repo log", - help="show mir repo log") - log_arg_parser.add_argument("--oneline", help="print log in one line", action="store_true") - log_arg_parser.add_argument("--decorate", help="print log in a pretty way", action="store_true") - log_arg_parser.add_argument("--graph", help="print log in a graphic way", action="store_true") - log_arg_parser.add_argument("--dog", help="print log in one line graphic pretty way", action="store_true") - log_arg_parser.set_defaults(func=CmdLog) diff --git a/ymir/command/mir/commands/merge.py b/ymir/command/mir/commands/merge.py index 45fbd69bcd..f8ca784b58 100644 --- a/ymir/command/mir/commands/merge.py +++ b/ymir/command/mir/commands/merge.py @@ -74,37 +74,63 @@ def _merge_annotations(host_mir_annotations: mirpb.MirAnnotations, guest_mir_ann if not host_mir_annotations or not guest_mir_annotations: raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message="input host/guest mir_annotations is invalid") - if not host_mir_annotations.head_task_id: - raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, - error_message="no head_task_id found in 
host_mir_annotations") - if len(guest_mir_annotations.task_annotations) == 0: - logging.warning('empty guest_mir_annotations') - return - - task_id = host_mir_annotations.head_task_id - host_image_annotations = host_mir_annotations.task_annotations[host_mir_annotations.head_task_id].image_annotations - guest_image_annotations = guest_mir_annotations.task_annotations[ - guest_mir_annotations.head_task_id].image_annotations - host_only_ids, guest_only_ids, joint_ids = _match_asset_ids(set(host_image_annotations.keys()), - set(guest_image_annotations.keys())) + _merge_pair_annotations(host_annotation=host_mir_annotations.prediction, + guest_annotation=guest_mir_annotations.prediction, + target_annotation=host_mir_annotations.prediction, + strategy=strategy) + host_mir_annotations.prediction.eval_class_ids.extend(guest_mir_annotations.prediction.eval_class_ids) + + _merge_pair_annotations(host_annotation=host_mir_annotations.ground_truth, + guest_annotation=guest_mir_annotations.ground_truth, + target_annotation=host_mir_annotations.ground_truth, + strategy=strategy) + + _merge_annotation_image_cks(host_mir_annotations=host_mir_annotations, + guest_mir_annotations=guest_mir_annotations, + target_mir_annotations=host_mir_annotations, + strategy=strategy) + + +def _merge_pair_annotations(host_annotation: mirpb.SingleTaskAnnotations, guest_annotation: mirpb.SingleTaskAnnotations, + target_annotation: mirpb.SingleTaskAnnotations, strategy: str) -> None: + host_only_ids, guest_only_ids, joint_ids = _match_asset_ids(set(host_annotation.image_annotations.keys()), + set(guest_annotation.image_annotations.keys())) + + if strategy == "stop" and joint_ids: + raise MirRuntimeError(error_code=MirCode.RC_CMD_MERGE_ERROR, + error_message='found conflict annotations in strategy stop') + + for asset_id in host_only_ids: + target_annotation.image_annotations[asset_id].CopyFrom(host_annotation.image_annotations[asset_id]) + for asset_id in guest_only_ids: + target_annotation.image_annotations[asset_id].CopyFrom(guest_annotation.image_annotations[asset_id]) + for asset_id in joint_ids: + if strategy.lower() == "host": + if asset_id not in target_annotation.image_annotations: + target_annotation.image_annotations[asset_id].CopyFrom(host_annotation.image_annotations[asset_id]) + elif strategy.lower() == "guest": + target_annotation.image_annotations[asset_id].CopyFrom(guest_annotation.image_annotations[asset_id]) + + +def _merge_annotation_image_cks(host_mir_annotations: mirpb.MirAnnotations, guest_mir_annotations: mirpb.MirAnnotations, + target_mir_annotations: mirpb.MirAnnotations, strategy: str) -> None: + host_only_ids, guest_only_ids, joint_ids = _match_asset_ids(set(host_mir_annotations.image_cks.keys()), + set(guest_mir_annotations.image_cks.keys())) if strategy == "stop" and joint_ids: - raise MirRuntimeError(error_code=MirCode.RC_CMD_MERGE_ERROR, error_message='found conflicts in strategy stop') + raise MirRuntimeError(error_code=MirCode.RC_CMD_MERGE_ERROR, + error_message='found conflict image cks in strategy stop') for asset_id in host_only_ids: - host_mir_annotations.task_annotations[task_id].image_annotations[asset_id].CopyFrom( - host_image_annotations[asset_id]) + target_mir_annotations.image_cks[asset_id].CopyFrom(host_mir_annotations.image_cks[asset_id]) for asset_id in guest_only_ids: - host_mir_annotations.task_annotations[task_id].image_annotations[asset_id].CopyFrom( - guest_image_annotations[asset_id]) + 
target_mir_annotations.image_cks[asset_id].CopyFrom(guest_mir_annotations.image_cks[asset_id]) for asset_id in joint_ids: if strategy.lower() == "host": - if asset_id not in host_mir_annotations.task_annotations[task_id].image_annotations: - host_mir_annotations.task_annotations[task_id].image_annotations[asset_id].CopyFrom( - host_image_annotations[asset_id]) + if asset_id not in target_mir_annotations.image_cks: + target_mir_annotations.image_cks[asset_id].CopyFrom(host_mir_annotations.image_cks[asset_id]) elif strategy.lower() == "guest": - host_mir_annotations.task_annotations[task_id].image_annotations[asset_id].CopyFrom( - guest_image_annotations[asset_id]) + target_mir_annotations.image_cks[asset_id].CopyFrom(guest_mir_annotations.image_cks[asset_id]) def _get_union_keywords(host_keywords: Any, guest_keywords: Any, strategy: str) -> set: @@ -140,8 +166,8 @@ def _tvt_type_from_str(typ: str) -> 'mirpb.TvtType.V': raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message=f"invalid typ: {typ}") -def _merge_to_mir(host_mir_metadatas: mirpb.MirMetadatas, host_mir_annotations: mirpb.MirAnnotations, - mir_root: str, guest_typ_rev_tid: revs_parser.TypRevTid, strategy: str) -> int: +def _merge_to_mir(host_mir_metadatas: mirpb.MirMetadatas, host_mir_annotations: mirpb.MirAnnotations, mir_root: str, + guest_typ_rev_tid: revs_parser.TypRevTid, strategy: str) -> int: """ merge contents in `guest_typ_rev_tid` to `host_mir_xxx` @@ -195,8 +221,8 @@ def _merge_to_mir(host_mir_metadatas: mirpb.MirMetadatas, host_mir_annotations: return MirCode.RC_OK -def _exclude_from_mir(host_mir_metadatas: mirpb.MirMetadatas, host_mir_annotations: mirpb.MirAnnotations, - mir_root: str, branch_id: str, task_id: str) -> int: +def _exclude_from_mir(host_mir_metadatas: mirpb.MirMetadatas, host_mir_annotations: mirpb.MirAnnotations, mir_root: str, + branch_id: str, task_id: str) -> int: if not branch_id: raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message='empty branch id') if not host_mir_metadatas: @@ -213,8 +239,15 @@ def _exclude_from_mir(host_mir_metadatas: mirpb.MirMetadatas, host_mir_annotatio for asset_id in id_joint: del host_mir_metadatas.attributes[asset_id] - if asset_id in host_mir_annotations.task_annotations[host_mir_annotations.head_task_id].image_annotations: - del host_mir_annotations.task_annotations[host_mir_annotations.head_task_id].image_annotations[asset_id] + if asset_id in host_mir_annotations.prediction.image_annotations: + del host_mir_annotations.prediction.image_annotations[asset_id] + + if asset_id in host_mir_annotations.ground_truth.image_annotations: + del host_mir_annotations.ground_truth.image_annotations[asset_id] + + if asset_id in host_mir_annotations.image_cks: + del host_mir_annotations.image_cks[asset_id] + return MirCode.RC_OK @@ -240,12 +273,18 @@ def run_with_args(mir_root: str, src_revs: str, ex_src_revs: str, dst_rev: str, return return_code # Read host id mir data. 
- host_mir_metadatas = mirpb.MirMetadatas() - host_mir_annotations = mirpb.MirAnnotations() - - host_mir_annotations.head_task_id = dst_typ_rev_tid.tid - - for typ_rev_tid in src_typ_rev_tids: + host_typ_rev_tid = src_typ_rev_tids[0] + [host_mir_metadatas, host_mir_annotations + ] = mir_storage_ops.MirStorageOps.load_multiple_storages(mir_root=mir_root, + mir_branch=host_typ_rev_tid.rev, + mir_task_id=host_typ_rev_tid.tid, + ms_list=[mirpb.MIR_METADATAS, mirpb.MIR_ANNOTATIONS], + as_dict=False) + host_tvt_type = _tvt_type_from_str(host_typ_rev_tid.typ) + for asset_id in host_mir_metadatas.attributes: + host_mir_metadatas.attributes[asset_id].tvt_type = host_tvt_type + + for typ_rev_tid in src_typ_rev_tids[1:]: ret = _merge_to_mir(host_mir_metadatas=host_mir_metadatas, host_mir_annotations=host_mir_annotations, mir_root=mir_root, @@ -253,6 +292,9 @@ def run_with_args(mir_root: str, src_revs: str, ex_src_revs: str, dst_rev: str, strategy=strategy) if ret != MirCode.RC_OK: return ret + host_mir_annotations.prediction.model.Clear() + host_mir_annotations.prediction.executor_config = '' + host_mir_annotations.prediction.eval_class_ids[:] = set(host_mir_annotations.prediction.eval_class_ids) ex_typ_rev_tids = revs_parser.parse_arg_revs(ex_src_revs) if ex_src_revs else [] for typ_rev_tid in ex_typ_rev_tids: diff --git a/ymir/command/mir/commands/mining.py b/ymir/command/mir/commands/mining.py index b15ef1661a..3b7b7a111e 100644 --- a/ymir/command/mir/commands/mining.py +++ b/ymir/command/mir/commands/mining.py @@ -6,11 +6,11 @@ from typing import Dict, Optional, Set from google.protobuf import json_format -import yaml from mir.commands import base, infer from mir.protos import mir_command_pb2 as mirpb -from mir.tools import checker, class_ids, data_exporter, mir_storage_ops, revs_parser, utils as mir_utils +from mir.tools import annotations, checker, class_ids, env_config, exporter +from mir.tools import mir_storage_ops, models, revs_parser from mir.tools.code import MirCode from mir.tools.command_run_in_out import command_run_in_out from mir.tools.errors import MirContainerError, MirRuntimeError @@ -35,14 +35,15 @@ def run(self) -> int: src_revs=self.args.src_revs, dst_rev=self.args.dst_rev, mir_root=self.args.mir_root, - model_hash=self.args.model_hash, + model_hash_stage=self.args.model_hash_stage, model_location=self.args.model_location, media_location=self.args.media_location, config_file=self.args.config_file, topk=self.args.topk, - add_annotations=self.args.add_annotations, + add_prediction=self.args.add_prediction, executor=self.args.executor, - executant_name=self.args.executant_name) + executant_name=self.args.executant_name, + run_as_root=self.args.run_as_root) @staticmethod @command_run_in_out @@ -51,14 +52,15 @@ def run_with_args(work_dir: str, src_revs: str, dst_rev: str, mir_root: str, - model_hash: str, + model_hash_stage: str, media_location: str, model_location: str, config_file: str, executor: str, executant_name: str, + run_as_root: bool, topk: int = None, - add_annotations: bool = False) -> int: + add_prediction: bool = False) -> int: """ runs a mining task \n Args: @@ -67,13 +69,13 @@ def run_with_args(work_dir: str, src_revs: data branch name and base task id dst_rev: destination branch name and task id mir_root: mir repo path, in order to run in non-mir folder. - model_hash: used to target model, use prep_tid if non-set + model_hash_stage: model_hash@stage_name media_location, model_location: location of assets. 
config_file: path to the config file executor: executor name, currently, the docker image name executant_name: docker container name topk: top k assets you want to select in the result workspace, positive integer or None (no mining) - add_annotations: if true, write new annotations into annotations.mir + add_prediction: if true, write new predictions into annotations.mir Returns: error code """ @@ -85,8 +87,8 @@ def run_with_args(work_dir: str, if not media_location or not model_location: logging.error('media or model location cannot be none!') return MirCode.RC_CMD_INVALID_ARGS - if not model_hash: - logging.error('model_hash is required.') + if not model_hash_stage: + logging.error('model_hash_stage is required.') return MirCode.RC_CMD_INVALID_ARGS src_typ_rev_tid = revs_parser.parse_single_arg_rev(src_revs, need_tid=False) @@ -141,38 +143,50 @@ def run_with_args(work_dir: str, logging.error('mining environment prepare error!') return ret - _prepare_assets(mir_metadatas=mir_metadatas, - mir_root=mir_root, - src_rev_tid=src_typ_rev_tid, - media_location=media_location, - work_asset_path=work_asset_path, - work_index_file=work_index_file) + # export assets. + # mining exports absolute asset paths, which will be converted to in-docker paths in infer.py. + ec = mirpb.ExportConfig(asset_format=mirpb.AssetFormat.AF_RAW, + asset_dir=work_asset_path, + asset_index_file=work_index_file, + media_location=media_location, + need_sub_folder=True, + anno_format=mirpb.AnnoFormat.AF_NO_ANNOTATION,) + export_code = exporter.export_mirdatas_to_dir( + mir_metadatas=mir_metadatas, + ec=ec, + ) + if export_code != MirCode.RC_OK: + return export_code + + model_hash, stage_name = models.parse_model_hash_stage(model_hash_stage) + model_storage = models.prepare_model(model_location=model_location, + model_hash=model_hash, + stage_name=stage_name, + dst_model_path=work_model_path) return_code = MirCode.RC_OK return_msg = '' try: - infer.CmdInfer.run_with_args(work_dir=work_dir, - mir_root=mir_root, - media_path=work_asset_path, - model_location=model_location, - model_hash=model_hash, - index_file=work_index_file, - config_file=config_file, - task_id=dst_typ_rev_tid.tid, - shm_size=_get_shm_size(config_file), - executor=executor, - executant_name=executant_name, - run_infer=add_annotations, - run_mining=(topk is not None)) + return_code = infer.CmdInfer.run_with_args(work_dir=work_dir, + mir_root=mir_root, + media_path=work_asset_path, + model_storage=model_storage, + index_file=work_index_file, + config_file=config_file, + task_id=dst_typ_rev_tid.tid, + executor=executor, + executant_name=executant_name, + run_as_root=run_as_root, + run_infer=add_prediction, + run_mining=(topk is not None)) except CalledProcessError: return_code = MirCode.RC_CMD_CONTAINER_ERROR - return_msg = mir_utils.collect_executor_outlog_tail(work_dir=work_dir) + return_msg = env_config.collect_executor_outlog_tail(work_dir=work_dir) # catch other exceptions in command_run_in_out task = mir_storage_ops.create_task(task_type=mirpb.TaskTypeMining, task_id=dst_typ_rev_tid.tid, - message='mining', - model_hash=model_hash, + message=f"mining with model: {model_hash_stage}", src_revs=src_typ_rev_tid.rev_tid, dst_rev=dst_typ_rev_tid.rev_tid, return_code=return_code, @@ -186,7 +200,8 @@ def run_with_args(work_dir: str, dst_typ_rev_tid=dst_typ_rev_tid, src_typ_rev_tid=src_typ_rev_tid, topk=topk, - add_annotations=add_annotations, + add_prediction=add_prediction, + model_storage=model_storage, task=task) logging.info(f"mining done, results at: {work_out_path}")
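# --- Editor's note (not part of the patch) --------------------------------
# Both `mir infer` and `mir mining` now accept a combined "hash@stage" value
# through --model-hash, which run_with_args forwards to
# models.parse_model_hash_stage. The sketch below only illustrates the assumed
# split behavior; the real implementation lives in mir.tools.models and may
# validate its input more strictly.
def _parse_model_hash_stage_sketch(model_hash_stage: str) -> tuple:
    """Illustrative only: 'abc123@epoch-20' -> ('abc123', 'epoch-20')."""
    model_hash, _, stage_name = model_hash_stage.partition('@')
    return model_hash, stage_name
# Example: _parse_model_hash_stage_sketch('abc123@epoch-20') == ('abc123', 'epoch-20')
# ---------------------------------------------------------------------------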
@@ -195,10 +210,13 @@ def run_with_args(work_dir: str, # protected: post process def _process_results(mir_root: str, export_out: str, dst_typ_rev_tid: revs_parser.TypRevTid, - src_typ_rev_tid: revs_parser.TypRevTid, topk: Optional[int], add_annotations: bool, - task: mirpb.Task) -> int: + src_typ_rev_tid: revs_parser.TypRevTid, topk: Optional[int], add_prediction: bool, + model_storage: models.ModelStorage, task: mirpb.Task) -> int: # step 1: build topk results: # read old + mir_metadatas: mirpb.MirMetadatas + mir_annotations: mirpb.MirAnnotations + [mir_metadatas, mir_annotations] = mir_storage_ops.MirStorageOps.load_multiple_storages( mir_root=mir_root, mir_branch=src_typ_rev_tid.rev, @@ -213,10 +231,9 @@ def _process_results(mir_root: str, export_out: str, dst_typ_rev_tid: revs_parse if topk is not None else set(mir_metadatas.attributes.keys())) infer_result_file_path = os.path.join(export_out, 'infer-result.json') - cls_id_mgr = class_ids.ClassIdManager(mir_root=mir_root) - asset_id_to_annotations = (_get_infer_annotations(file_path=infer_result_file_path, - asset_ids_set=asset_ids_set, - cls_id_mgr=cls_id_mgr) if add_annotations else {}) + cls_id_mgr = class_ids.load_or_create_userlabels(mir_root=mir_root) + asset_id_to_annotations = (_get_infer_annotations( + file_path=infer_result_file_path, asset_ids_set=asset_ids_set, cls_id_mgr=cls_id_mgr) if add_prediction else {}) # step 2: update mir data files # update mir metadatas @@ -225,21 +242,35 @@ def _process_results(mir_root: str, export_out: str, dst_typ_rev_tid: revs_parse matched_mir_metadatas.attributes[asset_id].CopyFrom(mir_metadatas.attributes[asset_id]) logging.info(f"matched: {len(matched_mir_metadatas.attributes)}, overriding metadatas.mir") - # update mir annotations + # update mir annotations: predictions matched_mir_annotations = mirpb.MirAnnotations() - matched_task_annotation = matched_mir_annotations.task_annotations[dst_typ_rev_tid.tid] - if add_annotations: + prediction = matched_mir_annotations.prediction + prediction.type = mirpb.AnnoType.AT_DET_BOX + if add_prediction: # add new for asset_id, single_image_annotations in asset_id_to_annotations.items(): - matched_task_annotation.image_annotations[asset_id].CopyFrom(single_image_annotations) + prediction.image_annotations[asset_id].CopyFrom(single_image_annotations) + prediction.eval_class_ids[:] = set( + cls_id_mgr.id_for_names(model_storage.class_names, drop_unknown_names=True)[0]) + prediction.executor_config = json.dumps(model_storage.executor_config) + prediction.model.CopyFrom(model_storage.get_model_meta()) else: # use old - task_annotation = mir_annotations.task_annotations[mir_annotations.head_task_id] - joint_asset_ids_set = set(task_annotation.image_annotations.keys()) & asset_ids_set - for asset_id in joint_asset_ids_set: - matched_task_annotation.image_annotations[asset_id].CopyFrom(task_annotation.image_annotations[asset_id]) - - # mir_keywords: auto generated from mir_annotations, so do nothing + pred_asset_ids = set(mir_annotations.prediction.image_annotations.keys()) & asset_ids_set + for asset_id in pred_asset_ids: + prediction.image_annotations[asset_id].CopyFrom(mir_annotations.prediction.image_annotations[asset_id]) + annotations.copy_annotations_pred_meta(src_task_annotations=mir_annotations.prediction, + dst_task_annotations=prediction) + + # update mir annotations: ground truth + ground_truth = matched_mir_annotations.ground_truth + gt_asset_ids = set(mir_annotations.ground_truth.image_annotations.keys()) & asset_ids_set + for asset_id in 
gt_asset_ids: + ground_truth.image_annotations[asset_id].CopyFrom(mir_annotations.ground_truth.image_annotations[asset_id]) + + image_ck_asset_ids = set(mir_annotations.image_cks.keys() & asset_ids_set) + for asset_id in image_ck_asset_ids: + matched_mir_annotations.image_cks[asset_id].CopyFrom(mir_annotations.image_cks[asset_id]) # step 3: store results and commit. mir_datas = { @@ -275,37 +306,40 @@ def _get_topk_asset_ids(file_path: str, topk: int) -> Set[str]: def _get_infer_annotations(file_path: str, asset_ids_set: Set[str], - cls_id_mgr: class_ids.ClassIdManager) -> Dict[str, mirpb.SingleImageAnnotations]: + cls_id_mgr: class_ids.UserLabels) -> Dict[str, mirpb.SingleImageAnnotations]: asset_id_to_annotations: dict = {} with open(file_path, 'r') as f: results = json.loads(f.read()) - if 'detection' not in results or not isinstance(results['detection'], dict): + detections = results.get('detection') + if not isinstance(detections, dict): logging.error('invalid infer-result.json') return asset_id_to_annotations - names_annotations_dict = results['detection'] - for asset_name, annotations_dict in names_annotations_dict.items(): - if 'annotations' not in annotations_dict or not isinstance(annotations_dict['annotations'], list): + for asset_name, annotations_dict in detections.items(): + annotations = annotations_dict.get('boxes') + if not isinstance(annotations, list): + logging.error(f"invalid annotations: {annotations}") continue + asset_id = os.path.splitext(os.path.basename(asset_name))[0] if asset_id not in asset_ids_set: - logging.info(f"unknown asset name: {asset_name}, ignore") continue + single_image_annotations = mirpb.SingleImageAnnotations() idx = 0 - for annotation_dict in annotations_dict['annotations']: + for annotation_dict in annotations: class_id = cls_id_mgr.id_and_main_name_for_name(name=annotation_dict['class_name'])[0] # ignore unknown class ids if class_id < 0: continue - annotation = mirpb.Annotation() + annotation = mirpb.ObjectAnnotation() annotation.index = idx json_format.ParseDict(annotation_dict['box'], annotation.box) annotation.class_id = class_id annotation.score = float(annotation_dict.get('score', 0)) - single_image_annotations.annotations.append(annotation) + single_image_annotations.boxes.append(annotation) idx += 1 asset_id_to_annotations[asset_id] = single_image_annotations return asset_id_to_annotations @@ -323,32 +357,6 @@ def _prepare_env(export_root: str, work_in_path: str, work_out_path: str, work_a return MirCode.RC_OK -def _prepare_assets(mir_metadatas: mirpb.MirMetadatas, mir_root: str, src_rev_tid: revs_parser.TypRevTid, - media_location: str, work_asset_path: str, work_index_file: str) -> None: - img_list = set(mir_metadatas.attributes.keys()) - data_exporter.export(mir_root=mir_root, - assets_location=media_location, - class_type_ids={}, - asset_ids=img_list, - asset_dir=work_asset_path, - annotation_dir='', - need_ext=True, - need_id_sub_folder=True, - base_branch=src_rev_tid.rev, - base_task_id=src_rev_tid.tid, - format_type=data_exporter.ExportFormat.EXPORT_FORMAT_NO_ANNOTATION, - index_file_path=work_index_file, - index_assets_prefix=work_asset_path) - - -def _get_shm_size(mining_config_file_path: str) -> str: - with open(mining_config_file_path, 'r') as f: - mining_config = yaml.safe_load(f.read()) - if 'shm_size' not in mining_config: - return '16G' - return mining_config['shm_size'] - - # public: arg parser def bind_to_subparsers(subparsers: argparse._SubParsersAction, parent_parser: argparse.ArgumentParser) -> None: 
mining_arg_parser = subparsers.add_parser('mining', @@ -380,16 +388,16 @@ def bind_to_subparsers(subparsers: argparse._SubParsersAction, parent_parser: ar type=int, required=False, help='if set, discard samples out of topk, sorting by scores.') - mining_arg_parser.add_argument('--add-annotations', - dest='add_annotations', + mining_arg_parser.add_argument('--add-prediction', + dest='add_prediction', action='store_true', required=False, - help='if set, also add inference result to annotations') + help='if set, also add inference result') mining_arg_parser.add_argument('--model-hash', - dest='model_hash', + dest='model_hash_stage', type=str, required=True, - help='model hash to be used') + help='model hash@stage to be used') mining_arg_parser.add_argument('--src-revs', dest='src_revs', type=str, @@ -415,4 +423,8 @@ def bind_to_subparsers(subparsers: argparse._SubParsersAction, parent_parser: ar dest='executant_name', type=str, help='docker container name for mining') + mining_arg_parser.add_argument("--run-as-root", + dest="run_as_root", + action='store_true', + help="run executor as root user") mining_arg_parser.set_defaults(func=CmdMining) diff --git a/ymir/command/mir/commands/reset.py b/ymir/command/mir/commands/reset.py deleted file mode 100644 index b556386efc..0000000000 --- a/ymir/command/mir/commands/reset.py +++ /dev/null @@ -1,41 +0,0 @@ -import argparse -import logging - -from mir import scm -from mir.commands import base -from mir.tools import checker -from mir.tools.code import MirCode - - -class CmdReset(base.BaseCommand): - @staticmethod - def run_with_args(mir_root: str, reset_hard: bool) -> int: - return_code = checker.check(mir_root, - [checker.Prerequisites.IS_INSIDE_MIR_REPO, checker.Prerequisites.IS_DIRTY]) - if return_code != MirCode.RC_OK: - return return_code - - repo_git = scm.Scm(mir_root, scm_executable="git") - output_str = repo_git.reset("--hard" if reset_hard else None) - if output_str: - logging.info("\n%s" % output_str) - - return MirCode.RC_OK - - def run(self) -> int: - logging.debug("command reset: %s", self.args) - - return CmdReset.run_with_args(self.args.mir_root, self.args.reset_hard) - - -def bind_to_subparsers(subparsers: argparse._SubParsersAction, - parent_parser: argparse.ArgumentParser) -> None: - reset_arg_parser = subparsers.add_parser("reset", - parents=[parent_parser], - description="use this command to undo changes to mir repo", - help="undo changes to repo") - reset_arg_parser.add_argument("--hard", - dest="reset_hard", - action="store_true", - help="hard reset mode: undo add process and also undo the changes") - reset_arg_parser.set_defaults(func=CmdReset) diff --git a/ymir/command/mir/commands/sampling.py b/ymir/command/mir/commands/sampling.py index 95ca4e67d0..0a7f34365f 100644 --- a/ymir/command/mir/commands/sampling.py +++ b/ymir/command/mir/commands/sampling.py @@ -4,7 +4,7 @@ from mir.commands import base from mir.protos import mir_command_pb2 as mirpb -from mir.tools import mir_storage_ops, revs_parser +from mir.tools import annotations, mir_storage_ops, revs_parser from mir.tools.code import MirCode from mir.tools.command_run_in_out import command_run_in_out from mir.tools.errors import MirRuntimeError @@ -35,11 +35,13 @@ def run_with_args(mir_root: str, work_dir: str, src_revs: str, dst_rev: str, cou mir_root = '.' 
# read all - [mir_metadatas, mir_annotations, mir_tasks] = mir_storage_ops.MirStorageOps.load_multiple_storages( + mir_metadatas: mirpb.MirMetadatas + mir_annotations: mirpb.MirAnnotations + [mir_metadatas, mir_annotations] = mir_storage_ops.MirStorageOps.load_multiple_storages( mir_root=mir_root, mir_branch=src_typ_rev_tid.rev, mir_task_id=src_typ_rev_tid.tid, - ms_list=[mirpb.MirStorage.MIR_METADATAS, mirpb.MirStorage.MIR_ANNOTATIONS, mirpb.MirStorage.MIR_TASKS], + ms_list=[mirpb.MirStorage.MIR_METADATAS, mirpb.MirStorage.MIR_ANNOTATIONS], as_dict=False, ) assets_count = len(mir_metadatas.attributes) @@ -58,24 +60,25 @@ def run_with_args(mir_root: str, work_dir: str, src_revs: str, dst_rev: str, cou # sampling if sampled_assets_count < assets_count: sampled_asset_ids = random.sample(mir_metadatas.attributes.keys(), sampled_assets_count) + # sampled_mir_metadatas and sampled_mir_annotations - image_annotations = mir_annotations.task_annotations[mir_annotations.head_task_id].image_annotations sampled_mir_metadatas = mirpb.MirMetadatas() sampled_mir_annotations = mirpb.MirAnnotations() for asset_id in sampled_asset_ids: sampled_mir_metadatas.attributes[asset_id].CopyFrom(mir_metadatas.attributes[asset_id]) - if asset_id in image_annotations: - sampled_mir_annotations.task_annotations[dst_typ_rev_tid.tid].image_annotations[asset_id].CopyFrom( - image_annotations[asset_id]) + sampled_mir_annotations.prediction.image_annotations[asset_id].CopyFrom( + mir_annotations.prediction.image_annotations[asset_id]) + sampled_mir_annotations.ground_truth.image_annotations[asset_id].CopyFrom( + mir_annotations.ground_truth.image_annotations[asset_id]) else: # if equals sampled_mir_metadatas = mir_metadatas - sampled_mir_annotations = mirpb.MirAnnotations() - sampled_mir_annotations.head_task_id = dst_typ_rev_tid.tid - sampled_mir_annotations.task_annotations[dst_typ_rev_tid.tid].CopyFrom( - mir_annotations.task_annotations[mir_annotations.head_task_id]) + sampled_mir_annotations = mir_annotations + + annotations.copy_annotations_pred_meta(src_task_annotations=mir_annotations.prediction, + dst_task_annotations=sampled_mir_annotations.prediction) - # mir_tasks + # commit message = f"sampling src: {src_revs}, dst: {dst_rev}, count: {count}, rate: {rate}" task = mir_storage_ops.create_task(task_type=mirpb.TaskType.TaskTypeSampling, task_id=dst_typ_rev_tid.tid, diff --git a/ymir/command/mir/commands/show.py b/ymir/command/mir/commands/show.py index a7244c282f..18b5f7de1b 100644 --- a/ymir/command/mir/commands/show.py +++ b/ymir/command/mir/commands/show.py @@ -1,12 +1,10 @@ import argparse import logging -from typing import Any, List - from google.protobuf import json_format from mir.commands import base from mir.protos import mir_command_pb2 as mirpb -from mir.tools import checker, class_ids, context, mir_storage, mir_storage_ops, revs_parser +from mir.tools import checker, mir_storage, mir_storage_ops, revs_parser from mir.tools.code import MirCode @@ -27,18 +25,7 @@ def run_with_args(cls, mir_root: str, src_revs: str, verbose: bool) -> int: if check_code != MirCode.RC_OK: return check_code - # show infos - cls._show_cis(mir_root, src_typ_rev_tid, verbose) - cls._show_cks(mir_root, src_typ_rev_tid, verbose) - cls._show_general(mir_root, src_typ_rev_tid, verbose) - - return MirCode.RC_OK - - @classmethod - def _show_general(cls, mir_root: str, src_typ_rev_tid: revs_parser.TypRevTid, verbose: bool) -> None: - cls._show_general_context_config(mir_root=mir_root) - - [metadatas, annotations, _, tasks, + 
[metadatas, annotations, keywords, tasks, context] = mir_storage_ops.MirStorageOps.load_multiple_storages(mir_root=mir_root, mir_branch=src_typ_rev_tid.rev, mir_task_id=src_typ_rev_tid.tid, @@ -46,9 +33,11 @@ def _show_general(cls, mir_root: str, src_typ_rev_tid: revs_parser.TypRevTid, ve as_dict=False) cls._show_general_metadatas(metadatas) cls._show_general_annotations(annotations) - cls._show_general_context(context) + cls._show_general_context(context, keywords) cls._show_general_tasks(tasks, verbose) + return MirCode.RC_OK + @classmethod def _show_general_metadatas(cls, mir_metadatas: mirpb.MirMetadatas) -> None: un_tr_va_te_counts = [0, 0, 0, 0] @@ -64,26 +53,14 @@ def _show_general_metadatas(cls, mir_metadatas: mirpb.MirMetadatas) -> None: @classmethod def _show_general_annotations(cls, mir_annotations: mirpb.MirAnnotations) -> None: - hid = mir_annotations.head_task_id - print(f"annotations.mir: hid: {hid}," f" {len(mir_annotations.task_annotations[hid].image_annotations)} assets") - - @classmethod - def _show_general_context(cls, mir_context: mirpb.MirContext) -> None: - print(f"context.mir: negative assets cnt: {mir_context.negative_images_cnt}") - print(f" project negative assets cnt: {mir_context.project_negative_images_cnt}") - print(f" total assets cnt: {mir_context.images_cnt}") + print(f" pred: {len(mir_annotations.prediction.image_annotations)}," + f" gt: {len(mir_annotations.ground_truth.image_annotations)}") @classmethod - def _show_general_context_config(cls, mir_root: str) -> None: - project_class_ids = context.load(mir_root=mir_root) - if project_class_ids: - class_id_and_names: List[str] = [] - class_ids_mgr = class_ids.ClassIdManager(mir_root=mir_root) - for class_id in project_class_ids: - class_id_and_names.append(f"{class_id} ({class_ids_mgr.main_name_for_id(class_id)})") - print(f"project classes: {'; '.join(class_id_and_names)}") - else: - print('project classes: none') + def _show_general_context(cls, mir_context: mirpb.MirContext, mir_keywords: mirpb.MirKeywords) -> None: + print(f" main ck count: {len(mir_keywords.ck_idx)}") + print(f" gt tag count: {len(mir_keywords.gt_idx.tags)}") + print(f" pred tag count: {len(mir_keywords.pred_idx.tags)}") @classmethod def _show_general_tasks(cls, mir_tasks: mirpb.MirTasks, verbose: bool) -> None: @@ -93,39 +70,12 @@ def _show_general_tasks(cls, mir_tasks: mirpb.MirTasks, verbose: bool) -> None: print(f"tasks.mir: hid: {hid}, code: {task.return_code}, error msg: {task.return_msg}\n" f" model hash: {task.model.model_hash}\n" f" map: {task.model.mean_average_precision}\n" - f" executor: {task.executor}") + f" executor: {task.executor}\n" + f" stages: {list(task.model.stages.keys())}\n" + f" best stage name: {task.model.best_stage_name}") else: print(f"tasks.mir: {json_format.MessageToDict(mir_tasks, preserving_proto_field_name=True)}") - @classmethod - def _show_cis(cls, mir_root: str, src_typ_rev_tid: revs_parser.TypRevTid, verbose: bool) -> None: - mir_context: mirpb.MirContext = mir_storage_ops.MirStorageOps.load_single_storage( - mir_root=mir_root, mir_branch=src_typ_rev_tid.rev, mir_task_id=src_typ_rev_tid.tid, ms=mirpb.MIR_CONTEXT) - cls_id_mgr = class_ids.ClassIdManager(mir_root=mir_root) - if verbose: - print('predefined key ids and assets count:') - cls._show_cis_verbose(predefined_keyids_cnt=mir_context.predefined_keyids_cnt, cls_id_mgr=cls_id_mgr) - if mir_context.project_predefined_keyids_cnt: - print('project predefined key ids and assets count:') - 
cls._show_cis_verbose(predefined_keyids_cnt=mir_context.project_predefined_keyids_cnt, - cls_id_mgr=cls_id_mgr) - else: - type_names = [cls_id_mgr.main_name_for_id(ci) or '' for ci in mir_context.predefined_keyids_cnt.keys()] - print(';'.join(type_names)) - - @classmethod - def _show_cis_verbose(cls, predefined_keyids_cnt: Any, cls_id_mgr: class_ids.ClassIdManager) -> None: - for ci, cnt in predefined_keyids_cnt.items(): - main_name = cls_id_mgr.main_name_for_id(ci) - if main_name: - print(f" {main_name}: {cnt}") - else: - print(f" {ci} (unknown ci): {cnt}") - - @classmethod - def _show_cks(cls, mir_root: str, src_typ_rev_tid: revs_parser.TypRevTid, verbose: bool) -> None: - print('') # currently no customized keywords - def bind_to_subparsers(subparsers: argparse._SubParsersAction, parent_parser: argparse.ArgumentParser) -> None: show_arg_parser = subparsers.add_parser('show', @@ -133,5 +83,9 @@ def bind_to_subparsers(subparsers: argparse._SubParsersAction, parent_parser: ar description='use this command to show current workspace information', help='show current workspace information') show_arg_parser.add_argument('--verbose', dest='verbose', action='store_true', help='show verbose info') - show_arg_parser.add_argument('--src-revs', dest='src_revs', type=str, help='rev@bid: source rev and base task id') + show_arg_parser.add_argument('--src-revs', + dest='src_revs', + type=str, + default='HEAD', + help='rev@bid: source rev and base task id') show_arg_parser.set_defaults(func=CmdShow) diff --git a/ymir/command/mir/commands/training.py b/ymir/command/mir/commands/training.py index 8fad77b36e..fed8d79da4 100644 --- a/ymir/command/mir/commands/training.py +++ b/ymir/command/mir/commands/training.py @@ -2,97 +2,101 @@ import logging import os import time -import subprocess from subprocess import CalledProcessError -import traceback -from typing import Any, List, Optional, Set, Tuple +from typing import Any, Dict, List, Optional, Tuple +from mir.version import ymir_model_salient_version, YMIR_VERSION from tensorboardX import SummaryWriter import yaml from mir.commands import base from mir.protos import mir_command_pb2 as mirpb -from mir.tools import checker, class_ids, context, data_exporter, mir_storage_ops, revs_parser -from mir.tools import settings as mir_settings, utils as mir_utils +from mir.tools import checker, class_ids, env_config, exporter +from mir.tools import mir_storage_ops, models, revs_parser +from mir.tools import settings as mir_settings from mir.tools.command_run_in_out import command_run_in_out from mir.tools.code import MirCode from mir.tools.errors import MirContainerError, MirRuntimeError +from mir.tools.executant import prepare_executant_env, run_docker_executant # private: post process -def _process_model_storage(out_root: str, model_upload_location: str, executor_config: dict, - task_context: dict) -> Tuple[str, float]: +def _find_and_save_model(out_root: str, model_upload_location: str, executor_config: dict, + task_context: dict) -> models.ModelStorage: """ find and save models Returns: - model hash, model mAP and ModelStorage + ModelStorage """ out_model_dir = os.path.join(out_root, "models") - model_paths, model_mAP = _find_models(out_model_dir) - if not model_paths: - # if have no models - return '', model_mAP - - model_storage = mir_utils.ModelStorage(executor_config=executor_config, - task_context=dict(**task_context, - mAP=model_mAP, - type=mirpb.TaskType.TaskTypeTraining), - models=[os.path.basename(model_path) for model_path in model_paths]) - model_sha1 =
mir_utils.pack_and_copy_models(model_storage=model_storage, - model_dir_path=out_model_dir, - model_location=model_upload_location) - - return model_sha1, model_mAP - - -def _find_models(model_root: str) -> Tuple[List[str], float]: + model_stages, best_stage_name, attachments = _find_model_stages_and_attachments(out_model_dir) + model_storage = models.ModelStorage(executor_config=executor_config, + task_context=dict(**task_context, + mAP=model_stages[best_stage_name].mAP, + type=mirpb.TaskType.TaskTypeTraining), + stages=model_stages, + best_stage_name=best_stage_name, + attachments=attachments, + package_version=ymir_model_salient_version(YMIR_VERSION)) + models.pack_and_copy_models(model_storage=model_storage, + model_dir_path=out_model_dir, + model_location=model_upload_location) + + return model_storage + + +def _find_model_stages_and_attachments( + model_root: str) -> Tuple[Dict[str, models.ModelStageStorage], str, Dict[str, Any]]: """ - find models in `model_root`, and returns model names and mAP + find models in `model_root`, and return all model stages and attachments Args: model_root (str): model root Returns: - Tuple[List[str], float]: list of model names and map + Tuple[Dict[str, models.ModelStageStorage], str, Dict[str, Any]]: + all model stages, best model stage name, attachments """ - model_names = [] - model_mAP = 0.0 + model_stages: Dict[str, models.ModelStageStorage] = {} + best_stage_name = '' + attachments: Dict[str, Any] = {} result_yaml_path = os.path.join(model_root, "result.yaml") try: with open(result_yaml_path, "r") as f: yaml_obj = yaml.safe_load(f.read()) + if 'model' in yaml_obj: + # old training result file: read models from `model` field model_names = yaml_obj["model"] model_mAP = float(yaml_obj["map"]) - except FileNotFoundError: - logging.warning(traceback.format_exc()) - return [], 0.0 - - return ([os.path.join(model_root, os.path.basename(name)) for name in model_names], model_mAP) - - -# private: process -def _run_train_cmd(cmd: List[str], out_log_path: str) -> int: - """ - invoke training command - - Args: - cmd (str): command - out_log_path (str): path of log file - Returns: - int: MirCode.RC_OK if success + best_stage_name = 'default_best_stage' + model_stages[best_stage_name] = models.ModelStageStorage(stage_name=best_stage_name, + files=model_names, + mAP=model_mAP, + timestamp=int(time.time())) + elif 'model_stages' in yaml_obj: + # new training result file: read from model stages + for k, v in yaml_obj['model_stages'].items(): + model_stages[k] = models.ModelStageStorage(stage_name=k, + files=v['files'], + mAP=float(v['mAP']), + timestamp=v['timestamp']) + + best_stage_name = yaml_obj['best_stage_name'] + if 'attachments' in yaml_obj: + attachments = yaml_obj['attachments'] + except FileNotFoundError: + error_message = f"cannot find file: {result_yaml_path}, executor may have errors, see ymir-executor-out.log" + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_FILE, error_message=error_message) - Raises: - Exception: if out_log_path can not open for append, or cmd returned non-zero code - """ - logging.info(f"training with cmd: {cmd}") - logging.info(f"out log path: {out_log_path}") - with open(out_log_path, 'a') as f: - # run and wait, if non-zero value returned, raise - subprocess.run(cmd, check=True, stdout=f, stderr=f, text=True) + if not model_stages: + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, + error_message='cannot find model stages in result.yaml') - return
MirCode.RC_OK + return (model_stages, best_stage_name, attachments) # private: pre process @@ -112,13 +116,7 @@ def _generate_config(executor_config: Any, out_config_path: str, task_id: str, return executor_config -def _get_shm_size(executor_config: dict) -> str: - if 'shm_size' not in executor_config: - return '16G' - return executor_config['shm_size'] - - -def _prepare_pretrained_models(model_location: str, model_hash: str, dst_model_dir: str) -> List[str]: +def _prepare_pretrained_models(model_location: str, model_hash_stage: str, dst_model_dir: str) -> List[str]: """ prepare pretrained models * extract models to dst_model_dir @@ -126,19 +124,21 @@ def _prepare_pretrained_models(model_location: str, model_hash: str, dst_model_d Args: model_location (str): model location - model_hash (str): model package hash + model_hash_stage (str): model package hash@stage dst_model_dir (str): dir where you want to extract model files to Returns: - List[str]: model names + List[str]: stage_name/model_names """ - if not model_hash: + if not model_hash_stage: return [] - model_storage = mir_utils.prepare_model(model_location=model_location, - model_hash=model_hash, - dst_model_path=dst_model_dir) + model_hash, stage_name = models.parse_model_hash_stage(model_hash_stage) + model_storage = models.prepare_model(model_location=model_location, + model_hash=model_hash, + stage_name=stage_name, + dst_model_path=dst_model_dir) - return model_storage.models + return [f"{stage_name}/{file_name}" for file_name in model_storage.stages[stage_name].files] def _get_task_parameters(config: dict) -> str: @@ -152,7 +152,7 @@ def run(self) -> int: return CmdTrain.run_with_args(work_dir=self.args.work_dir, asset_cache_dir=self.args.asset_cache_dir, model_upload_location=self.args.model_path, - pretrained_model_hash=self.args.model_hash, + pretrained_model_hash_stage=self.args.model_hash_stage, src_revs=self.args.src_revs, dst_rev=self.args.dst_rev, mir_root=self.args.mir_root, @@ -160,26 +160,28 @@ def run(self) -> int: tensorboard_dir=self.args.tensorboard_dir, executor=self.args.executor, executant_name=self.args.executant_name, + run_as_root=self.args.run_as_root, config_file=self.args.config_file) @staticmethod @command_run_in_out def run_with_args(work_dir: str, - asset_cache_dir: Optional[str], + asset_cache_dir: str, model_upload_location: str, - pretrained_model_hash: str, + pretrained_model_hash_stage: str, executor: str, executant_name: str, src_revs: str, dst_rev: str, config_file: Optional[str], tensorboard_dir: str, + run_as_root: bool, mir_root: str = '.', media_location: str = '') -> int: if not model_upload_location: logging.error("empty --model-location, abort") return MirCode.RC_CMD_INVALID_ARGS - src_typ_rev_tid = revs_parser.parse_single_arg_rev(src_revs, need_tid=False) + src_typ_rev_tid = revs_parser.parse_single_arg_rev(src_revs, need_tid=True) dst_typ_rev_tid = revs_parser.parse_single_arg_rev(dst_rev, need_tid=True) if not work_dir: raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message='empty work_dir') @@ -221,61 +223,46 @@ def run_with_args(work_dir: str, task_id = dst_typ_rev_tid.tid if not executant_name: executant_name = f"default-training-{task_id}" - if not tensorboard_dir: - tensorboard_dir = os.path.join(work_dir, 'out', 'tensorboard') - asset_dir = asset_cache_dir or os.path.join(work_dir, 'in', 'assets') - - # if have model_hash, export model - pretrained_model_names = _prepare_pretrained_models(model_location=model_upload_location, - model_hash=pretrained_model_hash, -
dst_model_dir=os.path.join(work_dir, 'in', 'models')) - - # get train_ids and val_ids - train_ids = set() # type: Set[str] - val_ids = set() # type: Set[str] - unused_ids = set() # type: Set[str] - mir_metadatas: mirpb.MirMetadatas = mir_storage_ops.MirStorageOps.load_single_storage( - mir_root=mir_root, - mir_branch=src_typ_rev_tid.rev, - mir_task_id=src_typ_rev_tid.tid, - ms=mirpb.MirStorage.MIR_METADATAS) - for asset_id, asset_attr in mir_metadatas.attributes.items(): - if asset_attr.tvt_type == mirpb.TvtTypeTraining: - train_ids.add(asset_id) - elif asset_attr.tvt_type == mirpb.TvtTypeValidation: - val_ids.add(asset_id) - else: - unused_ids.add(asset_id) - if not train_ids: - raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message='no training set') - if not val_ids: - raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message='no validation set') - - if not unused_ids: - logging.info(f"training: {len(train_ids)}, validation: {len(val_ids)}") - else: - logging.warning(f"training: {len(train_ids)}, validation: {len(val_ids)}" f"unused: {len(unused_ids)}") - - # export - logging.info("exporting assets") + # setting up paths. os.makedirs(work_dir, exist_ok=True) work_dir_in = os.path.join(work_dir, "in") - work_dir_annotations = os.path.join(work_dir_in, 'annotations') - os.makedirs(work_dir_annotations, exist_ok=True) - work_dir_out = os.path.join(work_dir, "out") - os.makedirs(work_dir_out, exist_ok=True) - out_model_dir = os.path.join(work_dir, 'out', 'models') - os.makedirs(out_model_dir, exist_ok=True) - - os.makedirs(asset_dir, exist_ok=True) - os.makedirs(tensorboard_dir, exist_ok=True) + prepare_executant_env(work_dir_in=work_dir_in, + work_dir_out=work_dir_out, + asset_cache_dir=asset_cache_dir, + tensorboard_dir=tensorboard_dir) + + asset_dir = os.path.join(work_dir_in, 'assets') + work_dir_pred = os.path.join(work_dir_in, 'predictions') + work_dir_gt = os.path.join(work_dir_in, 'annotations') + tensorboard_dir = os.path.join(work_dir_out, 'tensorboard') + + docker_log_dst = os.path.join(tensorboard_dir, "executor.log") + docker_log_src = os.path.join(work_dir_out, mir_settings.EXECUTOR_OUTLOG_NAME) + open(docker_log_src, 'w').close() + os.symlink(docker_log_src, docker_log_dst) + + # if have model_hash_stage, export model + pretrained_model_stage_and_names = _prepare_pretrained_models(model_location=model_upload_location, + model_hash_stage=pretrained_model_hash_stage, + dst_model_dir=os.path.join(work_dir_in, 'models')) + + mir_metadatas: mirpb.MirMetadatas + mir_annotations: mirpb.MirAnnotations + [mir_metadatas, mir_annotations] = mir_storage_ops.MirStorageOps.load_multiple_storages( + mir_root=mir_root, + mir_branch=src_typ_rev_tid.rev, + mir_task_id=src_typ_rev_tid.tid, + ms_list=[mirpb.MirStorage.MIR_METADATAS, mirpb.MirStorage.MIR_ANNOTATIONS], + ) + # export + logging.info("exporting assets") # type names to type ids # ['cat', 'person'] -> [4, 2] - cls_mgr = class_ids.ClassIdManager(mir_root=mir_root) + cls_mgr = class_ids.load_or_create_userlabels(mir_root=mir_root) type_ids_list, unknown_names = cls_mgr.id_for_names(class_names) if not type_ids_list: logging.info(f"type ids empty, please check config file: {config_file}") @@ -284,46 +271,32 @@ def run_with_args(work_dir: str, raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message=f"unknown class names: {unknown_names}") - if not context.check_class_ids(mir_root=mir_root, current_class_ids=type_ids_list): - raise 
MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message='user class ids mismatch') - type_id_idx_mapping = {type_id: index for (index, type_id) in enumerate(type_ids_list)} - - # export train set - data_exporter.export(mir_root=mir_root, - assets_location=media_location, - class_type_ids=type_id_idx_mapping, - asset_ids=train_ids, - asset_dir=asset_dir, - annotation_dir=work_dir_annotations, - need_ext=True, - need_id_sub_folder=True, - base_branch=src_typ_rev_tid.rev, - base_task_id=src_typ_rev_tid.tid, - format_type=data_exporter.ExportFormat.EXPORT_FORMAT_ARK, - index_file_path=os.path.join(work_dir_in, 'train-index.tsv'), - index_assets_prefix='/in/assets', - index_annotations_prefix='/in/annotations') - - # export validation set - data_exporter.export(mir_root=mir_root, - assets_location=media_location, - class_type_ids=type_id_idx_mapping, - asset_ids=val_ids, - asset_dir=asset_dir, - annotation_dir=work_dir_annotations, - need_ext=True, - need_id_sub_folder=True, - base_branch=src_typ_rev_tid.rev, - base_task_id=src_typ_rev_tid.tid, - format_type=data_exporter.ExportFormat.EXPORT_FORMAT_ARK, - index_file_path=os.path.join(work_dir_in, 'val-index.tsv'), - index_assets_prefix='/in/assets', - index_annotations_prefix='/in/annotations') - - logging.info("starting train docker container") - - available_gpu_id = config.get(mir_settings.TASK_CONTEXT_KEY, {}).get('available_gpu_id', '') + anno_format, asset_format = exporter.parse_export_type(type_str=executor_config.get('export_format', '')) + ec = mirpb.ExportConfig(asset_format=asset_format, + asset_dir=asset_dir, + asset_index_file=os.path.join(work_dir_in, "idx-assets.tsv"), + asset_index_prefix="/in/assets", + media_location=media_location, + need_sub_folder=True, + anno_format=anno_format, + gt_dir=work_dir_gt, + gt_index_file=os.path.join(work_dir_in, "idx-gt.tsv"), + gt_index_prefix="/in/annotations", + pred_dir=work_dir_pred, + pred_index_file=os.path.join(work_dir_in, "idx-pred.tsv"), + pred_index_prefix="/in/predictions", + tvt_index_dir=work_dir_in,) + export_code = exporter.export_mirdatas_to_dir( + mir_metadatas=mir_metadatas, + ec=ec, + mir_annotations=mir_annotations, + class_ids_mapping=type_id_idx_mapping, + cls_id_mgr=cls_mgr, + ) + if export_code != MirCode.RC_OK: + return export_code + logging.info("finish exporting, starting train docker container") # generate configs out_config_path = os.path.join(work_dir_in, "config.yaml") @@ -331,34 +304,29 @@ def run_with_args(work_dir: str, executor_config=executor_config, out_config_path=out_config_path, task_id=task_id, - pretrained_model_params=[os.path.join('/in/models', name) for name in pretrained_model_names]) - mir_utils.generate_training_env_config_file(task_id=task_id, - env_config_file_path=os.path.join(work_dir_in, 'env.yaml')) - - # start train docker and wait - path_binds = [] - path_binds.append(f"-v{work_dir_in}:/in") # annotations, models, train-index.tsv, val-index.tsv, config.yaml - path_binds.append(f"-v{asset_dir}:/in/assets:ro") # assets - path_binds.append(f"-v{work_dir_out}:/out") - path_binds.append(f"-v{tensorboard_dir}:/out/tensorboard") - - cmd = ['nvidia-docker', 'run', '--rm', f"--shm-size={_get_shm_size(executor_config=executor_config)}"] - cmd.extend(path_binds) - if available_gpu_id: - cmd.extend(['--gpus', f"\"device={available_gpu_id}\""]) - cmd.extend(['--user', f"{os.getuid()}:{os.getgid()}"]) # run as current user - cmd.extend(['--name', f"{executant_name}"]) # executor name used to stop executor - cmd.append(executor) + 
pretrained_model_params=[os.path.join('/in/models', name) for name in pretrained_model_stage_and_names]) + env_config.generate_training_env_config_file(task_id=task_id, + env_config_file_path=os.path.join(work_dir_in, 'env.yaml')) + task_config = config.get(mir_settings.TASK_CONTEXT_KEY, {}) task_code = MirCode.RC_OK - return_msg = '' + return_msg = "" try: - _run_train_cmd(cmd, out_log_path=os.path.join(work_dir_out, mir_settings.EXECUTOR_OUTLOG_NAME)) + run_docker_executant( + work_dir_in=work_dir_in, + work_dir_out=work_dir_out, + executor=executor, + executant_name=executant_name, + executor_config=executor_config, + gpu_id=task_config.get('available_gpu_id', ""), + run_as_root=run_as_root, + task_config=task_config, + ) except CalledProcessError as e: logging.warning(f"training exception: {e}") # don't exit, proceed if model exists task_code = MirCode.RC_CMD_CONTAINER_ERROR - return_msg = mir_utils.collect_executor_outlog_tail(work_dir=work_dir) + return_msg = env_config.collect_executor_outlog_tail(work_dir=work_dir) # write executor tail to tensorboard if return_msg: @@ -366,27 +334,27 @@ def run_with_args(work_dir: str, tb_writer.add_text(tag='executor tail', text_string=f"```\n{return_msg}\n```", walltime=time.time()) # gen task_context - task_context = { + task_context = task_config + task_context.update({ 'src_revs': src_revs, 'dst_rev': dst_rev, 'executor': executor, mir_settings.PRODUCER_KEY: mir_settings.PRODUCER_NAME, mir_settings.TASK_CONTEXT_PARAMETERS_KEY: task_parameters - } + }) # save model - logging.info("saving models") - model_sha1, model_mAP = _process_model_storage(out_root=work_dir_out, - model_upload_location=model_upload_location, - executor_config=executor_config, - task_context=task_context) + logging.info(f"saving models:\n task_context: {task_context}") + model_storage = _find_and_save_model(out_root=work_dir_out, + model_upload_location=model_upload_location, + executor_config=executor_config, + task_context=task_context) # commit task task = mir_storage_ops.create_task(task_type=mirpb.TaskType.TaskTypeTraining, task_id=dst_typ_rev_tid.tid, message='training', - model_mAP=model_mAP, - model_hash=model_sha1, + model_meta=model_storage.get_model_meta(), return_code=task_code, return_msg=return_msg, serialized_task_parameters=task_parameters, @@ -401,7 +369,10 @@ def run_with_args(work_dir: str, mir_storage_ops.MirStorageOps.save_and_commit(mir_root=mir_root, mir_branch=dst_typ_rev_tid.rev, his_branch=src_typ_rev_tid.rev, - mir_datas={}, + mir_datas={ + mirpb.MirStorage.MIR_METADATAS: mirpb.MirMetadatas(), + mirpb.MirStorage.MIR_ANNOTATIONS: mirpb.MirAnnotations() + }, task=task) logging.info("training done") @@ -425,7 +396,7 @@ def bind_to_subparsers(subparsers: argparse._SubParsersAction, parent_parser: ar type=str, help="media storage location for models") train_arg_parser.add_argument('--model-hash', - dest='model_hash', + dest='model_hash_stage', type=str, required=False, help='model hash to be used') @@ -434,6 +405,7 @@ def bind_to_subparsers(subparsers: argparse._SubParsersAction, parent_parser: ar required=False, dest='asset_cache_dir', type=str, + default='', help='asset cache directory') train_arg_parser.add_argument("--executor", required=True, @@ -465,4 +437,8 @@ def bind_to_subparsers(subparsers: argparse._SubParsersAction, parent_parser: ar type=str, required=False, help="tensorboard log directory") + train_arg_parser.add_argument("--run-as-root", + dest="run_as_root", + action='store_true', + help="run executor as root user") 
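# --- Editor's note (not part of the patch) --------------------------------
# Shape of the result.yaml that the new _find_model_stages_and_attachments
# (earlier in this file) accepts. The key names come from the parsing code;
# the stage names, file names and metric values below are made-up examples.
#
# Old style (still accepted, stored under stage 'default_best_stage'):
#   model: ['model-symbol.json', 'model-0000.params']
#   map: 0.58
#
# New style:
#   model_stages:
#     epoch-10:
#       files: ['model-0010.params']
#       mAP: 0.45
#       timestamp: 1663000000
#     epoch-20:
#       files: ['model-0020.params']
#       mAP: 0.62
#       timestamp: 1663003600
#   best_stage_name: epoch-20
#   attachments: {}   # optional
# ---------------------------------------------------------------------------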
train_arg_parser.set_defaults(func=CmdTrain) diff --git a/ymir/command/mir/protos/mir_command_pb2.py b/ymir/command/mir/protos/mir_command_pb2.py index 109da61112..0b6c0021d2 100644 --- a/ymir/command/mir/protos/mir_command_pb2.py +++ b/ymir/command/mir/protos/mir_command_pb2.py @@ -18,9 +18,9 @@ name='mir_command.proto', package='mir.command', syntax='proto3', - serialized_options=None, + serialized_options=b'Z\007/protos', create_key=_descriptor._internal_create_key, - serialized_pb=b'\n\x11mir_command.proto\x12\x0bmir.command\"\xa1\x01\n\x0cMirMetadatas\x12=\n\nattributes\x18\x01 \x03(\x0b\x32).mir.command.MirMetadatas.AttributesEntry\x1aR\n\x0f\x41ttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12.\n\x05value\x18\x02 \x01(\x0b\x32\x1f.mir.command.MetadataAttributes:\x02\x38\x01\"\xe0\x01\n\x12MetadataAttributes\x12\x14\n\x0c\x64\x61taset_name\x18\x01 \x01(\t\x12)\n\ttimestamp\x18\x02 \x01(\x0b\x32\x16.mir.command.Timestamp\x12&\n\x08tvt_type\x18\x03 \x01(\x0e\x32\x14.mir.command.TvtType\x12*\n\nasset_type\x18\x04 \x01(\x0e\x32\x16.mir.command.AssetType\x12\r\n\x05width\x18\x05 \x01(\x05\x12\x0e\n\x06height\x18\x06 \x01(\x05\x12\x16\n\x0eimage_channels\x18\x07 \x01(\x05\",\n\tTimestamp\x12\r\n\x05start\x18\x01 \x01(\x03\x12\x10\n\x08\x64uration\x18\x02 \x01(\x02\"\xce\x01\n\x0eMirAnnotations\x12J\n\x10task_annotations\x18\x01 \x03(\x0b\x32\x30.mir.command.MirAnnotations.TaskAnnotationsEntry\x12\x14\n\x0chead_task_id\x18\x02 \x01(\t\x1aZ\n\x14TaskAnnotationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x31\n\x05value\x18\x02 \x01(\x0b\x32\".mir.command.SingleTaskAnnotations:\x02\x38\x01\"\xca\x01\n\x15SingleTaskAnnotations\x12S\n\x11image_annotations\x18\x01 \x03(\x0b\x32\x38.mir.command.SingleTaskAnnotations.ImageAnnotationsEntry\x1a\\\n\x15ImageAnnotationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x32\n\x05value\x18\x02 \x01(\x0b\x32#.mir.command.SingleImageAnnotations:\x02\x38\x01\"F\n\x16SingleImageAnnotations\x12,\n\x0b\x61nnotations\x18\x02 \x03(\x0b\x32\x17.mir.command.Annotation\"\\\n\nAnnotation\x12\r\n\x05index\x18\x01 \x01(\x05\x12\x1e\n\x03\x62ox\x18\x02 \x01(\x0b\x32\x11.mir.command.Rect\x12\x10\n\x08\x63lass_id\x18\x03 \x01(\x05\x12\r\n\x05score\x18\x04 \x01(\x01\"2\n\x04Rect\x12\t\n\x01x\x18\x01 \x01(\x05\x12\t\n\x01y\x18\x02 \x01(\x05\x12\t\n\x01w\x18\x03 \x01(\x05\x12\t\n\x01h\x18\x04 \x01(\x05\"\xd0\x02\n\x0bMirKeywords\x12\x38\n\x08keywords\x18\x01 \x03(\x0b\x32&.mir.command.MirKeywords.KeywordsEntry\x12T\n\x17index_predifined_keyids\x18\x06 \x03(\x0b\x32\x33.mir.command.MirKeywords.IndexPredifinedKeyidsEntry\x1a\x46\n\rKeywordsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12$\n\x05value\x18\x02 \x01(\x0b\x32\x15.mir.command.Keywords:\x02\x38\x01\x1aQ\n\x1aIndexPredifinedKeyidsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.mir.command.Assets:\x02\x38\x01J\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04J\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06\"\x1b\n\x06\x41ssets\x12\x11\n\tasset_ids\x18\x01 \x03(\t\"B\n\x08Keywords\x12\x19\n\x11predifined_keyids\x18\x01 \x03(\x05\x12\x1b\n\x13\x63ustomized_keywords\x18\x02 \x03(\t\"\x92\x01\n\x08MirTasks\x12/\n\x05tasks\x18\x01 \x03(\x0b\x32 .mir.command.MirTasks.TasksEntry\x12\x14\n\x0chead_task_id\x18\x02 \x01(\t\x1a?\n\nTasksEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12 \n\x05value\x18\x02 \x01(\x0b\x32\x11.mir.command.Task:\x02\x38\x01\"\xda\x03\n\x04Task\x12#\n\x04type\x18\x01 \x01(\x0e\x32\x15.mir.command.TaskType\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0f\n\x07task_id\x18\x03 
\x01(\t\x12\x11\n\ttimestamp\x18\x05 \x01(\x03\x12%\n\x05model\x18\x06 \x01(\x0b\x32\x16.mir.command.ModelMeta\x12:\n\runknown_types\x18\x07 \x03(\x0b\x32#.mir.command.Task.UnknownTypesEntry\x12\x13\n\x0breturn_code\x18\x08 \x01(\x05\x12\x12\n\nreturn_msg\x18\t \x01(\t\x12+\n\nevaluation\x18\n \x01(\x0b\x32\x17.mir.command.Evaluation\x12\"\n\x1aserialized_task_parameters\x18\x66 \x01(\t\x12\"\n\x1aserialized_executor_config\x18g \x01(\t\x12\x10\n\x08src_revs\x18h \x01(\t\x12\x0f\n\x07\x64st_rev\x18i \x01(\t\x12\x10\n\x08\x65xecutor\x18j \x01(\t\x1a\x33\n\x11UnknownTypesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01J\x04\x08\x04\x10\x05J\x04\x08\x64\x10\x65J\x04\x08\x65\x10\x66\"P\n\tModelMeta\x12\x12\n\nmodel_hash\x18\x01 \x01(\t\x12\x1e\n\x16mean_average_precision\x18\x02 \x01(\x02\x12\x0f\n\x07\x63ontext\x18\x03 \x01(\t\"\xe8\x01\n\nEvaluation\x12+\n\x06\x63onfig\x18\x01 \x01(\x0b\x32\x1b.mir.command.EvaluateConfig\x12L\n\x13\x64\x61taset_evaluations\x18\x02 \x03(\x0b\x32/.mir.command.Evaluation.DatasetEvaluationsEntry\x1a_\n\x17\x44\x61tasetEvaluationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x33\n\x05value\x18\x02 \x01(\x0b\x32$.mir.command.SingleDatasetEvaluation:\x02\x38\x01\"\x85\x01\n\x0e\x45valuateConfig\x12\x15\n\rgt_dataset_id\x18\x01 \x01(\t\x12\x18\n\x10pred_dataset_ids\x18\x02 \x03(\t\x12\x10\n\x08\x63onf_thr\x18\x03 \x01(\x02\x12\x19\n\x11iou_thrs_interval\x18\x04 \x01(\t\x12\x15\n\rneed_pr_curve\x18\x05 \x01(\x08\"\xca\x02\n\x17SingleDatasetEvaluation\x12\x10\n\x08\x63onf_thr\x18\x01 \x01(\x02\x12\x15\n\rgt_dataset_id\x18\x02 \x01(\t\x12\x17\n\x0fpred_dataset_id\x18\x03 \x01(\t\x12Q\n\x0fiou_evaluations\x18\x04 \x03(\x0b\x32\x38.mir.command.SingleDatasetEvaluation.IouEvaluationsEntry\x12\x41\n\x17iou_averaged_evaluation\x18\x05 \x01(\x0b\x32 .mir.command.SingleIouEvaluation\x1aW\n\x13IouEvaluationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12/\n\x05value\x18\x02 \x01(\x0b\x32 .mir.command.SingleIouEvaluation:\x02\x38\x01\"\xb0\x03\n\x13SingleIouEvaluation\x12K\n\x0e\x63i_evaluations\x18\x01 \x03(\x0b\x32\x33.mir.command.SingleIouEvaluation.CiEvaluationsEntry\x12\x42\n\x16\x63i_averaged_evaluation\x18\x02 \x01(\x0b\x32\".mir.command.SingleTopicEvaluation\x12Q\n\x11topic_evaluations\x18\x03 \x03(\x0b\x32\x36.mir.command.SingleIouEvaluation.TopicEvaluationsEntry\x1aX\n\x12\x43iEvaluationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\x31\n\x05value\x18\x02 \x01(\x0b\x32\".mir.command.SingleTopicEvaluation:\x02\x38\x01\x1a[\n\x15TopicEvaluationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x31\n\x05value\x18\x02 \x01(\x0b\x32\".mir.command.SingleTopicEvaluation:\x02\x38\x01\"~\n\x15SingleTopicEvaluation\x12\n\n\x02\x61p\x18\x01 \x01(\x02\x12\n\n\x02\x61r\x18\x02 \x01(\x02\x12\n\n\x02tp\x18\x03 \x01(\x05\x12\n\n\x02\x66p\x18\x04 \x01(\x05\x12\n\n\x02\x66n\x18\x05 \x01(\x05\x12)\n\x08pr_curve\x18\x06 \x03(\x0b\x32\x17.mir.command.FloatPoint\"\"\n\nFloatPoint\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02\"\xa5\x04\n\nMirContext\x12\x12\n\nimages_cnt\x18\x01 \x01(\x05\x12\x1b\n\x13negative_images_cnt\x18\x02 \x01(\x05\x12#\n\x1bproject_negative_images_cnt\x18\x03 \x01(\x05\x12O\n\x15predefined_keyids_cnt\x18\x04 \x03(\x0b\x32\x30.mir.command.MirContext.PredefinedKeyidsCntEntry\x12^\n\x1dproject_predefined_keyids_cnt\x18\x05 \x03(\x0b\x32\x37.mir.command.MirContext.ProjectPredefinedKeyidsCntEntry\x12S\n\x17\x63ustomized_keywords_cnt\x18\x06 
\x03(\x0b\x32\x32.mir.command.MirContext.CustomizedKeywordsCntEntry\x1a:\n\x18PredefinedKeyidsCntEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x41\n\x1fProjectPredefinedKeyidsCntEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a<\n\x1a\x43ustomizedKeywordsCntEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01*Z\n\x07TvtType\x12\x12\n\x0eTvtTypeUnknown\x10\x00\x12\x13\n\x0fTvtTypeTraining\x10\x01\x12\x15\n\x11TvtTypeValidation\x10\x02\x12\x0f\n\x0bTvtTypeTest\x10\x03*\x88\x02\n\tAssetType\x12\x14\n\x10\x41ssetTypeUnknown\x10\x00\x12\x16\n\x12\x41ssetTypeImageJpeg\x10\x01\x12\x15\n\x11\x41ssetTypeImagePng\x10\x02\x12\x1a\n\x16\x41ssetTypeImagePixelMat\x10\x03\x12\x19\n\x15\x41ssetTypeImageYuv420p\x10\x04\x12\x1a\n\x16\x41ssetTypeImageYuv420sp\x10\x05\x12\x19\n\x15\x41ssetTypeImageYuv422p\x10\x06\x12\x1a\n\x16\x41ssetTypeImageYuv422sp\x10\x07\x12\x15\n\x11\x41ssetTypeImageBmp\x10\x08\x12\x15\n\x11\x41ssetTypeVideoMp4\x10\x65*\xd3\x02\n\x08TaskType\x12\x13\n\x0fTaskTypeUnknown\x10\x00\x12\x14\n\x10TaskTypeTraining\x10\x01\x12\x12\n\x0eTaskTypeMining\x10\x02\x12\x11\n\rTaskTypeLabel\x10\x03\x12\x12\n\x0eTaskTypeFilter\x10\x04\x12\x16\n\x12TaskTypeImportData\x10\x05\x12\x16\n\x12TaskTypeExportData\x10\x06\x12\x14\n\x10TaskTypeCopyData\x10\x07\x12\x11\n\rTaskTypeMerge\x10\x08\x12\x11\n\rTaskTypeInfer\x10\t\x12\x14\n\x10TaskTypeSampling\x10\n\x12\x12\n\x0eTaskTypeFusion\x10\x0b\x12\x10\n\x0cTaskTypeInit\x10\x0c\x12\x17\n\x13TaskTypeImportModel\x10\r\x12\x14\n\x10TaskTypeEvaluate\x10\x10\"\x04\x08\x0e\x10\x0e\"\x04\x08\x0f\x10\x0f*\x87\x01\n\tTaskState\x12\x14\n\x10TaskStateUnknown\x10\x00\x12\x14\n\x10TaskStatePending\x10\x01\x12\x14\n\x10TaskStateRunning\x10\x02\x12\x11\n\rTaskStateDone\x10\x03\x12\x12\n\x0eTaskStateError\x10\x04\x12\x11\n\rTaskStateMiss\x10\x05*L\n\x08Sha1Type\x12\x15\n\x11SHA1_TYPE_UNKNOWN\x10\x00\x12\x13\n\x0fSHA1_TYPE_ASSET\x10\x01\x12\x14\n\x10SHA1_TYPE_COMMIT\x10\x02*f\n\nMirStorage\x12\x11\n\rMIR_METADATAS\x10\x00\x12\x13\n\x0fMIR_ANNOTATIONS\x10\x01\x12\x10\n\x0cMIR_KEYWORDS\x10\x02\x12\r\n\tMIR_TASKS\x10\x03\x12\x0f\n\x0bMIR_CONTEXT\x10\x04*<\n\x0bLabelFormat\x12\x11\n\rNO_ANNOTATION\x10\x00\x12\x0e\n\nPASCAL_VOC\x10\x01\x12\n\n\x06IF_ARK\x10\x02\x62\x06proto3' + serialized_pb=b'\n\x11mir_command.proto\x12\x0bmir.command\"\xa1\x01\n\x0cMirMetadatas\x12=\n\nattributes\x18\x01 \x03(\x0b\x32).mir.command.MirMetadatas.AttributesEntry\x1aR\n\x0f\x41ttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12.\n\x05value\x18\x02 \x01(\x0b\x32\x1f.mir.command.MetadataAttributes:\x02\x38\x01\"\xfc\x01\n\x12MetadataAttributes\x12)\n\ttimestamp\x18\x02 \x01(\x0b\x32\x16.mir.command.Timestamp\x12&\n\x08tvt_type\x18\x03 \x01(\x0e\x32\x14.mir.command.TvtType\x12*\n\nasset_type\x18\x04 \x01(\x0e\x32\x16.mir.command.AssetType\x12\r\n\x05width\x18\x05 \x01(\x05\x12\x0e\n\x06height\x18\x06 \x01(\x05\x12\x16\n\x0eimage_channels\x18\x07 \x01(\x05\x12\x11\n\tbyte_size\x18\x08 \x01(\x05\x12\x17\n\x0forigin_filename\x18\t \x01(\tJ\x04\x08\x01\x10\x02\",\n\tTimestamp\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x10\n\x08\x64uration\x18\x02 \x01(\x02\"\x9a\x02\n\x0eMirAnnotations\x12\x38\n\x0cground_truth\x18\x03 \x01(\x0b\x32\".mir.command.SingleTaskAnnotations\x12\x36\n\nprediction\x18\x04 \x01(\x0b\x32\".mir.command.SingleTaskAnnotations\x12<\n\timage_cks\x18\x05 \x03(\x0b\x32).mir.command.MirAnnotations.ImageCksEntry\x1aL\n\rImageCksEntry\x12\x0b\n\x03key\x18\x01 
\x01(\t\x12*\n\x05value\x18\x02 \x01(\x0b\x32\x1b.mir.command.SingleImageCks:\x02\x38\x01J\x04\x08\x01\x10\x02J\x04\x08\x02\x10\x03\"\x84\x04\n\x15SingleTaskAnnotations\x12S\n\x11image_annotations\x18\x01 \x03(\x0b\x32\x38.mir.command.SingleTaskAnnotations.ImageAnnotationsEntry\x12\x0f\n\x07task_id\x18\x02 \x01(\t\x12#\n\x04type\x18\x03 \x01(\x0e\x32\x15.mir.command.AnnoType\x12\x16\n\x0etask_class_ids\x18\x04 \x03(\x05\x12H\n\x0cmap_id_color\x18\x05 \x03(\x0b\x32\x32.mir.command.SingleTaskAnnotations.MapIdColorEntry\x12\x16\n\x0e\x65val_class_ids\x18\n \x03(\x05\x12%\n\x05model\x18\x0b \x01(\x0b\x32\x16.mir.command.ModelMeta\x12\x17\n\x0f\x65xecutor_config\x18\x0c \x01(\t\x1a\\\n\x15ImageAnnotationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x32\n\x05value\x18\x02 \x01(\x0b\x32#.mir.command.SingleImageAnnotations:\x02\x38\x01\x1aH\n\x0fMapIdColorEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12$\n\x05value\x18\x02 \x01(\x0b\x32\x15.mir.command.IntPoint:\x02\x38\x01\"\xbf\x01\n\x16SingleImageAnnotations\x12,\n\x05\x62oxes\x18\x02 \x03(\x0b\x32\x1d.mir.command.ObjectAnnotation\x12/\n\x08polygons\x18\x03 \x03(\x0b\x32\x1d.mir.command.ObjectAnnotation\x12)\n\x04mask\x18\x04 \x01(\x0b\x32\x1b.mir.command.MaskAnnotation\x12\x15\n\rimg_class_ids\x18\x05 \x03(\x05J\x04\x08\x01\x10\x02\"\x86\x01\n\x0eSingleImageCks\x12\x31\n\x03\x63ks\x18\x01 \x03(\x0b\x32$.mir.command.SingleImageCks.CksEntry\x12\x15\n\rimage_quality\x18\x02 \x01(\x02\x1a*\n\x08\x43ksEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"R\n\x0eMaskAnnotation\x12\x15\n\rsemantic_mask\x18\x01 \x01(\x0c\x12\x15\n\rinstance_mask\x18\x02 \x01(\x0c\x12\x12\n\nobject_ids\x18\x03 \x03(\x05\"\xdb\x02\n\x10ObjectAnnotation\x12\r\n\x05index\x18\x01 \x01(\x05\x12\x1e\n\x03\x62ox\x18\x02 \x01(\x0b\x32\x11.mir.command.Rect\x12\x10\n\x08\x63lass_id\x18\x03 \x01(\x05\x12\r\n\x05score\x18\x04 \x01(\x01\x12\x14\n\x0c\x61nno_quality\x18\x05 \x01(\x02\x12\x35\n\x04tags\x18\x06 \x03(\x0b\x32\'.mir.command.ObjectAnnotation.TagsEntry\x12,\n\x02\x63m\x18\x07 \x01(\x0e\x32 .mir.command.ConfusionMatrixType\x12\x13\n\x0b\x64\x65t_link_id\x18\x08 \x01(\x05\x12\x12\n\nclass_name\x18\t \x01(\t\x12&\n\x07polygon\x18\n \x03(\x0b\x32\x15.mir.command.IntPoint\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"H\n\x04Rect\x12\t\n\x01x\x18\x01 \x01(\x05\x12\t\n\x01y\x18\x02 \x01(\x05\x12\t\n\x01w\x18\x03 \x01(\x05\x12\t\n\x01h\x18\x04 \x01(\x05\x12\x14\n\x0crotate_angle\x18\x05 \x01(\x02\"\x89\x02\n\x0bMirKeywords\x12+\n\x08pred_idx\x18\x07 \x01(\x0b\x32\x19.mir.command.CiTagToIndex\x12)\n\x06gt_idx\x18\x08 \x01(\x0b\x32\x19.mir.command.CiTagToIndex\x12\x33\n\x06\x63k_idx\x18\t \x03(\x0b\x32#.mir.command.MirKeywords.CkIdxEntry\x1aI\n\nCkIdxEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12*\n\x05value\x18\x02 \x01(\x0b\x32\x1b.mir.command.AssetAnnoIndex:\x02\x38\x01J\x04\x08\x01\x10\x02J\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04J\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06J\x04\x08\x06\x10\x07\"\x8b\x02\n\x0c\x43iTagToIndex\x12/\n\x03\x63is\x18\x01 \x03(\x0b\x32\".mir.command.CiTagToIndex.CisEntry\x12\x31\n\x04tags\x18\x02 \x03(\x0b\x32#.mir.command.CiTagToIndex.TagsEntry\x1aM\n\x08\x43isEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\x30\n\x05value\x18\x02 \x01(\x0b\x32!.mir.command.MapStringToInt32List:\x02\x38\x01\x1aH\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12*\n\x05value\x18\x02 \x01(\x0b\x32\x1b.mir.command.AssetAnnoIndex:\x02\x38\x01\"\x1f\n\nStringList\x12\x11\n\tasset_ids\x18\x01 
\x03(\t\"\x9d\x01\n\x14MapStringToInt32List\x12>\n\x07key_ids\x18\x01 \x03(\x0b\x32-.mir.command.MapStringToInt32List.KeyIdsEntry\x1a\x45\n\x0bKeyIdsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.mir.command.Int32List:\x02\x38\x01\"\x18\n\tInt32List\x12\x0b\n\x03ids\x18\x01 \x03(\x05\"\xb5\x02\n\x0e\x41ssetAnnoIndex\x12@\n\x0b\x61sset_annos\x18\x01 \x03(\x0b\x32+.mir.command.AssetAnnoIndex.AssetAnnosEntry\x12@\n\x0bsub_indexes\x18\x02 \x03(\x0b\x32+.mir.command.AssetAnnoIndex.SubIndexesEntry\x1aI\n\x0f\x41ssetAnnosEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.mir.command.Int32List:\x02\x38\x01\x1aT\n\x0fSubIndexesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x30\n\x05value\x18\x02 \x01(\x0b\x32!.mir.command.MapStringToInt32List:\x02\x38\x01\"\x92\x01\n\x08MirTasks\x12/\n\x05tasks\x18\x01 \x03(\x0b\x32 .mir.command.MirTasks.TasksEntry\x12\x14\n\x0chead_task_id\x18\x02 \x01(\t\x1a?\n\nTasksEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12 \n\x05value\x18\x02 \x01(\x0b\x32\x11.mir.command.Task:\x02\x38\x01\"\xed\x03\n\x04Task\x12#\n\x04type\x18\x01 \x01(\x0e\x32\x15.mir.command.TaskType\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0f\n\x07task_id\x18\x03 \x01(\t\x12\x11\n\ttimestamp\x18\x05 \x01(\x05\x12%\n\x05model\x18\x06 \x01(\x0b\x32\x16.mir.command.ModelMeta\x12\x13\n\x0breturn_code\x18\x08 \x01(\x05\x12\x12\n\nreturn_msg\x18\t \x01(\t\x12+\n\nevaluation\x18\n \x01(\x0b\x32\x17.mir.command.Evaluation\x12\x32\n\tnew_types\x18\x0b \x03(\x0b\x32\x1f.mir.command.Task.NewTypesEntry\x12\x17\n\x0fnew_types_added\x18\x0c \x01(\x08\x12\"\n\x1aserialized_task_parameters\x18\x66 \x01(\t\x12\"\n\x1aserialized_executor_config\x18g \x01(\t\x12\x10\n\x08src_revs\x18h \x01(\t\x12\x0f\n\x07\x64st_rev\x18i \x01(\t\x12\x10\n\x08\x65xecutor\x18j \x01(\t\x1a/\n\rNewTypesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01J\x04\x08\x04\x10\x05J\x04\x08\x07\x10\x08J\x04\x08\x64\x10\x65J\x04\x08\x65\x10\x66\"\xfa\x01\n\tModelMeta\x12\x12\n\nmodel_hash\x18\x01 \x01(\t\x12\x1e\n\x16mean_average_precision\x18\x02 \x01(\x02\x12\x0f\n\x07\x63ontext\x18\x03 \x01(\t\x12\x32\n\x06stages\x18\x04 \x03(\x0b\x32\".mir.command.ModelMeta.StagesEntry\x12\x17\n\x0f\x62\x65st_stage_name\x18\x05 \x01(\t\x12\x13\n\x0b\x63lass_names\x18\x06 \x03(\t\x1a\x46\n\x0bStagesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12&\n\x05value\x18\x02 \x01(\x0b\x32\x17.mir.command.ModelStage:\x02\x38\x01\"O\n\nModelStage\x12\x12\n\nstage_name\x18\x01 \x01(\t\x12\r\n\x05\x66iles\x18\x02 \x03(\t\x12\x11\n\ttimestamp\x18\x03 \x01(\x05\x12\x0b\n\x03mAP\x18\x04 \x01(\x02\"\xf0\x02\n\nEvaluation\x12+\n\x06\x63onfig\x18\x01 \x01(\x0b\x32\x1b.mir.command.EvaluateConfig\x12@\n\x12\x64\x61taset_evaluation\x18\x03 \x01(\x0b\x32$.mir.command.SingleDatasetEvaluation\x12\x35\n\x07main_ck\x18\x04 \x01(\x0b\x32$.mir.command.SingleDatasetEvaluation\x12\x34\n\x07sub_cks\x18\x05 \x03(\x0b\x32#.mir.command.Evaluation.SubCksEntry\x12+\n\x05state\x18\x06 \x01(\x0e\x32\x1c.mir.command.EvaluationState\x1aS\n\x0bSubCksEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x33\n\x05value\x18\x02 \x01(\x0b\x32$.mir.command.SingleDatasetEvaluation:\x02\x38\x01J\x04\x08\x02\x10\x03\"\x8a\x01\n\x0e\x45valuateConfig\x12\x10\n\x08\x63onf_thr\x18\x03 \x01(\x02\x12\x19\n\x11iou_thrs_interval\x18\x04 \x01(\t\x12\x15\n\rneed_pr_curve\x18\x05 \x01(\x08\x12\x11\n\tclass_ids\x18\x07 \x03(\x05\x12\x0f\n\x07main_ck\x18\x08 
\x01(\tJ\x04\x08\x01\x10\x02J\x04\x08\x02\x10\x03J\x04\x08\x06\x10\x07\"\xa6\x02\n\x17SingleDatasetEvaluation\x12\x10\n\x08\x63onf_thr\x18\x01 \x01(\x02\x12Q\n\x0fiou_evaluations\x18\x04 \x03(\x0b\x32\x38.mir.command.SingleDatasetEvaluation.IouEvaluationsEntry\x12\x41\n\x17iou_averaged_evaluation\x18\x05 \x01(\x0b\x32 .mir.command.SingleIouEvaluation\x1aW\n\x13IouEvaluationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12/\n\x05value\x18\x02 \x01(\x0b\x32 .mir.command.SingleIouEvaluation:\x02\x38\x01J\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04\"\x8a\x02\n\x13SingleIouEvaluation\x12K\n\x0e\x63i_evaluations\x18\x01 \x03(\x0b\x32\x33.mir.command.SingleIouEvaluation.CiEvaluationsEntry\x12\x44\n\x16\x63i_averaged_evaluation\x18\x02 \x01(\x0b\x32$.mir.command.SingleEvaluationElement\x1aZ\n\x12\x43iEvaluationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\x33\n\x05value\x18\x02 \x01(\x0b\x32$.mir.command.SingleEvaluationElement:\x02\x38\x01J\x04\x08\x03\x10\x04\"\x80\x01\n\x17SingleEvaluationElement\x12\n\n\x02\x61p\x18\x01 \x01(\x02\x12\n\n\x02\x61r\x18\x02 \x01(\x02\x12\n\n\x02tp\x18\x03 \x01(\x05\x12\n\n\x02\x66p\x18\x04 \x01(\x05\x12\n\n\x02\x66n\x18\x05 \x01(\x05\x12)\n\x08pr_curve\x18\x06 \x03(\x0b\x32\x17.mir.command.FloatPoint\"+\n\x08IntPoint\x12\t\n\x01x\x18\x01 \x01(\x05\x12\t\n\x01y\x18\x02 \x01(\x05\x12\t\n\x01z\x18\x03 \x01(\x05\"-\n\nFloatPoint\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02\x12\t\n\x01z\x18\x03 \x01(\x02\"\xca\x02\n\nMirContext\x12\x12\n\nimages_cnt\x18\x01 \x01(\x05\x12\x34\n\x07\x63ks_cnt\x18\x06 \x03(\x0b\x32#.mir.command.MirContext.CksCntEntry\x12\x1a\n\x12total_asset_mbytes\x18\x0b \x01(\x05\x12*\n\npred_stats\x18\x64 \x01(\x0b\x32\x16.mir.command.AnnoStats\x12(\n\x08gt_stats\x18\x65 \x01(\x0b\x32\x16.mir.command.AnnoStats\x1aJ\n\x0b\x43ksCntEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12*\n\x05value\x18\x02 \x01(\x0b\x32\x1b.mir.command.SingleMapCount:\x02\x38\x01J\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04J\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06J\x04\x08\x07\x10\x08J\x04\x08\x08\x10\tJ\x04\x08\t\x10\nJ\x04\x08\n\x10\x0bJ\x04\x08\x0c\x10\r\"\x86\x01\n\x0eSingleMapCount\x12\x0b\n\x03\x63nt\x18\x01 \x01(\x05\x12\x38\n\x07sub_cnt\x18\x02 \x03(\x0b\x32\'.mir.command.SingleMapCount.SubCntEntry\x1a-\n\x0bSubCntEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\"\xf8\x02\n\tAnnoStats\x12\x11\n\ttotal_cnt\x18\x01 \x01(\x05\x12\x1a\n\x12positive_asset_cnt\x18\x02 \x01(\x05\x12\x1a\n\x12negative_asset_cnt\x18\x03 \x01(\x05\x12\x35\n\x08tags_cnt\x18\x07 \x03(\x0b\x32#.mir.command.AnnoStats.TagsCntEntry\x12>\n\rclass_ids_cnt\x18\x08 \x03(\x0b\x32\'.mir.command.AnnoStats.ClassIdsCntEntry\x12\x16\n\x0e\x65val_class_ids\x18\t \x03(\x05\x1aK\n\x0cTagsCntEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12*\n\x05value\x18\x02 \x01(\x0b\x32\x1b.mir.command.SingleMapCount:\x02\x38\x01\x1a\x32\n\x10\x43lassIdsCntEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01J\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06J\x04\x08\x06\x10\x07\"\x83\x03\n\x0c\x45xportConfig\x12.\n\x0c\x61sset_format\x18\x01 \x01(\x0e\x32\x18.mir.command.AssetFormat\x12\x11\n\tasset_dir\x18\x02 \x01(\t\x12\x18\n\x10\x61sset_index_file\x18\x03 \x01(\t\x12\x1a\n\x12\x61sset_index_prefix\x18\x04 \x01(\t\x12\x16\n\x0emedia_location\x18\x05 \x01(\t\x12\x17\n\x0fneed_sub_folder\x18\x06 \x01(\x08\x12,\n\x0b\x61nno_format\x18\x32 \x01(\x0e\x32\x17.mir.command.AnnoFormat\x12\x0e\n\x06gt_dir\x18\x33 \x01(\t\x12\x15\n\rgt_index_file\x18\x34 
\x01(\t\x12\x17\n\x0fgt_index_prefix\x18\x35 \x01(\t\x12\x10\n\x08pred_dir\x18\x36 \x01(\t\x12\x17\n\x0fpred_index_file\x18\x37 \x01(\t\x12\x19\n\x11pred_index_prefix\x18\x38 \x01(\t\x12\x15\n\rtvt_index_dir\x18\x39 \x01(\t*Z\n\x07TvtType\x12\x12\n\x0eTvtTypeUnknown\x10\x00\x12\x13\n\x0fTvtTypeTraining\x10\x01\x12\x15\n\x11TvtTypeValidation\x10\x02\x12\x0f\n\x0bTvtTypeTest\x10\x03*\x88\x02\n\tAssetType\x12\x14\n\x10\x41ssetTypeUnknown\x10\x00\x12\x16\n\x12\x41ssetTypeImageJpeg\x10\x01\x12\x15\n\x11\x41ssetTypeImagePng\x10\x02\x12\x1a\n\x16\x41ssetTypeImagePixelMat\x10\x03\x12\x19\n\x15\x41ssetTypeImageYuv420p\x10\x04\x12\x1a\n\x16\x41ssetTypeImageYuv420sp\x10\x05\x12\x19\n\x15\x41ssetTypeImageYuv422p\x10\x06\x12\x1a\n\x16\x41ssetTypeImageYuv422sp\x10\x07\x12\x15\n\x11\x41ssetTypeImageBmp\x10\x08\x12\x15\n\x11\x41ssetTypeVideoMp4\x10\x65*\xf8\x02\n\x08TaskType\x12\x13\n\x0fTaskTypeUnknown\x10\x00\x12\x14\n\x10TaskTypeTraining\x10\x01\x12\x12\n\x0eTaskTypeMining\x10\x02\x12\x11\n\rTaskTypeLabel\x10\x03\x12\x12\n\x0eTaskTypeFilter\x10\x04\x12\x16\n\x12TaskTypeImportData\x10\x05\x12\x16\n\x12TaskTypeExportData\x10\x06\x12\x14\n\x10TaskTypeCopyData\x10\x07\x12\x11\n\rTaskTypeMerge\x10\x08\x12\x11\n\rTaskTypeInfer\x10\t\x12\x14\n\x10TaskTypeSampling\x10\n\x12\x12\n\x0eTaskTypeFusion\x10\x0b\x12\x10\n\x0cTaskTypeInit\x10\x0c\x12\x17\n\x13TaskTypeImportModel\x10\r\x12\x15\n\x11TaskTypeCopyModel\x10\x0e\x12\x18\n\x14TaskTypeDatasetInfer\x10\x0f\x12\x14\n\x10TaskTypeEvaluate\x10\x10*\x87\x01\n\tTaskState\x12\x14\n\x10TaskStateUnknown\x10\x00\x12\x14\n\x10TaskStatePending\x10\x01\x12\x14\n\x10TaskStateRunning\x10\x02\x12\x11\n\rTaskStateDone\x10\x03\x12\x12\n\x0eTaskStateError\x10\x04\x12\x11\n\rTaskStateMiss\x10\x05*L\n\x08Sha1Type\x12\x15\n\x11SHA1_TYPE_UNKNOWN\x10\x00\x12\x13\n\x0fSHA1_TYPE_ASSET\x10\x01\x12\x14\n\x10SHA1_TYPE_COMMIT\x10\x02*f\n\nMirStorage\x12\x11\n\rMIR_METADATAS\x10\x00\x12\x13\n\x0fMIR_ANNOTATIONS\x10\x01\x12\x10\n\x0cMIR_KEYWORDS\x10\x02\x12\r\n\tMIR_TASKS\x10\x03\x12\x0f\n\x0bMIR_CONTEXT\x10\x04*\x87\x01\n\nAnnoFormat\x12\x14\n\x10\x41\x46_NO_ANNOTATION\x10\x00\x12\x15\n\x11\x41\x46_DET_PASCAL_VOC\x10\x01\x12\x13\n\x0f\x41\x46_DET_ARK_JSON\x10\x02\x12\x12\n\x0e\x41\x46_DET_LS_JSON\x10\x03\x12\x12\n\x0e\x41\x46_SEG_POLYGON\x10\x04\x12\x0f\n\x0b\x41\x46_SEG_MASK\x10\x05*6\n\x0b\x41ssetFormat\x12\x0e\n\nAF_UNKNOWN\x10\x00\x12\n\n\x06\x41\x46_RAW\x10\x01\x12\x0b\n\x07\x41\x46_LMDB\x10\x02*]\n\x08\x41nnoType\x12\x0e\n\nAT_UNKNOWN\x10\x00\x12\x0c\n\x08\x41T_CLASS\x10\x01\x12\x0e\n\nAT_DET_BOX\x10\x02\x12\x12\n\x0e\x41T_SEG_POLYGON\x10\x03\x12\x0f\n\x0b\x41T_SEG_MASK\x10\x04*d\n\x13\x43onfusionMatrixType\x12\n\n\x06NotSet\x10\x00\x12\x06\n\x02TP\x10\x01\x12\x06\n\x02\x46P\x10\x02\x12\x06\n\x02\x46N\x10\x03\x12\x06\n\x02TN\x10\x04\x12\x0b\n\x07Unknown\x10\x05\x12\x07\n\x03MTP\x10\x0b\x12\x0b\n\x07IGNORED\x10\x0c*p\n\x0f\x45valuationState\x12\x0e\n\nES_NOT_SET\x10\x00\x12\x0c\n\x08\x45S_READY\x10\x01\x12\x14\n\x10\x45S_NO_GT_OR_PRED\x10\x02\x12\x14\n\x10\x45S_EXCEEDS_LIMIT\x10\x03\x12\x13\n\x0f\x45S_NO_CLASS_IDS\x10\x04\x42\tZ\x07/protosb\x06proto3' ) _TVTTYPE = _descriptor.EnumDescriptor( @@ -53,8 +53,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=4102, - serialized_end=4192, + serialized_start=6730, + serialized_end=6820, ) _sym_db.RegisterEnumDescriptor(_TVTTYPE) @@ -119,8 +119,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=4195, - serialized_end=4459, + serialized_start=6823, + serialized_end=7087, ) 
_sym_db.RegisterEnumDescriptor(_ASSETTYPE) @@ -203,15 +203,25 @@ type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( - name='TaskTypeEvaluate', index=14, number=16, + name='TaskTypeCopyModel', index=14, number=14, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskTypeDatasetInfer', index=15, number=15, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskTypeEvaluate', index=16, number=16, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, - serialized_start=4462, - serialized_end=4801, + serialized_start=7090, + serialized_end=7466, ) _sym_db.RegisterEnumDescriptor(_TASKTYPE) @@ -256,8 +266,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=4804, - serialized_end=4939, + serialized_start=7469, + serialized_end=7604, ) _sym_db.RegisterEnumDescriptor(_TASKSTATE) @@ -287,8 +297,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=4941, - serialized_end=5017, + serialized_start=7606, + serialized_end=7682, ) _sym_db.RegisterEnumDescriptor(_SHA1TYPE) @@ -328,43 +338,227 @@ ], containing_type=None, serialized_options=None, - serialized_start=5019, - serialized_end=5121, + serialized_start=7684, + serialized_end=7786, ) _sym_db.RegisterEnumDescriptor(_MIRSTORAGE) MirStorage = enum_type_wrapper.EnumTypeWrapper(_MIRSTORAGE) -_LABELFORMAT = _descriptor.EnumDescriptor( - name='LabelFormat', - full_name='mir.command.LabelFormat', +_ANNOFORMAT = _descriptor.EnumDescriptor( + name='AnnoFormat', + full_name='mir.command.AnnoFormat', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='AF_NO_ANNOTATION', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AF_DET_PASCAL_VOC', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AF_DET_ARK_JSON', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AF_DET_LS_JSON', index=3, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AF_SEG_POLYGON', index=4, number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AF_SEG_MASK', index=5, number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + serialized_start=7789, + serialized_end=7924, +) +_sym_db.RegisterEnumDescriptor(_ANNOFORMAT) + +AnnoFormat = enum_type_wrapper.EnumTypeWrapper(_ANNOFORMAT) +_ASSETFORMAT = _descriptor.EnumDescriptor( + name='AssetFormat', + full_name='mir.command.AssetFormat', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='AF_UNKNOWN', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AF_RAW', index=1, number=1, + serialized_options=None, + 
type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AF_LMDB', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + serialized_start=7926, + serialized_end=7980, +) +_sym_db.RegisterEnumDescriptor(_ASSETFORMAT) + +AssetFormat = enum_type_wrapper.EnumTypeWrapper(_ASSETFORMAT) +_ANNOTYPE = _descriptor.EnumDescriptor( + name='AnnoType', + full_name='mir.command.AnnoType', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='AT_UNKNOWN', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AT_CLASS', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AT_DET_BOX', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AT_SEG_POLYGON', index=3, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AT_SEG_MASK', index=4, number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + serialized_start=7982, + serialized_end=8075, +) +_sym_db.RegisterEnumDescriptor(_ANNOTYPE) + +AnnoType = enum_type_wrapper.EnumTypeWrapper(_ANNOTYPE) +_CONFUSIONMATRIXTYPE = _descriptor.EnumDescriptor( + name='ConfusionMatrixType', + full_name='mir.command.ConfusionMatrixType', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='NotSet', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TP', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='FP', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='FN', index=3, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TN', index=4, number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='Unknown', index=5, number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='MTP', index=6, number=11, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='IGNORED', index=7, number=12, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + serialized_start=8077, + serialized_end=8177, +) +_sym_db.RegisterEnumDescriptor(_CONFUSIONMATRIXTYPE) + +ConfusionMatrixType = enum_type_wrapper.EnumTypeWrapper(_CONFUSIONMATRIXTYPE) +_EVALUATIONSTATE = _descriptor.EnumDescriptor( + name='EvaluationState', + full_name='mir.command.EvaluationState', filename=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( - name='NO_ANNOTATION', index=0, number=0, + name='ES_NOT_SET', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='ES_READY', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='ES_NO_GT_OR_PRED', index=2, number=2, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( - name='PASCAL_VOC', index=1, number=1, + name='ES_EXCEEDS_LIMIT', index=3, number=3, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( - name='IF_ARK', index=2, number=2, + name='ES_NO_CLASS_IDS', index=4, number=4, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, - serialized_start=5123, - serialized_end=5183, + serialized_start=8179, + serialized_end=8291, ) -_sym_db.RegisterEnumDescriptor(_LABELFORMAT) +_sym_db.RegisterEnumDescriptor(_EVALUATIONSTATE) -LabelFormat = enum_type_wrapper.EnumTypeWrapper(_LABELFORMAT) +EvaluationState = enum_type_wrapper.EnumTypeWrapper(_EVALUATIONSTATE) TvtTypeUnknown = 0 TvtTypeTraining = 1 TvtTypeValidation = 2 @@ -393,6 +587,8 @@ TaskTypeFusion = 11 TaskTypeInit = 12 TaskTypeImportModel = 13 +TaskTypeCopyModel = 14 +TaskTypeDatasetInfer = 15 TaskTypeEvaluate = 16 TaskStateUnknown = 0 TaskStatePending = 1 @@ -408,9 +604,33 @@ MIR_KEYWORDS = 2 MIR_TASKS = 3 MIR_CONTEXT = 4 -NO_ANNOTATION = 0 -PASCAL_VOC = 1 -IF_ARK = 2 +AF_NO_ANNOTATION = 0 +AF_DET_PASCAL_VOC = 1 +AF_DET_ARK_JSON = 2 +AF_DET_LS_JSON = 3 +AF_SEG_POLYGON = 4 +AF_SEG_MASK = 5 +AF_UNKNOWN = 0 +AF_RAW = 1 +AF_LMDB = 2 +AT_UNKNOWN = 0 +AT_CLASS = 1 +AT_DET_BOX = 2 +AT_SEG_POLYGON = 3 +AT_SEG_MASK = 4 +NotSet = 0 +TP = 1 +FP = 2 +FN = 3 +TN = 4 +Unknown = 5 +MTP = 11 +IGNORED = 12 +ES_NOT_SET = 0 +ES_READY = 1 +ES_NO_GT_OR_PRED = 2 +ES_EXCEEDS_LIMIT = 3 +ES_NO_CLASS_IDS = 4 @@ -493,54 +713,61 @@ create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( - name='dataset_name', full_name='mir.command.MetadataAttributes.dataset_name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='timestamp', full_name='mir.command.MetadataAttributes.timestamp', index=1, + name='timestamp', full_name='mir.command.MetadataAttributes.timestamp', index=0, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='tvt_type', full_name='mir.command.MetadataAttributes.tvt_type', index=2, + name='tvt_type', full_name='mir.command.MetadataAttributes.tvt_type', index=1, number=3, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='asset_type', full_name='mir.command.MetadataAttributes.asset_type', index=3, + name='asset_type', full_name='mir.command.MetadataAttributes.asset_type', index=2, number=4, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='width', full_name='mir.command.MetadataAttributes.width', index=4, + name='width', full_name='mir.command.MetadataAttributes.width', index=3, number=5, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='height', full_name='mir.command.MetadataAttributes.height', index=5, + name='height', full_name='mir.command.MetadataAttributes.height', index=4, number=6, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='image_channels', full_name='mir.command.MetadataAttributes.image_channels', index=6, + name='image_channels', full_name='mir.command.MetadataAttributes.image_channels', index=5, number=7, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='byte_size', full_name='mir.command.MetadataAttributes.byte_size', index=6, + number=8, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='origin_filename', full_name='mir.command.MetadataAttributes.origin_filename', index=7, + number=9, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], @@ -554,7 +781,7 @@ oneofs=[ ], serialized_start=199, - serialized_end=423, + serialized_end=451, ) @@ -568,7 +795,7 @@ fields=[ _descriptor.FieldDescriptor( name='start', full_name='mir.command.Timestamp.start', index=0, - number=1, type=3, cpp_type=2, label=1, + number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, @@ -592,28 +819,28 @@ extension_ranges=[], oneofs=[ ], - serialized_start=425, - serialized_end=469, + serialized_start=453, + serialized_end=497, ) -_MIRANNOTATIONS_TASKANNOTATIONSENTRY = _descriptor.Descriptor( - name='TaskAnnotationsEntry', - full_name='mir.command.MirAnnotations.TaskAnnotationsEntry', +_MIRANNOTATIONS_IMAGECKSENTRY = _descriptor.Descriptor( + 
name='ImageCksEntry', + full_name='mir.command.MirAnnotations.ImageCksEntry', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( - name='key', full_name='mir.command.MirAnnotations.TaskAnnotationsEntry.key', index=0, + name='key', full_name='mir.command.MirAnnotations.ImageCksEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='value', full_name='mir.command.MirAnnotations.TaskAnnotationsEntry.value', index=1, + name='value', full_name='mir.command.MirAnnotations.ImageCksEntry.value', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, @@ -631,8 +858,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=588, - serialized_end=678, + serialized_start=694, + serialized_end=770, ) _MIRANNOTATIONS = _descriptor.Descriptor( @@ -644,23 +871,30 @@ create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( - name='task_annotations', full_name='mir.command.MirAnnotations.task_annotations', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], + name='ground_truth', full_name='mir.command.MirAnnotations.ground_truth', index=0, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='head_task_id', full_name='mir.command.MirAnnotations.head_task_id', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + name='prediction', full_name='mir.command.MirAnnotations.prediction', index=1, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='image_cks', full_name='mir.command.MirAnnotations.image_cks', index=2, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], - nested_types=[_MIRANNOTATIONS_TASKANNOTATIONSENTRY, ], + nested_types=[_MIRANNOTATIONS_IMAGECKSENTRY, ], enum_types=[ ], serialized_options=None, @@ -669,8 +903,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=472, - serialized_end=678, + serialized_start=500, + serialized_end=782, ) @@ -708,61 +942,116 @@ extension_ranges=[], oneofs=[ ], - serialized_start=791, - serialized_end=883, + serialized_start=1135, + serialized_end=1227, ) -_SINGLETASKANNOTATIONS = _descriptor.Descriptor( - name='SingleTaskAnnotations', - full_name='mir.command.SingleTaskAnnotations', +_SINGLETASKANNOTATIONS_MAPIDCOLORENTRY = _descriptor.Descriptor( + name='MapIdColorEntry', + 
full_name='mir.command.SingleTaskAnnotations.MapIdColorEntry', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( - name='image_annotations', full_name='mir.command.SingleTaskAnnotations.image_annotations', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], + name='key', full_name='mir.command.SingleTaskAnnotations.MapIdColorEntry.key', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='mir.command.SingleTaskAnnotations.MapIdColorEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], - nested_types=[_SINGLETASKANNOTATIONS_IMAGEANNOTATIONSENTRY, ], + nested_types=[], enum_types=[ ], - serialized_options=None, + serialized_options=b'8\001', is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], - serialized_start=681, - serialized_end=883, + serialized_start=1229, + serialized_end=1301, ) - -_SINGLEIMAGEANNOTATIONS = _descriptor.Descriptor( - name='SingleImageAnnotations', - full_name='mir.command.SingleImageAnnotations', +_SINGLETASKANNOTATIONS = _descriptor.Descriptor( + name='SingleTaskAnnotations', + full_name='mir.command.SingleTaskAnnotations', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( - name='annotations', full_name='mir.command.SingleImageAnnotations.annotations', index=0, - number=2, type=11, cpp_type=10, label=3, + name='image_annotations', full_name='mir.command.SingleTaskAnnotations.image_annotations', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='task_id', full_name='mir.command.SingleTaskAnnotations.task_id', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='type', full_name='mir.command.SingleTaskAnnotations.type', index=2, + number=3, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='task_class_ids', full_name='mir.command.SingleTaskAnnotations.task_class_ids', index=3, + number=4, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='map_id_color', full_name='mir.command.SingleTaskAnnotations.map_id_color', index=4, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='eval_class_ids', full_name='mir.command.SingleTaskAnnotations.eval_class_ids', index=5, + number=10, type=5, cpp_type=1, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='model', full_name='mir.command.SingleTaskAnnotations.model', index=6, + number=11, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='executor_config', full_name='mir.command.SingleTaskAnnotations.executor_config', index=7, + number=12, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], - nested_types=[], + nested_types=[_SINGLETASKANNOTATIONS_IMAGEANNOTATIONSENTRY, _SINGLETASKANNOTATIONS_MAPIDCOLORENTRY, ], enum_types=[ ], serialized_options=None, @@ -771,44 +1060,44 @@ extension_ranges=[], oneofs=[ ], - serialized_start=885, - serialized_end=955, + serialized_start=785, + serialized_end=1301, ) -_ANNOTATION = _descriptor.Descriptor( - name='Annotation', - full_name='mir.command.Annotation', +_SINGLEIMAGEANNOTATIONS = _descriptor.Descriptor( + name='SingleImageAnnotations', + full_name='mir.command.SingleImageAnnotations', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( - name='index', full_name='mir.command.Annotation.index', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, + name='boxes', full_name='mir.command.SingleImageAnnotations.boxes', index=0, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='box', full_name='mir.command.Annotation.box', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, + name='polygons', full_name='mir.command.SingleImageAnnotations.polygons', index=1, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='class_id', full_name='mir.command.Annotation.class_id', 
index=2, - number=3, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, + name='mask', full_name='mir.command.SingleImageAnnotations.mask', index=2, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='score', full_name='mir.command.Annotation.score', index=3, - number=4, type=1, cpp_type=5, label=1, - has_default_value=False, default_value=float(0), + name='img_class_ids', full_name='mir.command.SingleImageAnnotations.img_class_ids', index=3, + number=5, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), @@ -824,12 +1113,268 @@ extension_ranges=[], oneofs=[ ], - serialized_start=957, - serialized_end=1049, + serialized_start=1304, + serialized_end=1495, ) -_RECT = _descriptor.Descriptor( +_SINGLEIMAGECKS_CKSENTRY = _descriptor.Descriptor( + name='CksEntry', + full_name='mir.command.SingleImageCks.CksEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='mir.command.SingleImageCks.CksEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='mir.command.SingleImageCks.CksEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1590, + serialized_end=1632, +) + +_SINGLEIMAGECKS = _descriptor.Descriptor( + name='SingleImageCks', + full_name='mir.command.SingleImageCks', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='cks', full_name='mir.command.SingleImageCks.cks', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='image_quality', full_name='mir.command.SingleImageCks.image_quality', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_SINGLEIMAGECKS_CKSENTRY, ], + 
enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1498, + serialized_end=1632, +) + + +_MASKANNOTATION = _descriptor.Descriptor( + name='MaskAnnotation', + full_name='mir.command.MaskAnnotation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='semantic_mask', full_name='mir.command.MaskAnnotation.semantic_mask', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=b"", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='instance_mask', full_name='mir.command.MaskAnnotation.instance_mask', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=b"", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='object_ids', full_name='mir.command.MaskAnnotation.object_ids', index=2, + number=3, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1634, + serialized_end=1716, +) + + +_OBJECTANNOTATION_TAGSENTRY = _descriptor.Descriptor( + name='TagsEntry', + full_name='mir.command.ObjectAnnotation.TagsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='mir.command.ObjectAnnotation.TagsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='mir.command.ObjectAnnotation.TagsEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2023, + serialized_end=2066, +) + +_OBJECTANNOTATION = _descriptor.Descriptor( + name='ObjectAnnotation', + full_name='mir.command.ObjectAnnotation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='index', full_name='mir.command.ObjectAnnotation.index', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='box', full_name='mir.command.ObjectAnnotation.box', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='class_id', full_name='mir.command.ObjectAnnotation.class_id', index=2, + number=3, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='score', full_name='mir.command.ObjectAnnotation.score', index=3, + number=4, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='anno_quality', full_name='mir.command.ObjectAnnotation.anno_quality', index=4, + number=5, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='tags', full_name='mir.command.ObjectAnnotation.tags', index=5, + number=6, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='cm', full_name='mir.command.ObjectAnnotation.cm', index=6, + number=7, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='det_link_id', full_name='mir.command.ObjectAnnotation.det_link_id', index=7, + number=8, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='class_name', full_name='mir.command.ObjectAnnotation.class_name', index=8, + number=9, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='polygon', full_name='mir.command.ObjectAnnotation.polygon', index=9, + number=10, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_OBJECTANNOTATION_TAGSENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1719, + serialized_end=2066, +) + + +_RECT = _descriptor.Descriptor( name='Rect', full_name='mir.command.Rect', filename=None, @@ -838,30 +1383,198 @@ create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( - name='x', full_name='mir.command.Rect.x', index=0, + name='x', full_name='mir.command.Rect.x', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='y', full_name='mir.command.Rect.y', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='w', full_name='mir.command.Rect.w', index=2, + number=3, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='h', full_name='mir.command.Rect.h', index=3, + number=4, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='rotate_angle', full_name='mir.command.Rect.rotate_angle', index=4, + number=5, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2068, + serialized_end=2140, +) + + +_MIRKEYWORDS_CKIDXENTRY = _descriptor.Descriptor( + name='CkIdxEntry', + full_name='mir.command.MirKeywords.CkIdxEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='mir.command.MirKeywords.CkIdxEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='mir.command.MirKeywords.CkIdxEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2299, + serialized_end=2372, +) + +_MIRKEYWORDS = _descriptor.Descriptor( + name='MirKeywords', + full_name='mir.command.MirKeywords', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='pred_idx', full_name='mir.command.MirKeywords.pred_idx', index=0, + number=7, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='gt_idx', full_name='mir.command.MirKeywords.gt_idx', index=1, + number=8, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='ck_idx', full_name='mir.command.MirKeywords.ck_idx', index=2, + number=9, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_MIRKEYWORDS_CKIDXENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2143, + serialized_end=2408, +) + + +_CITAGTOINDEX_CISENTRY = _descriptor.Descriptor( + name='CisEntry', + full_name='mir.command.CiTagToIndex.CisEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='mir.command.CiTagToIndex.CisEntry.key', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='y', full_name='mir.command.Rect.y', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, + name='value', full_name='mir.command.CiTagToIndex.CisEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2527, + serialized_end=2604, +) + +_CITAGTOINDEX_TAGSENTRY = _descriptor.Descriptor( + name='TagsEntry', + full_name='mir.command.CiTagToIndex.TagsEntry', + filename=None, + 
file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ _descriptor.FieldDescriptor( - name='w', full_name='mir.command.Rect.w', index=2, - number=3, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, + name='key', full_name='mir.command.CiTagToIndex.TagsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='h', full_name='mir.command.Rect.h', index=3, - number=4, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, + name='value', full_name='mir.command.CiTagToIndex.TagsEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), @@ -871,36 +1584,67 @@ nested_types=[], enum_types=[ ], - serialized_options=None, + serialized_options=b'8\001', is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], - serialized_start=1051, - serialized_end=1101, + serialized_start=2606, + serialized_end=2678, ) - -_MIRKEYWORDS_KEYWORDSENTRY = _descriptor.Descriptor( - name='KeywordsEntry', - full_name='mir.command.MirKeywords.KeywordsEntry', +_CITAGTOINDEX = _descriptor.Descriptor( + name='CiTagToIndex', + full_name='mir.command.CiTagToIndex', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( - name='key', full_name='mir.command.MirKeywords.KeywordsEntry.key', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + name='cis', full_name='mir.command.CiTagToIndex.cis', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='value', full_name='mir.command.MirKeywords.KeywordsEntry.value', index=1, - number=2, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, + name='tags', full_name='mir.command.CiTagToIndex.tags', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_CITAGTOINDEX_CISENTRY, _CITAGTOINDEX_TAGSENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2411, + serialized_end=2678, +) + + +_STRINGLIST = _descriptor.Descriptor( + name='StringList', + full_name='mir.command.StringList', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='asset_ids', full_name='mir.command.StringList.asset_ids', index=0, + number=1, type=9, cpp_type=9, label=3, + has_default_value=False, 
default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), @@ -910,33 +1654,34 @@ nested_types=[], enum_types=[ ], - serialized_options=b'8\001', + serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], - serialized_start=1263, - serialized_end=1333, + serialized_start=2680, + serialized_end=2711, ) -_MIRKEYWORDS_INDEXPREDIFINEDKEYIDSENTRY = _descriptor.Descriptor( - name='IndexPredifinedKeyidsEntry', - full_name='mir.command.MirKeywords.IndexPredifinedKeyidsEntry', + +_MAPSTRINGTOINT32LIST_KEYIDSENTRY = _descriptor.Descriptor( + name='KeyIdsEntry', + full_name='mir.command.MapStringToInt32List.KeyIdsEntry', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( - name='key', full_name='mir.command.MirKeywords.IndexPredifinedKeyidsEntry.key', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, + name='key', full_name='mir.command.MapStringToInt32List.KeyIdsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='value', full_name='mir.command.MirKeywords.IndexPredifinedKeyidsEntry.value', index=1, + name='value', full_name='mir.command.MapStringToInt32List.KeyIdsEntry.value', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, @@ -954,28 +1699,53 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1335, - serialized_end=1416, + serialized_start=2802, + serialized_end=2871, ) -_MIRKEYWORDS = _descriptor.Descriptor( - name='MirKeywords', - full_name='mir.command.MirKeywords', +_MAPSTRINGTOINT32LIST = _descriptor.Descriptor( + name='MapStringToInt32List', + full_name='mir.command.MapStringToInt32List', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( - name='keywords', full_name='mir.command.MirKeywords.keywords', index=0, + name='key_ids', full_name='mir.command.MapStringToInt32List.key_ids', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_MAPSTRINGTOINT32LIST_KEYIDSENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2714, + serialized_end=2871, +) + + +_INT32LIST = _descriptor.Descriptor( + name='Int32List', + full_name='mir.command.Int32List', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ _descriptor.FieldDescriptor( - name='index_predifined_keyids', full_name='mir.command.MirKeywords.index_predifined_keyids', index=1, - number=6, type=11, cpp_type=10, label=3, + name='ids', full_name='mir.command.Int32List.ids', index=0, + number=1, type=5, cpp_type=1, label=3, 
has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, @@ -983,7 +1753,7 @@ ], extensions=[ ], - nested_types=[_MIRKEYWORDS_KEYWORDSENTRY, _MIRKEYWORDS_INDEXPREDIFINEDKEYIDSENTRY, ], + nested_types=[], enum_types=[ ], serialized_options=None, @@ -992,23 +1762,30 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1104, - serialized_end=1440, + serialized_start=2873, + serialized_end=2897, ) -_ASSETS = _descriptor.Descriptor( - name='Assets', - full_name='mir.command.Assets', +_ASSETANNOINDEX_ASSETANNOSENTRY = _descriptor.Descriptor( + name='AssetAnnosEntry', + full_name='mir.command.AssetAnnoIndex.AssetAnnosEntry', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( - name='asset_ids', full_name='mir.command.Assets.asset_ids', index=0, - number=1, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], + name='key', full_name='mir.command.AssetAnnoIndex.AssetAnnosEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='mir.command.AssetAnnoIndex.AssetAnnosEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), @@ -1018,35 +1795,72 @@ nested_types=[], enum_types=[ ], - serialized_options=None, + serialized_options=b'8\001', is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], - serialized_start=1442, - serialized_end=1469, + serialized_start=3050, + serialized_end=3123, ) +_ASSETANNOINDEX_SUBINDEXESENTRY = _descriptor.Descriptor( + name='SubIndexesEntry', + full_name='mir.command.AssetAnnoIndex.SubIndexesEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='mir.command.AssetAnnoIndex.SubIndexesEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='mir.command.AssetAnnoIndex.SubIndexesEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=3125, + serialized_end=3209, +) -_KEYWORDS = _descriptor.Descriptor( - name='Keywords', - full_name='mir.command.Keywords', +_ASSETANNOINDEX = _descriptor.Descriptor( + name='AssetAnnoIndex', + 
full_name='mir.command.AssetAnnoIndex', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( - name='predifined_keyids', full_name='mir.command.Keywords.predifined_keyids', index=0, - number=1, type=5, cpp_type=1, label=3, + name='asset_annos', full_name='mir.command.AssetAnnoIndex.asset_annos', index=0, + number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='customized_keywords', full_name='mir.command.Keywords.customized_keywords', index=1, - number=2, type=9, cpp_type=9, label=3, + name='sub_indexes', full_name='mir.command.AssetAnnoIndex.sub_indexes', index=1, + number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, @@ -1054,7 +1868,7 @@ ], extensions=[ ], - nested_types=[], + nested_types=[_ASSETANNOINDEX_ASSETANNOSENTRY, _ASSETANNOINDEX_SUBINDEXESENTRY, ], enum_types=[ ], serialized_options=None, @@ -1063,8 +1877,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1471, - serialized_end=1537, + serialized_start=2900, + serialized_end=3209, ) @@ -1102,8 +1916,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1623, - serialized_end=1686, + serialized_start=3295, + serialized_end=3358, ) _MIRTASKS = _descriptor.Descriptor( @@ -1140,28 +1954,28 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1540, - serialized_end=1686, + serialized_start=3212, + serialized_end=3358, ) -_TASK_UNKNOWNTYPESENTRY = _descriptor.Descriptor( - name='UnknownTypesEntry', - full_name='mir.command.Task.UnknownTypesEntry', +_TASK_NEWTYPESENTRY = _descriptor.Descriptor( + name='NewTypesEntry', + full_name='mir.command.Task.NewTypesEntry', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( - name='key', full_name='mir.command.Task.UnknownTypesEntry.key', index=0, + name='key', full_name='mir.command.Task.NewTypesEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='value', full_name='mir.command.Task.UnknownTypesEntry.value', index=1, + name='value', full_name='mir.command.Task.NewTypesEntry.value', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, @@ -1179,8 +1993,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2094, - serialized_end=2145, + serialized_start=3783, + serialized_end=3830, ) _TASK = _descriptor.Descriptor( @@ -1214,7 +2028,7 @@ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( name='timestamp', full_name='mir.command.Task.timestamp', index=3, - number=5, type=3, cpp_type=2, label=1, + number=5, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, @@ -1227,72 +2041,184 @@ 
is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='unknown_types', full_name='mir.command.Task.unknown_types', index=5, - number=7, type=11, cpp_type=10, label=3, + name='return_code', full_name='mir.command.Task.return_code', index=5, + number=8, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='return_msg', full_name='mir.command.Task.return_msg', index=6, + number=9, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='evaluation', full_name='mir.command.Task.evaluation', index=7, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='new_types', full_name='mir.command.Task.new_types', index=8, + number=11, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='return_code', full_name='mir.command.Task.return_code', index=6, - number=8, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, + name='new_types_added', full_name='mir.command.Task.new_types_added', index=9, + number=12, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='return_msg', full_name='mir.command.Task.return_msg', index=7, - number=9, type=9, cpp_type=9, label=1, + name='serialized_task_parameters', full_name='mir.command.Task.serialized_task_parameters', index=10, + number=102, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='evaluation', full_name='mir.command.Task.evaluation', index=8, - number=10, type=11, cpp_type=10, label=1, + name='serialized_executor_config', full_name='mir.command.Task.serialized_executor_config', index=11, + number=103, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='src_revs', full_name='mir.command.Task.src_revs', index=12, + number=104, type=9, cpp_type=9, 
label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='dst_rev', full_name='mir.command.Task.dst_rev', index=13, + number=105, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='executor', full_name='mir.command.Task.executor', index=14, + number=106, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_TASK_NEWTYPESENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=3361, + serialized_end=3854, +) + + +_MODELMETA_STAGESENTRY = _descriptor.Descriptor( + name='StagesEntry', + full_name='mir.command.ModelMeta.StagesEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='mir.command.ModelMeta.StagesEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='mir.command.ModelMeta.StagesEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=4037, + serialized_end=4107, +) + +_MODELMETA = _descriptor.Descriptor( + name='ModelMeta', + full_name='mir.command.ModelMeta', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='model_hash', full_name='mir.command.ModelMeta.model_hash', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='serialized_task_parameters', full_name='mir.command.Task.serialized_task_parameters', index=9, - number=102, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + name='mean_average_precision', full_name='mir.command.ModelMeta.mean_average_precision', index=1, + number=2, type=2, cpp_type=6, label=1, + 
has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='serialized_executor_config', full_name='mir.command.Task.serialized_executor_config', index=10, - number=103, type=9, cpp_type=9, label=1, + name='context', full_name='mir.command.ModelMeta.context', index=2, + number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='src_revs', full_name='mir.command.Task.src_revs', index=11, - number=104, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + name='stages', full_name='mir.command.ModelMeta.stages', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='dst_rev', full_name='mir.command.Task.dst_rev', index=12, - number=105, type=9, cpp_type=9, label=1, + name='best_stage_name', full_name='mir.command.ModelMeta.best_stage_name', index=4, + number=5, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='executor', full_name='mir.command.Task.executor', index=13, - number=106, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + name='class_names', full_name='mir.command.ModelMeta.class_names', index=5, + number=6, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], - nested_types=[_TASK_UNKNOWNTYPESENTRY, ], + nested_types=[_MODELMETA_STAGESENTRY, ], enum_types=[ ], serialized_options=None, @@ -1301,37 +2227,44 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1689, - serialized_end=2163, + serialized_start=3857, + serialized_end=4107, ) -_MODELMETA = _descriptor.Descriptor( - name='ModelMeta', - full_name='mir.command.ModelMeta', +_MODELSTAGE = _descriptor.Descriptor( + name='ModelStage', + full_name='mir.command.ModelStage', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( - name='model_hash', full_name='mir.command.ModelMeta.model_hash', index=0, + name='stage_name', full_name='mir.command.ModelStage.stage_name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='mean_average_precision', 
full_name='mir.command.ModelMeta.mean_average_precision', index=1, - number=2, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=float(0), + name='files', full_name='mir.command.ModelStage.files', index=1, + number=2, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='context', full_name='mir.command.ModelMeta.context', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + name='timestamp', full_name='mir.command.ModelStage.timestamp', index=2, + number=3, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='mAP', full_name='mir.command.ModelStage.mAP', index=3, + number=4, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), @@ -1347,28 +2280,28 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2165, - serialized_end=2245, + serialized_start=4109, + serialized_end=4188, ) -_EVALUATION_DATASETEVALUATIONSENTRY = _descriptor.Descriptor( - name='DatasetEvaluationsEntry', - full_name='mir.command.Evaluation.DatasetEvaluationsEntry', +_EVALUATION_SUBCKSENTRY = _descriptor.Descriptor( + name='SubCksEntry', + full_name='mir.command.Evaluation.SubCksEntry', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( - name='key', full_name='mir.command.Evaluation.DatasetEvaluationsEntry.key', index=0, + name='key', full_name='mir.command.Evaluation.SubCksEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='value', full_name='mir.command.Evaluation.DatasetEvaluationsEntry.value', index=1, + name='value', full_name='mir.command.Evaluation.SubCksEntry.value', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, @@ -1386,8 +2319,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2385, - serialized_end=2480, + serialized_start=4470, + serialized_end=4553, ) _EVALUATION = _descriptor.Descriptor( @@ -1406,16 +2339,37 @@ is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='dataset_evaluations', full_name='mir.command.Evaluation.dataset_evaluations', index=1, - number=2, type=11, cpp_type=10, label=3, + name='dataset_evaluation', full_name='mir.command.Evaluation.dataset_evaluation', index=1, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='main_ck', full_name='mir.command.Evaluation.main_ck', index=2, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='sub_cks', full_name='mir.command.Evaluation.sub_cks', index=3, + number=5, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='state', full_name='mir.command.Evaluation.state', index=4, + number=6, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], - nested_types=[_EVALUATION_DATASETEVALUATIONSENTRY, ], + nested_types=[_EVALUATION_SUBCKSENTRY, ], enum_types=[ ], serialized_options=None, @@ -1424,8 +2378,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2248, - serialized_end=2480, + serialized_start=4191, + serialized_end=4559, ) @@ -1438,37 +2392,37 @@ create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( - name='gt_dataset_id', full_name='mir.command.EvaluateConfig.gt_dataset_id', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + name='conf_thr', full_name='mir.command.EvaluateConfig.conf_thr', index=0, + number=3, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='pred_dataset_ids', full_name='mir.command.EvaluateConfig.pred_dataset_ids', index=1, - number=2, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], + name='iou_thrs_interval', full_name='mir.command.EvaluateConfig.iou_thrs_interval', index=1, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='conf_thr', full_name='mir.command.EvaluateConfig.conf_thr', index=2, - number=3, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=float(0), + name='need_pr_curve', full_name='mir.command.EvaluateConfig.need_pr_curve', index=2, + number=5, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='iou_thrs_interval', full_name='mir.command.EvaluateConfig.iou_thrs_interval', index=3, - number=4, type=9, cpp_type=9, label=1, 
- has_default_value=False, default_value=b"".decode('utf-8'), + name='class_ids', full_name='mir.command.EvaluateConfig.class_ids', index=3, + number=7, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='need_pr_curve', full_name='mir.command.EvaluateConfig.need_pr_curve', index=4, - number=5, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, + name='main_ck', full_name='mir.command.EvaluateConfig.main_ck', index=4, + number=8, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), @@ -1484,8 +2438,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2483, - serialized_end=2616, + serialized_start=4562, + serialized_end=4700, ) @@ -1523,8 +2477,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2862, - serialized_end=2949, + serialized_start=4898, + serialized_end=4985, ) _SINGLEDATASETEVALUATION = _descriptor.Descriptor( @@ -1543,28 +2497,14 @@ is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='gt_dataset_id', full_name='mir.command.SingleDatasetEvaluation.gt_dataset_id', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='pred_dataset_id', full_name='mir.command.SingleDatasetEvaluation.pred_dataset_id', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='iou_evaluations', full_name='mir.command.SingleDatasetEvaluation.iou_evaluations', index=3, + name='iou_evaluations', full_name='mir.command.SingleDatasetEvaluation.iou_evaluations', index=1, number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='iou_averaged_evaluation', full_name='mir.command.SingleDatasetEvaluation.iou_averaged_evaluation', index=4, + name='iou_averaged_evaluation', full_name='mir.command.SingleDatasetEvaluation.iou_averaged_evaluation', index=2, number=5, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, @@ -1582,8 +2522,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2619, - serialized_end=2949, + serialized_start=4703, + serialized_end=4997, ) @@ -1621,27 +2561,225 @@ extension_ranges=[], oneofs=[ ], - serialized_start=3203, - serialized_end=3291, + serialized_start=5170, + 
serialized_end=5260, +) + +_SINGLEIOUEVALUATION = _descriptor.Descriptor( + name='SingleIouEvaluation', + full_name='mir.command.SingleIouEvaluation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='ci_evaluations', full_name='mir.command.SingleIouEvaluation.ci_evaluations', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='ci_averaged_evaluation', full_name='mir.command.SingleIouEvaluation.ci_averaged_evaluation', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_SINGLEIOUEVALUATION_CIEVALUATIONSENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=5000, + serialized_end=5266, +) + + +_SINGLEEVALUATIONELEMENT = _descriptor.Descriptor( + name='SingleEvaluationElement', + full_name='mir.command.SingleEvaluationElement', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='ap', full_name='mir.command.SingleEvaluationElement.ap', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='ar', full_name='mir.command.SingleEvaluationElement.ar', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='tp', full_name='mir.command.SingleEvaluationElement.tp', index=2, + number=3, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='fp', full_name='mir.command.SingleEvaluationElement.fp', index=3, + number=4, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='fn', full_name='mir.command.SingleEvaluationElement.fn', index=4, + number=5, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + 
_descriptor.FieldDescriptor( + name='pr_curve', full_name='mir.command.SingleEvaluationElement.pr_curve', index=5, + number=6, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=5269, + serialized_end=5397, +) + + +_INTPOINT = _descriptor.Descriptor( + name='IntPoint', + full_name='mir.command.IntPoint', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='x', full_name='mir.command.IntPoint.x', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='y', full_name='mir.command.IntPoint.y', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='z', full_name='mir.command.IntPoint.z', index=2, + number=3, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=5399, + serialized_end=5442, +) + + +_FLOATPOINT = _descriptor.Descriptor( + name='FloatPoint', + full_name='mir.command.FloatPoint', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='x', full_name='mir.command.FloatPoint.x', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='y', full_name='mir.command.FloatPoint.y', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='z', full_name='mir.command.FloatPoint.z', index=2, + number=3, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + 
enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=5444, + serialized_end=5489, ) -_SINGLEIOUEVALUATION_TOPICEVALUATIONSENTRY = _descriptor.Descriptor( - name='TopicEvaluationsEntry', - full_name='mir.command.SingleIouEvaluation.TopicEvaluationsEntry', + +_MIRCONTEXT_CKSCNTENTRY = _descriptor.Descriptor( + name='CksCntEntry', + full_name='mir.command.MirContext.CksCntEntry', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( - name='key', full_name='mir.command.SingleIouEvaluation.TopicEvaluationsEntry.key', index=0, + name='key', full_name='mir.command.MirContext.CksCntEntry.key', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='value', full_name='mir.command.SingleIouEvaluation.TopicEvaluationsEntry.value', index=1, + name='value', full_name='mir.command.MirContext.CksCntEntry.value', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, @@ -1659,43 +2797,57 @@ extension_ranges=[], oneofs=[ ], - serialized_start=3293, - serialized_end=3384, + serialized_start=5694, + serialized_end=5768, ) -_SINGLEIOUEVALUATION = _descriptor.Descriptor( - name='SingleIouEvaluation', - full_name='mir.command.SingleIouEvaluation', +_MIRCONTEXT = _descriptor.Descriptor( + name='MirContext', + full_name='mir.command.MirContext', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( - name='ci_evaluations', full_name='mir.command.SingleIouEvaluation.ci_evaluations', index=0, - number=1, type=11, cpp_type=10, label=3, + name='images_cnt', full_name='mir.command.MirContext.images_cnt', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='cks_cnt', full_name='mir.command.MirContext.cks_cnt', index=1, + number=6, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='ci_averaged_evaluation', full_name='mir.command.SingleIouEvaluation.ci_averaged_evaluation', index=1, - number=2, type=11, cpp_type=10, label=1, + name='total_asset_mbytes', full_name='mir.command.MirContext.total_asset_mbytes', index=2, + number=11, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='pred_stats', full_name='mir.command.MirContext.pred_stats', index=3, + number=100, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, 
message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='topic_evaluations', full_name='mir.command.SingleIouEvaluation.topic_evaluations', index=2, - number=3, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], + name='gt_stats', full_name='mir.command.MirContext.gt_stats', index=4, + number=101, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], - nested_types=[_SINGLEIOUEVALUATION_CIEVALUATIONSENTRY, _SINGLEIOUEVALUATION_TOPICEVALUATIONSENTRY, ], + nested_types=[_MIRCONTEXT_CKSCNTENTRY, ], enum_types=[ ], serialized_options=None, @@ -1704,104 +2856,75 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2952, - serialized_end=3384, + serialized_start=5492, + serialized_end=5822, ) -_SINGLETOPICEVALUATION = _descriptor.Descriptor( - name='SingleTopicEvaluation', - full_name='mir.command.SingleTopicEvaluation', +_SINGLEMAPCOUNT_SUBCNTENTRY = _descriptor.Descriptor( + name='SubCntEntry', + full_name='mir.command.SingleMapCount.SubCntEntry', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( - name='ap', full_name='mir.command.SingleTopicEvaluation.ap', index=0, - number=1, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=float(0), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='ar', full_name='mir.command.SingleTopicEvaluation.ar', index=1, - number=2, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=float(0), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='tp', full_name='mir.command.SingleTopicEvaluation.tp', index=2, - number=3, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='fp', full_name='mir.command.SingleTopicEvaluation.fp', index=3, - number=4, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, + name='key', full_name='mir.command.SingleMapCount.SubCntEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='fn', full_name='mir.command.SingleTopicEvaluation.fn', index=4, - number=5, type=5, cpp_type=1, label=1, + name='value', full_name='mir.command.SingleMapCount.SubCntEntry.value', index=1, + number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, 
containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), - _descriptor.FieldDescriptor( - name='pr_curve', full_name='mir.command.SingleTopicEvaluation.pr_curve', index=5, - number=6, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], nested_types=[], enum_types=[ ], - serialized_options=None, + serialized_options=b'8\001', is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], - serialized_start=3386, - serialized_end=3512, + serialized_start=5914, + serialized_end=5959, ) - -_FLOATPOINT = _descriptor.Descriptor( - name='FloatPoint', - full_name='mir.command.FloatPoint', +_SINGLEMAPCOUNT = _descriptor.Descriptor( + name='SingleMapCount', + full_name='mir.command.SingleMapCount', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( - name='x', full_name='mir.command.FloatPoint.x', index=0, - number=1, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=float(0), + name='cnt', full_name='mir.command.SingleMapCount.cnt', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='y', full_name='mir.command.FloatPoint.y', index=1, - number=2, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=float(0), + name='sub_cnt', full_name='mir.command.SingleMapCount.sub_cnt', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], - nested_types=[], + nested_types=[_SINGLEMAPCOUNT_SUBCNTENTRY, ], enum_types=[ ], serialized_options=None, @@ -1810,30 +2933,30 @@ extension_ranges=[], oneofs=[ ], - serialized_start=3514, - serialized_end=3548, + serialized_start=5825, + serialized_end=5959, ) -_MIRCONTEXT_PREDEFINEDKEYIDSCNTENTRY = _descriptor.Descriptor( - name='PredefinedKeyidsCntEntry', - full_name='mir.command.MirContext.PredefinedKeyidsCntEntry', +_ANNOSTATS_TAGSCNTENTRY = _descriptor.Descriptor( + name='TagsCntEntry', + full_name='mir.command.AnnoStats.TagsCntEntry', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( - name='key', full_name='mir.command.MirContext.PredefinedKeyidsCntEntry.key', index=0, - number=1, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, + name='key', full_name='mir.command.AnnoStats.TagsCntEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='value', full_name='mir.command.MirContext.PredefinedKeyidsCntEntry.value', 
index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, + name='value', full_name='mir.command.AnnoStats.TagsCntEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), @@ -1849,27 +2972,27 @@ extension_ranges=[], oneofs=[ ], - serialized_start=3913, - serialized_end=3971, + serialized_start=6193, + serialized_end=6268, ) -_MIRCONTEXT_PROJECTPREDEFINEDKEYIDSCNTENTRY = _descriptor.Descriptor( - name='ProjectPredefinedKeyidsCntEntry', - full_name='mir.command.MirContext.ProjectPredefinedKeyidsCntEntry', +_ANNOSTATS_CLASSIDSCNTENTRY = _descriptor.Descriptor( + name='ClassIdsCntEntry', + full_name='mir.command.AnnoStats.ClassIdsCntEntry', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( - name='key', full_name='mir.command.MirContext.ProjectPredefinedKeyidsCntEntry.key', index=0, + name='key', full_name='mir.command.AnnoStats.ClassIdsCntEntry.key', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='value', full_name='mir.command.MirContext.ProjectPredefinedKeyidsCntEntry.value', index=1, + name='value', full_name='mir.command.AnnoStats.ClassIdsCntEntry.value', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, @@ -1887,102 +3010,187 @@ extension_ranges=[], oneofs=[ ], - serialized_start=3973, - serialized_end=4038, + serialized_start=6270, + serialized_end=6320, ) -_MIRCONTEXT_CUSTOMIZEDKEYWORDSCNTENTRY = _descriptor.Descriptor( - name='CustomizedKeywordsCntEntry', - full_name='mir.command.MirContext.CustomizedKeywordsCntEntry', +_ANNOSTATS = _descriptor.Descriptor( + name='AnnoStats', + full_name='mir.command.AnnoStats', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( - name='key', full_name='mir.command.MirContext.CustomizedKeywordsCntEntry.key', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=b"".decode('utf-8'), + name='total_cnt', full_name='mir.command.AnnoStats.total_cnt', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='value', full_name='mir.command.MirContext.CustomizedKeywordsCntEntry.value', index=1, + name='positive_asset_cnt', full_name='mir.command.AnnoStats.positive_asset_cnt', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='negative_asset_cnt', full_name='mir.command.AnnoStats.negative_asset_cnt', index=2, 
+ number=3, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='tags_cnt', full_name='mir.command.AnnoStats.tags_cnt', index=3, + number=7, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='class_ids_cnt', full_name='mir.command.AnnoStats.class_ids_cnt', index=4, + number=8, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='eval_class_ids', full_name='mir.command.AnnoStats.eval_class_ids', index=5, + number=9, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], - nested_types=[], + nested_types=[_ANNOSTATS_TAGSCNTENTRY, _ANNOSTATS_CLASSIDSCNTENTRY, ], enum_types=[ ], - serialized_options=b'8\001', + serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], - serialized_start=4040, - serialized_end=4100, + serialized_start=5962, + serialized_end=6338, ) -_MIRCONTEXT = _descriptor.Descriptor( - name='MirContext', - full_name='mir.command.MirContext', + +_EXPORTCONFIG = _descriptor.Descriptor( + name='ExportConfig', + full_name='mir.command.ExportConfig', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( - name='images_cnt', full_name='mir.command.MirContext.images_cnt', index=0, - number=1, type=5, cpp_type=1, label=1, + name='asset_format', full_name='mir.command.ExportConfig.asset_format', index=0, + number=1, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='negative_images_cnt', full_name='mir.command.MirContext.negative_images_cnt', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=False, default_value=0, + name='asset_dir', full_name='mir.command.ExportConfig.asset_dir', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='project_negative_images_cnt', full_name='mir.command.MirContext.project_negative_images_cnt', index=2, - number=3, type=5, cpp_type=1, label=1, + name='asset_index_file', full_name='mir.command.ExportConfig.asset_index_file', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, 
default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='asset_index_prefix', full_name='mir.command.ExportConfig.asset_index_prefix', index=3, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='media_location', full_name='mir.command.ExportConfig.media_location', index=4, + number=5, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='need_sub_folder', full_name='mir.command.ExportConfig.need_sub_folder', index=5, + number=6, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='anno_format', full_name='mir.command.ExportConfig.anno_format', index=6, + number=50, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='predefined_keyids_cnt', full_name='mir.command.MirContext.predefined_keyids_cnt', index=3, - number=4, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], + name='gt_dir', full_name='mir.command.ExportConfig.gt_dir', index=7, + number=51, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='project_predefined_keyids_cnt', full_name='mir.command.MirContext.project_predefined_keyids_cnt', index=4, - number=5, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], + name='gt_index_file', full_name='mir.command.ExportConfig.gt_index_file', index=8, + number=52, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='customized_keywords_cnt', full_name='mir.command.MirContext.customized_keywords_cnt', index=5, - number=6, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], + name='gt_index_prefix', full_name='mir.command.ExportConfig.gt_index_prefix', index=9, + number=53, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='pred_dir', full_name='mir.command.ExportConfig.pred_dir', index=10, + number=54, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='pred_index_file', full_name='mir.command.ExportConfig.pred_index_file', index=11, + number=55, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='pred_index_prefix', full_name='mir.command.ExportConfig.pred_index_prefix', index=12, + number=56, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='tvt_index_dir', full_name='mir.command.ExportConfig.tvt_index_dir', index=13, + number=57, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), ], extensions=[ ], - nested_types=[_MIRCONTEXT_PREDEFINEDKEYIDSCNTENTRY, _MIRCONTEXT_PROJECTPREDEFINEDKEYIDSCNTENTRY, _MIRCONTEXT_CUSTOMIZEDKEYWORDSCNTENTRY, ], + nested_types=[], enum_types=[ ], serialized_options=None, @@ -1991,8 +3199,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=3551, - serialized_end=4100, + serialized_start=6341, + serialized_end=6728, ) _MIRMETADATAS_ATTRIBUTESENTRY.fields_by_name['value'].message_type = _METADATAATTRIBUTES @@ -2001,78 +3209,132 @@ _METADATAATTRIBUTES.fields_by_name['timestamp'].message_type = _TIMESTAMP _METADATAATTRIBUTES.fields_by_name['tvt_type'].enum_type = _TVTTYPE _METADATAATTRIBUTES.fields_by_name['asset_type'].enum_type = _ASSETTYPE -_MIRANNOTATIONS_TASKANNOTATIONSENTRY.fields_by_name['value'].message_type = _SINGLETASKANNOTATIONS -_MIRANNOTATIONS_TASKANNOTATIONSENTRY.containing_type = _MIRANNOTATIONS -_MIRANNOTATIONS.fields_by_name['task_annotations'].message_type = _MIRANNOTATIONS_TASKANNOTATIONSENTRY +_MIRANNOTATIONS_IMAGECKSENTRY.fields_by_name['value'].message_type = _SINGLEIMAGECKS +_MIRANNOTATIONS_IMAGECKSENTRY.containing_type = _MIRANNOTATIONS +_MIRANNOTATIONS.fields_by_name['ground_truth'].message_type = _SINGLETASKANNOTATIONS +_MIRANNOTATIONS.fields_by_name['prediction'].message_type = _SINGLETASKANNOTATIONS +_MIRANNOTATIONS.fields_by_name['image_cks'].message_type = _MIRANNOTATIONS_IMAGECKSENTRY _SINGLETASKANNOTATIONS_IMAGEANNOTATIONSENTRY.fields_by_name['value'].message_type = _SINGLEIMAGEANNOTATIONS _SINGLETASKANNOTATIONS_IMAGEANNOTATIONSENTRY.containing_type = _SINGLETASKANNOTATIONS +_SINGLETASKANNOTATIONS_MAPIDCOLORENTRY.fields_by_name['value'].message_type = _INTPOINT +_SINGLETASKANNOTATIONS_MAPIDCOLORENTRY.containing_type = _SINGLETASKANNOTATIONS 
_SINGLETASKANNOTATIONS.fields_by_name['image_annotations'].message_type = _SINGLETASKANNOTATIONS_IMAGEANNOTATIONSENTRY -_SINGLEIMAGEANNOTATIONS.fields_by_name['annotations'].message_type = _ANNOTATION -_ANNOTATION.fields_by_name['box'].message_type = _RECT -_MIRKEYWORDS_KEYWORDSENTRY.fields_by_name['value'].message_type = _KEYWORDS -_MIRKEYWORDS_KEYWORDSENTRY.containing_type = _MIRKEYWORDS -_MIRKEYWORDS_INDEXPREDIFINEDKEYIDSENTRY.fields_by_name['value'].message_type = _ASSETS -_MIRKEYWORDS_INDEXPREDIFINEDKEYIDSENTRY.containing_type = _MIRKEYWORDS -_MIRKEYWORDS.fields_by_name['keywords'].message_type = _MIRKEYWORDS_KEYWORDSENTRY -_MIRKEYWORDS.fields_by_name['index_predifined_keyids'].message_type = _MIRKEYWORDS_INDEXPREDIFINEDKEYIDSENTRY +_SINGLETASKANNOTATIONS.fields_by_name['type'].enum_type = _ANNOTYPE +_SINGLETASKANNOTATIONS.fields_by_name['map_id_color'].message_type = _SINGLETASKANNOTATIONS_MAPIDCOLORENTRY +_SINGLETASKANNOTATIONS.fields_by_name['model'].message_type = _MODELMETA +_SINGLEIMAGEANNOTATIONS.fields_by_name['boxes'].message_type = _OBJECTANNOTATION +_SINGLEIMAGEANNOTATIONS.fields_by_name['polygons'].message_type = _OBJECTANNOTATION +_SINGLEIMAGEANNOTATIONS.fields_by_name['mask'].message_type = _MASKANNOTATION +_SINGLEIMAGECKS_CKSENTRY.containing_type = _SINGLEIMAGECKS +_SINGLEIMAGECKS.fields_by_name['cks'].message_type = _SINGLEIMAGECKS_CKSENTRY +_OBJECTANNOTATION_TAGSENTRY.containing_type = _OBJECTANNOTATION +_OBJECTANNOTATION.fields_by_name['box'].message_type = _RECT +_OBJECTANNOTATION.fields_by_name['tags'].message_type = _OBJECTANNOTATION_TAGSENTRY +_OBJECTANNOTATION.fields_by_name['cm'].enum_type = _CONFUSIONMATRIXTYPE +_OBJECTANNOTATION.fields_by_name['polygon'].message_type = _INTPOINT +_MIRKEYWORDS_CKIDXENTRY.fields_by_name['value'].message_type = _ASSETANNOINDEX +_MIRKEYWORDS_CKIDXENTRY.containing_type = _MIRKEYWORDS +_MIRKEYWORDS.fields_by_name['pred_idx'].message_type = _CITAGTOINDEX +_MIRKEYWORDS.fields_by_name['gt_idx'].message_type = _CITAGTOINDEX +_MIRKEYWORDS.fields_by_name['ck_idx'].message_type = _MIRKEYWORDS_CKIDXENTRY +_CITAGTOINDEX_CISENTRY.fields_by_name['value'].message_type = _MAPSTRINGTOINT32LIST +_CITAGTOINDEX_CISENTRY.containing_type = _CITAGTOINDEX +_CITAGTOINDEX_TAGSENTRY.fields_by_name['value'].message_type = _ASSETANNOINDEX +_CITAGTOINDEX_TAGSENTRY.containing_type = _CITAGTOINDEX +_CITAGTOINDEX.fields_by_name['cis'].message_type = _CITAGTOINDEX_CISENTRY +_CITAGTOINDEX.fields_by_name['tags'].message_type = _CITAGTOINDEX_TAGSENTRY +_MAPSTRINGTOINT32LIST_KEYIDSENTRY.fields_by_name['value'].message_type = _INT32LIST +_MAPSTRINGTOINT32LIST_KEYIDSENTRY.containing_type = _MAPSTRINGTOINT32LIST +_MAPSTRINGTOINT32LIST.fields_by_name['key_ids'].message_type = _MAPSTRINGTOINT32LIST_KEYIDSENTRY +_ASSETANNOINDEX_ASSETANNOSENTRY.fields_by_name['value'].message_type = _INT32LIST +_ASSETANNOINDEX_ASSETANNOSENTRY.containing_type = _ASSETANNOINDEX +_ASSETANNOINDEX_SUBINDEXESENTRY.fields_by_name['value'].message_type = _MAPSTRINGTOINT32LIST +_ASSETANNOINDEX_SUBINDEXESENTRY.containing_type = _ASSETANNOINDEX +_ASSETANNOINDEX.fields_by_name['asset_annos'].message_type = _ASSETANNOINDEX_ASSETANNOSENTRY +_ASSETANNOINDEX.fields_by_name['sub_indexes'].message_type = _ASSETANNOINDEX_SUBINDEXESENTRY _MIRTASKS_TASKSENTRY.fields_by_name['value'].message_type = _TASK _MIRTASKS_TASKSENTRY.containing_type = _MIRTASKS _MIRTASKS.fields_by_name['tasks'].message_type = _MIRTASKS_TASKSENTRY -_TASK_UNKNOWNTYPESENTRY.containing_type = _TASK +_TASK_NEWTYPESENTRY.containing_type = 
_TASK _TASK.fields_by_name['type'].enum_type = _TASKTYPE _TASK.fields_by_name['model'].message_type = _MODELMETA -_TASK.fields_by_name['unknown_types'].message_type = _TASK_UNKNOWNTYPESENTRY _TASK.fields_by_name['evaluation'].message_type = _EVALUATION -_EVALUATION_DATASETEVALUATIONSENTRY.fields_by_name['value'].message_type = _SINGLEDATASETEVALUATION -_EVALUATION_DATASETEVALUATIONSENTRY.containing_type = _EVALUATION +_TASK.fields_by_name['new_types'].message_type = _TASK_NEWTYPESENTRY +_MODELMETA_STAGESENTRY.fields_by_name['value'].message_type = _MODELSTAGE +_MODELMETA_STAGESENTRY.containing_type = _MODELMETA +_MODELMETA.fields_by_name['stages'].message_type = _MODELMETA_STAGESENTRY +_EVALUATION_SUBCKSENTRY.fields_by_name['value'].message_type = _SINGLEDATASETEVALUATION +_EVALUATION_SUBCKSENTRY.containing_type = _EVALUATION _EVALUATION.fields_by_name['config'].message_type = _EVALUATECONFIG -_EVALUATION.fields_by_name['dataset_evaluations'].message_type = _EVALUATION_DATASETEVALUATIONSENTRY +_EVALUATION.fields_by_name['dataset_evaluation'].message_type = _SINGLEDATASETEVALUATION +_EVALUATION.fields_by_name['main_ck'].message_type = _SINGLEDATASETEVALUATION +_EVALUATION.fields_by_name['sub_cks'].message_type = _EVALUATION_SUBCKSENTRY +_EVALUATION.fields_by_name['state'].enum_type = _EVALUATIONSTATE _SINGLEDATASETEVALUATION_IOUEVALUATIONSENTRY.fields_by_name['value'].message_type = _SINGLEIOUEVALUATION _SINGLEDATASETEVALUATION_IOUEVALUATIONSENTRY.containing_type = _SINGLEDATASETEVALUATION _SINGLEDATASETEVALUATION.fields_by_name['iou_evaluations'].message_type = _SINGLEDATASETEVALUATION_IOUEVALUATIONSENTRY _SINGLEDATASETEVALUATION.fields_by_name['iou_averaged_evaluation'].message_type = _SINGLEIOUEVALUATION -_SINGLEIOUEVALUATION_CIEVALUATIONSENTRY.fields_by_name['value'].message_type = _SINGLETOPICEVALUATION +_SINGLEIOUEVALUATION_CIEVALUATIONSENTRY.fields_by_name['value'].message_type = _SINGLEEVALUATIONELEMENT _SINGLEIOUEVALUATION_CIEVALUATIONSENTRY.containing_type = _SINGLEIOUEVALUATION -_SINGLEIOUEVALUATION_TOPICEVALUATIONSENTRY.fields_by_name['value'].message_type = _SINGLETOPICEVALUATION -_SINGLEIOUEVALUATION_TOPICEVALUATIONSENTRY.containing_type = _SINGLEIOUEVALUATION _SINGLEIOUEVALUATION.fields_by_name['ci_evaluations'].message_type = _SINGLEIOUEVALUATION_CIEVALUATIONSENTRY -_SINGLEIOUEVALUATION.fields_by_name['ci_averaged_evaluation'].message_type = _SINGLETOPICEVALUATION -_SINGLEIOUEVALUATION.fields_by_name['topic_evaluations'].message_type = _SINGLEIOUEVALUATION_TOPICEVALUATIONSENTRY -_SINGLETOPICEVALUATION.fields_by_name['pr_curve'].message_type = _FLOATPOINT -_MIRCONTEXT_PREDEFINEDKEYIDSCNTENTRY.containing_type = _MIRCONTEXT -_MIRCONTEXT_PROJECTPREDEFINEDKEYIDSCNTENTRY.containing_type = _MIRCONTEXT -_MIRCONTEXT_CUSTOMIZEDKEYWORDSCNTENTRY.containing_type = _MIRCONTEXT -_MIRCONTEXT.fields_by_name['predefined_keyids_cnt'].message_type = _MIRCONTEXT_PREDEFINEDKEYIDSCNTENTRY -_MIRCONTEXT.fields_by_name['project_predefined_keyids_cnt'].message_type = _MIRCONTEXT_PROJECTPREDEFINEDKEYIDSCNTENTRY -_MIRCONTEXT.fields_by_name['customized_keywords_cnt'].message_type = _MIRCONTEXT_CUSTOMIZEDKEYWORDSCNTENTRY +_SINGLEIOUEVALUATION.fields_by_name['ci_averaged_evaluation'].message_type = _SINGLEEVALUATIONELEMENT +_SINGLEEVALUATIONELEMENT.fields_by_name['pr_curve'].message_type = _FLOATPOINT +_MIRCONTEXT_CKSCNTENTRY.fields_by_name['value'].message_type = _SINGLEMAPCOUNT +_MIRCONTEXT_CKSCNTENTRY.containing_type = _MIRCONTEXT +_MIRCONTEXT.fields_by_name['cks_cnt'].message_type = 
_MIRCONTEXT_CKSCNTENTRY +_MIRCONTEXT.fields_by_name['pred_stats'].message_type = _ANNOSTATS +_MIRCONTEXT.fields_by_name['gt_stats'].message_type = _ANNOSTATS +_SINGLEMAPCOUNT_SUBCNTENTRY.containing_type = _SINGLEMAPCOUNT +_SINGLEMAPCOUNT.fields_by_name['sub_cnt'].message_type = _SINGLEMAPCOUNT_SUBCNTENTRY +_ANNOSTATS_TAGSCNTENTRY.fields_by_name['value'].message_type = _SINGLEMAPCOUNT +_ANNOSTATS_TAGSCNTENTRY.containing_type = _ANNOSTATS +_ANNOSTATS_CLASSIDSCNTENTRY.containing_type = _ANNOSTATS +_ANNOSTATS.fields_by_name['tags_cnt'].message_type = _ANNOSTATS_TAGSCNTENTRY +_ANNOSTATS.fields_by_name['class_ids_cnt'].message_type = _ANNOSTATS_CLASSIDSCNTENTRY +_EXPORTCONFIG.fields_by_name['asset_format'].enum_type = _ASSETFORMAT +_EXPORTCONFIG.fields_by_name['anno_format'].enum_type = _ANNOFORMAT DESCRIPTOR.message_types_by_name['MirMetadatas'] = _MIRMETADATAS DESCRIPTOR.message_types_by_name['MetadataAttributes'] = _METADATAATTRIBUTES DESCRIPTOR.message_types_by_name['Timestamp'] = _TIMESTAMP DESCRIPTOR.message_types_by_name['MirAnnotations'] = _MIRANNOTATIONS DESCRIPTOR.message_types_by_name['SingleTaskAnnotations'] = _SINGLETASKANNOTATIONS DESCRIPTOR.message_types_by_name['SingleImageAnnotations'] = _SINGLEIMAGEANNOTATIONS -DESCRIPTOR.message_types_by_name['Annotation'] = _ANNOTATION +DESCRIPTOR.message_types_by_name['SingleImageCks'] = _SINGLEIMAGECKS +DESCRIPTOR.message_types_by_name['MaskAnnotation'] = _MASKANNOTATION +DESCRIPTOR.message_types_by_name['ObjectAnnotation'] = _OBJECTANNOTATION DESCRIPTOR.message_types_by_name['Rect'] = _RECT DESCRIPTOR.message_types_by_name['MirKeywords'] = _MIRKEYWORDS -DESCRIPTOR.message_types_by_name['Assets'] = _ASSETS -DESCRIPTOR.message_types_by_name['Keywords'] = _KEYWORDS +DESCRIPTOR.message_types_by_name['CiTagToIndex'] = _CITAGTOINDEX +DESCRIPTOR.message_types_by_name['StringList'] = _STRINGLIST +DESCRIPTOR.message_types_by_name['MapStringToInt32List'] = _MAPSTRINGTOINT32LIST +DESCRIPTOR.message_types_by_name['Int32List'] = _INT32LIST +DESCRIPTOR.message_types_by_name['AssetAnnoIndex'] = _ASSETANNOINDEX DESCRIPTOR.message_types_by_name['MirTasks'] = _MIRTASKS DESCRIPTOR.message_types_by_name['Task'] = _TASK DESCRIPTOR.message_types_by_name['ModelMeta'] = _MODELMETA +DESCRIPTOR.message_types_by_name['ModelStage'] = _MODELSTAGE DESCRIPTOR.message_types_by_name['Evaluation'] = _EVALUATION DESCRIPTOR.message_types_by_name['EvaluateConfig'] = _EVALUATECONFIG DESCRIPTOR.message_types_by_name['SingleDatasetEvaluation'] = _SINGLEDATASETEVALUATION DESCRIPTOR.message_types_by_name['SingleIouEvaluation'] = _SINGLEIOUEVALUATION -DESCRIPTOR.message_types_by_name['SingleTopicEvaluation'] = _SINGLETOPICEVALUATION +DESCRIPTOR.message_types_by_name['SingleEvaluationElement'] = _SINGLEEVALUATIONELEMENT +DESCRIPTOR.message_types_by_name['IntPoint'] = _INTPOINT DESCRIPTOR.message_types_by_name['FloatPoint'] = _FLOATPOINT DESCRIPTOR.message_types_by_name['MirContext'] = _MIRCONTEXT +DESCRIPTOR.message_types_by_name['SingleMapCount'] = _SINGLEMAPCOUNT +DESCRIPTOR.message_types_by_name['AnnoStats'] = _ANNOSTATS +DESCRIPTOR.message_types_by_name['ExportConfig'] = _EXPORTCONFIG DESCRIPTOR.enum_types_by_name['TvtType'] = _TVTTYPE DESCRIPTOR.enum_types_by_name['AssetType'] = _ASSETTYPE DESCRIPTOR.enum_types_by_name['TaskType'] = _TASKTYPE DESCRIPTOR.enum_types_by_name['TaskState'] = _TASKSTATE DESCRIPTOR.enum_types_by_name['Sha1Type'] = _SHA1TYPE DESCRIPTOR.enum_types_by_name['MirStorage'] = _MIRSTORAGE -DESCRIPTOR.enum_types_by_name['LabelFormat'] = _LABELFORMAT 
+DESCRIPTOR.enum_types_by_name['AnnoFormat'] = _ANNOFORMAT +DESCRIPTOR.enum_types_by_name['AssetFormat'] = _ASSETFORMAT +DESCRIPTOR.enum_types_by_name['AnnoType'] = _ANNOTYPE +DESCRIPTOR.enum_types_by_name['ConfusionMatrixType'] = _CONFUSIONMATRIXTYPE +DESCRIPTOR.enum_types_by_name['EvaluationState'] = _EVALUATIONSTATE _sym_db.RegisterFileDescriptor(DESCRIPTOR) MirMetadatas = _reflection.GeneratedProtocolMessageType('MirMetadatas', (_message.Message,), { @@ -2106,10 +3368,10 @@ MirAnnotations = _reflection.GeneratedProtocolMessageType('MirAnnotations', (_message.Message,), { - 'TaskAnnotationsEntry' : _reflection.GeneratedProtocolMessageType('TaskAnnotationsEntry', (_message.Message,), { - 'DESCRIPTOR' : _MIRANNOTATIONS_TASKANNOTATIONSENTRY, + 'ImageCksEntry' : _reflection.GeneratedProtocolMessageType('ImageCksEntry', (_message.Message,), { + 'DESCRIPTOR' : _MIRANNOTATIONS_IMAGECKSENTRY, '__module__' : 'mir_command_pb2' - # @@protoc_insertion_point(class_scope:mir.command.MirAnnotations.TaskAnnotationsEntry) + # @@protoc_insertion_point(class_scope:mir.command.MirAnnotations.ImageCksEntry) }) , 'DESCRIPTOR' : _MIRANNOTATIONS, @@ -2117,7 +3379,7 @@ # @@protoc_insertion_point(class_scope:mir.command.MirAnnotations) }) _sym_db.RegisterMessage(MirAnnotations) -_sym_db.RegisterMessage(MirAnnotations.TaskAnnotationsEntry) +_sym_db.RegisterMessage(MirAnnotations.ImageCksEntry) SingleTaskAnnotations = _reflection.GeneratedProtocolMessageType('SingleTaskAnnotations', (_message.Message,), { @@ -2127,12 +3389,20 @@ # @@protoc_insertion_point(class_scope:mir.command.SingleTaskAnnotations.ImageAnnotationsEntry) }) , + + 'MapIdColorEntry' : _reflection.GeneratedProtocolMessageType('MapIdColorEntry', (_message.Message,), { + 'DESCRIPTOR' : _SINGLETASKANNOTATIONS_MAPIDCOLORENTRY, + '__module__' : 'mir_command_pb2' + # @@protoc_insertion_point(class_scope:mir.command.SingleTaskAnnotations.MapIdColorEntry) + }) + , 'DESCRIPTOR' : _SINGLETASKANNOTATIONS, '__module__' : 'mir_command_pb2' # @@protoc_insertion_point(class_scope:mir.command.SingleTaskAnnotations) }) _sym_db.RegisterMessage(SingleTaskAnnotations) _sym_db.RegisterMessage(SingleTaskAnnotations.ImageAnnotationsEntry) +_sym_db.RegisterMessage(SingleTaskAnnotations.MapIdColorEntry) SingleImageAnnotations = _reflection.GeneratedProtocolMessageType('SingleImageAnnotations', (_message.Message,), { 'DESCRIPTOR' : _SINGLEIMAGEANNOTATIONS, @@ -2141,12 +3411,42 @@ }) _sym_db.RegisterMessage(SingleImageAnnotations) -Annotation = _reflection.GeneratedProtocolMessageType('Annotation', (_message.Message,), { - 'DESCRIPTOR' : _ANNOTATION, +SingleImageCks = _reflection.GeneratedProtocolMessageType('SingleImageCks', (_message.Message,), { + + 'CksEntry' : _reflection.GeneratedProtocolMessageType('CksEntry', (_message.Message,), { + 'DESCRIPTOR' : _SINGLEIMAGECKS_CKSENTRY, + '__module__' : 'mir_command_pb2' + # @@protoc_insertion_point(class_scope:mir.command.SingleImageCks.CksEntry) + }) + , + 'DESCRIPTOR' : _SINGLEIMAGECKS, + '__module__' : 'mir_command_pb2' + # @@protoc_insertion_point(class_scope:mir.command.SingleImageCks) + }) +_sym_db.RegisterMessage(SingleImageCks) +_sym_db.RegisterMessage(SingleImageCks.CksEntry) + +MaskAnnotation = _reflection.GeneratedProtocolMessageType('MaskAnnotation', (_message.Message,), { + 'DESCRIPTOR' : _MASKANNOTATION, + '__module__' : 'mir_command_pb2' + # @@protoc_insertion_point(class_scope:mir.command.MaskAnnotation) + }) +_sym_db.RegisterMessage(MaskAnnotation) + +ObjectAnnotation = 
_reflection.GeneratedProtocolMessageType('ObjectAnnotation', (_message.Message,), { + + 'TagsEntry' : _reflection.GeneratedProtocolMessageType('TagsEntry', (_message.Message,), { + 'DESCRIPTOR' : _OBJECTANNOTATION_TAGSENTRY, + '__module__' : 'mir_command_pb2' + # @@protoc_insertion_point(class_scope:mir.command.ObjectAnnotation.TagsEntry) + }) + , + 'DESCRIPTOR' : _OBJECTANNOTATION, '__module__' : 'mir_command_pb2' - # @@protoc_insertion_point(class_scope:mir.command.Annotation) + # @@protoc_insertion_point(class_scope:mir.command.ObjectAnnotation) }) -_sym_db.RegisterMessage(Annotation) +_sym_db.RegisterMessage(ObjectAnnotation) +_sym_db.RegisterMessage(ObjectAnnotation.TagsEntry) Rect = _reflection.GeneratedProtocolMessageType('Rect', (_message.Message,), { 'DESCRIPTOR' : _RECT, @@ -2157,40 +3457,93 @@ MirKeywords = _reflection.GeneratedProtocolMessageType('MirKeywords', (_message.Message,), { - 'KeywordsEntry' : _reflection.GeneratedProtocolMessageType('KeywordsEntry', (_message.Message,), { - 'DESCRIPTOR' : _MIRKEYWORDS_KEYWORDSENTRY, + 'CkIdxEntry' : _reflection.GeneratedProtocolMessageType('CkIdxEntry', (_message.Message,), { + 'DESCRIPTOR' : _MIRKEYWORDS_CKIDXENTRY, '__module__' : 'mir_command_pb2' - # @@protoc_insertion_point(class_scope:mir.command.MirKeywords.KeywordsEntry) + # @@protoc_insertion_point(class_scope:mir.command.MirKeywords.CkIdxEntry) }) , + 'DESCRIPTOR' : _MIRKEYWORDS, + '__module__' : 'mir_command_pb2' + # @@protoc_insertion_point(class_scope:mir.command.MirKeywords) + }) +_sym_db.RegisterMessage(MirKeywords) +_sym_db.RegisterMessage(MirKeywords.CkIdxEntry) + +CiTagToIndex = _reflection.GeneratedProtocolMessageType('CiTagToIndex', (_message.Message,), { - 'IndexPredifinedKeyidsEntry' : _reflection.GeneratedProtocolMessageType('IndexPredifinedKeyidsEntry', (_message.Message,), { - 'DESCRIPTOR' : _MIRKEYWORDS_INDEXPREDIFINEDKEYIDSENTRY, + 'CisEntry' : _reflection.GeneratedProtocolMessageType('CisEntry', (_message.Message,), { + 'DESCRIPTOR' : _CITAGTOINDEX_CISENTRY, '__module__' : 'mir_command_pb2' - # @@protoc_insertion_point(class_scope:mir.command.MirKeywords.IndexPredifinedKeyidsEntry) + # @@protoc_insertion_point(class_scope:mir.command.CiTagToIndex.CisEntry) }) , - 'DESCRIPTOR' : _MIRKEYWORDS, + + 'TagsEntry' : _reflection.GeneratedProtocolMessageType('TagsEntry', (_message.Message,), { + 'DESCRIPTOR' : _CITAGTOINDEX_TAGSENTRY, + '__module__' : 'mir_command_pb2' + # @@protoc_insertion_point(class_scope:mir.command.CiTagToIndex.TagsEntry) + }) + , + 'DESCRIPTOR' : _CITAGTOINDEX, '__module__' : 'mir_command_pb2' - # @@protoc_insertion_point(class_scope:mir.command.MirKeywords) + # @@protoc_insertion_point(class_scope:mir.command.CiTagToIndex) }) -_sym_db.RegisterMessage(MirKeywords) -_sym_db.RegisterMessage(MirKeywords.KeywordsEntry) -_sym_db.RegisterMessage(MirKeywords.IndexPredifinedKeyidsEntry) +_sym_db.RegisterMessage(CiTagToIndex) +_sym_db.RegisterMessage(CiTagToIndex.CisEntry) +_sym_db.RegisterMessage(CiTagToIndex.TagsEntry) + +StringList = _reflection.GeneratedProtocolMessageType('StringList', (_message.Message,), { + 'DESCRIPTOR' : _STRINGLIST, + '__module__' : 'mir_command_pb2' + # @@protoc_insertion_point(class_scope:mir.command.StringList) + }) +_sym_db.RegisterMessage(StringList) -Assets = _reflection.GeneratedProtocolMessageType('Assets', (_message.Message,), { - 'DESCRIPTOR' : _ASSETS, +MapStringToInt32List = _reflection.GeneratedProtocolMessageType('MapStringToInt32List', (_message.Message,), { + + 'KeyIdsEntry' : 
_reflection.GeneratedProtocolMessageType('KeyIdsEntry', (_message.Message,), { + 'DESCRIPTOR' : _MAPSTRINGTOINT32LIST_KEYIDSENTRY, + '__module__' : 'mir_command_pb2' + # @@protoc_insertion_point(class_scope:mir.command.MapStringToInt32List.KeyIdsEntry) + }) + , + 'DESCRIPTOR' : _MAPSTRINGTOINT32LIST, '__module__' : 'mir_command_pb2' - # @@protoc_insertion_point(class_scope:mir.command.Assets) + # @@protoc_insertion_point(class_scope:mir.command.MapStringToInt32List) }) -_sym_db.RegisterMessage(Assets) +_sym_db.RegisterMessage(MapStringToInt32List) +_sym_db.RegisterMessage(MapStringToInt32List.KeyIdsEntry) + +Int32List = _reflection.GeneratedProtocolMessageType('Int32List', (_message.Message,), { + 'DESCRIPTOR' : _INT32LIST, + '__module__' : 'mir_command_pb2' + # @@protoc_insertion_point(class_scope:mir.command.Int32List) + }) +_sym_db.RegisterMessage(Int32List) + +AssetAnnoIndex = _reflection.GeneratedProtocolMessageType('AssetAnnoIndex', (_message.Message,), { -Keywords = _reflection.GeneratedProtocolMessageType('Keywords', (_message.Message,), { - 'DESCRIPTOR' : _KEYWORDS, + 'AssetAnnosEntry' : _reflection.GeneratedProtocolMessageType('AssetAnnosEntry', (_message.Message,), { + 'DESCRIPTOR' : _ASSETANNOINDEX_ASSETANNOSENTRY, + '__module__' : 'mir_command_pb2' + # @@protoc_insertion_point(class_scope:mir.command.AssetAnnoIndex.AssetAnnosEntry) + }) + , + + 'SubIndexesEntry' : _reflection.GeneratedProtocolMessageType('SubIndexesEntry', (_message.Message,), { + 'DESCRIPTOR' : _ASSETANNOINDEX_SUBINDEXESENTRY, + '__module__' : 'mir_command_pb2' + # @@protoc_insertion_point(class_scope:mir.command.AssetAnnoIndex.SubIndexesEntry) + }) + , + 'DESCRIPTOR' : _ASSETANNOINDEX, '__module__' : 'mir_command_pb2' - # @@protoc_insertion_point(class_scope:mir.command.Keywords) + # @@protoc_insertion_point(class_scope:mir.command.AssetAnnoIndex) }) -_sym_db.RegisterMessage(Keywords) +_sym_db.RegisterMessage(AssetAnnoIndex) +_sym_db.RegisterMessage(AssetAnnoIndex.AssetAnnosEntry) +_sym_db.RegisterMessage(AssetAnnoIndex.SubIndexesEntry) MirTasks = _reflection.GeneratedProtocolMessageType('MirTasks', (_message.Message,), { @@ -2209,10 +3562,10 @@ Task = _reflection.GeneratedProtocolMessageType('Task', (_message.Message,), { - 'UnknownTypesEntry' : _reflection.GeneratedProtocolMessageType('UnknownTypesEntry', (_message.Message,), { - 'DESCRIPTOR' : _TASK_UNKNOWNTYPESENTRY, + 'NewTypesEntry' : _reflection.GeneratedProtocolMessageType('NewTypesEntry', (_message.Message,), { + 'DESCRIPTOR' : _TASK_NEWTYPESENTRY, '__module__' : 'mir_command_pb2' - # @@protoc_insertion_point(class_scope:mir.command.Task.UnknownTypesEntry) + # @@protoc_insertion_point(class_scope:mir.command.Task.NewTypesEntry) }) , 'DESCRIPTOR' : _TASK, @@ -2220,21 +3573,36 @@ # @@protoc_insertion_point(class_scope:mir.command.Task) }) _sym_db.RegisterMessage(Task) -_sym_db.RegisterMessage(Task.UnknownTypesEntry) +_sym_db.RegisterMessage(Task.NewTypesEntry) ModelMeta = _reflection.GeneratedProtocolMessageType('ModelMeta', (_message.Message,), { + + 'StagesEntry' : _reflection.GeneratedProtocolMessageType('StagesEntry', (_message.Message,), { + 'DESCRIPTOR' : _MODELMETA_STAGESENTRY, + '__module__' : 'mir_command_pb2' + # @@protoc_insertion_point(class_scope:mir.command.ModelMeta.StagesEntry) + }) + , 'DESCRIPTOR' : _MODELMETA, '__module__' : 'mir_command_pb2' # @@protoc_insertion_point(class_scope:mir.command.ModelMeta) }) _sym_db.RegisterMessage(ModelMeta) +_sym_db.RegisterMessage(ModelMeta.StagesEntry) + +ModelStage = 
_reflection.GeneratedProtocolMessageType('ModelStage', (_message.Message,), { + 'DESCRIPTOR' : _MODELSTAGE, + '__module__' : 'mir_command_pb2' + # @@protoc_insertion_point(class_scope:mir.command.ModelStage) + }) +_sym_db.RegisterMessage(ModelStage) Evaluation = _reflection.GeneratedProtocolMessageType('Evaluation', (_message.Message,), { - 'DatasetEvaluationsEntry' : _reflection.GeneratedProtocolMessageType('DatasetEvaluationsEntry', (_message.Message,), { - 'DESCRIPTOR' : _EVALUATION_DATASETEVALUATIONSENTRY, + 'SubCksEntry' : _reflection.GeneratedProtocolMessageType('SubCksEntry', (_message.Message,), { + 'DESCRIPTOR' : _EVALUATION_SUBCKSENTRY, '__module__' : 'mir_command_pb2' - # @@protoc_insertion_point(class_scope:mir.command.Evaluation.DatasetEvaluationsEntry) + # @@protoc_insertion_point(class_scope:mir.command.Evaluation.SubCksEntry) }) , 'DESCRIPTOR' : _EVALUATION, @@ -2242,7 +3610,7 @@ # @@protoc_insertion_point(class_scope:mir.command.Evaluation) }) _sym_db.RegisterMessage(Evaluation) -_sym_db.RegisterMessage(Evaluation.DatasetEvaluationsEntry) +_sym_db.RegisterMessage(Evaluation.SubCksEntry) EvaluateConfig = _reflection.GeneratedProtocolMessageType('EvaluateConfig', (_message.Message,), { 'DESCRIPTOR' : _EVALUATECONFIG, @@ -2274,27 +3642,26 @@ # @@protoc_insertion_point(class_scope:mir.command.SingleIouEvaluation.CiEvaluationsEntry) }) , - - 'TopicEvaluationsEntry' : _reflection.GeneratedProtocolMessageType('TopicEvaluationsEntry', (_message.Message,), { - 'DESCRIPTOR' : _SINGLEIOUEVALUATION_TOPICEVALUATIONSENTRY, - '__module__' : 'mir_command_pb2' - # @@protoc_insertion_point(class_scope:mir.command.SingleIouEvaluation.TopicEvaluationsEntry) - }) - , 'DESCRIPTOR' : _SINGLEIOUEVALUATION, '__module__' : 'mir_command_pb2' # @@protoc_insertion_point(class_scope:mir.command.SingleIouEvaluation) }) _sym_db.RegisterMessage(SingleIouEvaluation) _sym_db.RegisterMessage(SingleIouEvaluation.CiEvaluationsEntry) -_sym_db.RegisterMessage(SingleIouEvaluation.TopicEvaluationsEntry) -SingleTopicEvaluation = _reflection.GeneratedProtocolMessageType('SingleTopicEvaluation', (_message.Message,), { - 'DESCRIPTOR' : _SINGLETOPICEVALUATION, +SingleEvaluationElement = _reflection.GeneratedProtocolMessageType('SingleEvaluationElement', (_message.Message,), { + 'DESCRIPTOR' : _SINGLEEVALUATIONELEMENT, + '__module__' : 'mir_command_pb2' + # @@protoc_insertion_point(class_scope:mir.command.SingleEvaluationElement) + }) +_sym_db.RegisterMessage(SingleEvaluationElement) + +IntPoint = _reflection.GeneratedProtocolMessageType('IntPoint', (_message.Message,), { + 'DESCRIPTOR' : _INTPOINT, '__module__' : 'mir_command_pb2' - # @@protoc_insertion_point(class_scope:mir.command.SingleTopicEvaluation) + # @@protoc_insertion_point(class_scope:mir.command.IntPoint) }) -_sym_db.RegisterMessage(SingleTopicEvaluation) +_sym_db.RegisterMessage(IntPoint) FloatPoint = _reflection.GeneratedProtocolMessageType('FloatPoint', (_message.Message,), { 'DESCRIPTOR' : _FLOATPOINT, @@ -2305,48 +3672,86 @@ MirContext = _reflection.GeneratedProtocolMessageType('MirContext', (_message.Message,), { - 'PredefinedKeyidsCntEntry' : _reflection.GeneratedProtocolMessageType('PredefinedKeyidsCntEntry', (_message.Message,), { - 'DESCRIPTOR' : _MIRCONTEXT_PREDEFINEDKEYIDSCNTENTRY, + 'CksCntEntry' : _reflection.GeneratedProtocolMessageType('CksCntEntry', (_message.Message,), { + 'DESCRIPTOR' : _MIRCONTEXT_CKSCNTENTRY, + '__module__' : 'mir_command_pb2' + # @@protoc_insertion_point(class_scope:mir.command.MirContext.CksCntEntry) + }) + , + 
'DESCRIPTOR' : _MIRCONTEXT, + '__module__' : 'mir_command_pb2' + # @@protoc_insertion_point(class_scope:mir.command.MirContext) + }) +_sym_db.RegisterMessage(MirContext) +_sym_db.RegisterMessage(MirContext.CksCntEntry) + +SingleMapCount = _reflection.GeneratedProtocolMessageType('SingleMapCount', (_message.Message,), { + + 'SubCntEntry' : _reflection.GeneratedProtocolMessageType('SubCntEntry', (_message.Message,), { + 'DESCRIPTOR' : _SINGLEMAPCOUNT_SUBCNTENTRY, '__module__' : 'mir_command_pb2' - # @@protoc_insertion_point(class_scope:mir.command.MirContext.PredefinedKeyidsCntEntry) + # @@protoc_insertion_point(class_scope:mir.command.SingleMapCount.SubCntEntry) }) , + 'DESCRIPTOR' : _SINGLEMAPCOUNT, + '__module__' : 'mir_command_pb2' + # @@protoc_insertion_point(class_scope:mir.command.SingleMapCount) + }) +_sym_db.RegisterMessage(SingleMapCount) +_sym_db.RegisterMessage(SingleMapCount.SubCntEntry) + +AnnoStats = _reflection.GeneratedProtocolMessageType('AnnoStats', (_message.Message,), { - 'ProjectPredefinedKeyidsCntEntry' : _reflection.GeneratedProtocolMessageType('ProjectPredefinedKeyidsCntEntry', (_message.Message,), { - 'DESCRIPTOR' : _MIRCONTEXT_PROJECTPREDEFINEDKEYIDSCNTENTRY, + 'TagsCntEntry' : _reflection.GeneratedProtocolMessageType('TagsCntEntry', (_message.Message,), { + 'DESCRIPTOR' : _ANNOSTATS_TAGSCNTENTRY, '__module__' : 'mir_command_pb2' - # @@protoc_insertion_point(class_scope:mir.command.MirContext.ProjectPredefinedKeyidsCntEntry) + # @@protoc_insertion_point(class_scope:mir.command.AnnoStats.TagsCntEntry) }) , - 'CustomizedKeywordsCntEntry' : _reflection.GeneratedProtocolMessageType('CustomizedKeywordsCntEntry', (_message.Message,), { - 'DESCRIPTOR' : _MIRCONTEXT_CUSTOMIZEDKEYWORDSCNTENTRY, + 'ClassIdsCntEntry' : _reflection.GeneratedProtocolMessageType('ClassIdsCntEntry', (_message.Message,), { + 'DESCRIPTOR' : _ANNOSTATS_CLASSIDSCNTENTRY, '__module__' : 'mir_command_pb2' - # @@protoc_insertion_point(class_scope:mir.command.MirContext.CustomizedKeywordsCntEntry) + # @@protoc_insertion_point(class_scope:mir.command.AnnoStats.ClassIdsCntEntry) }) , - 'DESCRIPTOR' : _MIRCONTEXT, + 'DESCRIPTOR' : _ANNOSTATS, '__module__' : 'mir_command_pb2' - # @@protoc_insertion_point(class_scope:mir.command.MirContext) + # @@protoc_insertion_point(class_scope:mir.command.AnnoStats) }) -_sym_db.RegisterMessage(MirContext) -_sym_db.RegisterMessage(MirContext.PredefinedKeyidsCntEntry) -_sym_db.RegisterMessage(MirContext.ProjectPredefinedKeyidsCntEntry) -_sym_db.RegisterMessage(MirContext.CustomizedKeywordsCntEntry) +_sym_db.RegisterMessage(AnnoStats) +_sym_db.RegisterMessage(AnnoStats.TagsCntEntry) +_sym_db.RegisterMessage(AnnoStats.ClassIdsCntEntry) + +ExportConfig = _reflection.GeneratedProtocolMessageType('ExportConfig', (_message.Message,), { + 'DESCRIPTOR' : _EXPORTCONFIG, + '__module__' : 'mir_command_pb2' + # @@protoc_insertion_point(class_scope:mir.command.ExportConfig) + }) +_sym_db.RegisterMessage(ExportConfig) +DESCRIPTOR._options = None _MIRMETADATAS_ATTRIBUTESENTRY._options = None -_MIRANNOTATIONS_TASKANNOTATIONSENTRY._options = None +_MIRANNOTATIONS_IMAGECKSENTRY._options = None _SINGLETASKANNOTATIONS_IMAGEANNOTATIONSENTRY._options = None -_MIRKEYWORDS_KEYWORDSENTRY._options = None -_MIRKEYWORDS_INDEXPREDIFINEDKEYIDSENTRY._options = None +_SINGLETASKANNOTATIONS_MAPIDCOLORENTRY._options = None +_SINGLEIMAGECKS_CKSENTRY._options = None +_OBJECTANNOTATION_TAGSENTRY._options = None +_MIRKEYWORDS_CKIDXENTRY._options = None +_CITAGTOINDEX_CISENTRY._options = None 
+_CITAGTOINDEX_TAGSENTRY._options = None +_MAPSTRINGTOINT32LIST_KEYIDSENTRY._options = None +_ASSETANNOINDEX_ASSETANNOSENTRY._options = None +_ASSETANNOINDEX_SUBINDEXESENTRY._options = None _MIRTASKS_TASKSENTRY._options = None -_TASK_UNKNOWNTYPESENTRY._options = None -_EVALUATION_DATASETEVALUATIONSENTRY._options = None +_TASK_NEWTYPESENTRY._options = None +_MODELMETA_STAGESENTRY._options = None +_EVALUATION_SUBCKSENTRY._options = None _SINGLEDATASETEVALUATION_IOUEVALUATIONSENTRY._options = None _SINGLEIOUEVALUATION_CIEVALUATIONSENTRY._options = None -_SINGLEIOUEVALUATION_TOPICEVALUATIONSENTRY._options = None -_MIRCONTEXT_PREDEFINEDKEYIDSCNTENTRY._options = None -_MIRCONTEXT_PROJECTPREDEFINEDKEYIDSCNTENTRY._options = None -_MIRCONTEXT_CUSTOMIZEDKEYWORDSCNTENTRY._options = None +_MIRCONTEXT_CKSCNTENTRY._options = None +_SINGLEMAPCOUNT_SUBCNTENTRY._options = None +_ANNOSTATS_TAGSCNTENTRY._options = None +_ANNOSTATS_CLASSIDSCNTENTRY._options = None # @@protoc_insertion_point(module_scope) diff --git a/ymir/command/mir/protos/mir_command_pb2.pyi b/ymir/command/mir/protos/mir_command_pb2.pyi index b1874b43a4..a98ed74336 100644 --- a/ymir/command/mir/protos/mir_command_pb2.pyi +++ b/ymir/command/mir/protos/mir_command_pb2.pyi @@ -84,6 +84,8 @@ class _TaskTypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumT TaskTypeInit = TaskType.V(12) TaskTypeImportModel = TaskType.V(13) + TaskTypeCopyModel = TaskType.V(14) + TaskTypeDatasetInfer = TaskType.V(15) TaskTypeEvaluate = TaskType.V(16) TaskTypeUnknown = TaskType.V(0) @@ -102,6 +104,8 @@ TaskTypeFusion = TaskType.V(11) TaskTypeInit = TaskType.V(12) TaskTypeImportModel = TaskType.V(13) +TaskTypeCopyModel = TaskType.V(14) +TaskTypeDatasetInfer = TaskType.V(15) TaskTypeEvaluate = TaskType.V(16) global___TaskType = TaskType @@ -164,20 +168,148 @@ MIR_CONTEXT = MirStorage.V(4) global___MirStorage = MirStorage -class LabelFormat(_LabelFormat, metaclass=_LabelFormatEnumTypeWrapper): +class AnnoFormat(_AnnoFormat, metaclass=_AnnoFormatEnumTypeWrapper): pass -class _LabelFormat: +class _AnnoFormat: V = typing.NewType('V', builtins.int) -class _LabelFormatEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_LabelFormat.V], builtins.type): +class _AnnoFormatEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_AnnoFormat.V], builtins.type): DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor = ... - NO_ANNOTATION = LabelFormat.V(0) - PASCAL_VOC = LabelFormat.V(1) - IF_ARK = LabelFormat.V(2) + AF_NO_ANNOTATION = AnnoFormat.V(0) + AF_DET_PASCAL_VOC = AnnoFormat.V(1) + AF_DET_ARK_JSON = AnnoFormat.V(2) + AF_DET_LS_JSON = AnnoFormat.V(3) + AF_SEG_POLYGON = AnnoFormat.V(4) + AF_SEG_MASK = AnnoFormat.V(5) + +AF_NO_ANNOTATION = AnnoFormat.V(0) +AF_DET_PASCAL_VOC = AnnoFormat.V(1) +AF_DET_ARK_JSON = AnnoFormat.V(2) +AF_DET_LS_JSON = AnnoFormat.V(3) +AF_SEG_POLYGON = AnnoFormat.V(4) +AF_SEG_MASK = AnnoFormat.V(5) +global___AnnoFormat = AnnoFormat + + +class AssetFormat(_AssetFormat, metaclass=_AssetFormatEnumTypeWrapper): + pass +class _AssetFormat: + V = typing.NewType('V', builtins.int) +class _AssetFormatEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_AssetFormat.V], builtins.type): + DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor = ... 
+    AF_UNKNOWN = AssetFormat.V(0)
+    AF_RAW = AssetFormat.V(1)
+    AF_LMDB = AssetFormat.V(2)
+
+AF_UNKNOWN = AssetFormat.V(0)
+AF_RAW = AssetFormat.V(1)
+AF_LMDB = AssetFormat.V(2)
+global___AssetFormat = AssetFormat
+
+
+class AnnoType(_AnnoType, metaclass=_AnnoTypeEnumTypeWrapper):
+    pass
+class _AnnoType:
+    V = typing.NewType('V', builtins.int)
+class _AnnoTypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_AnnoType.V], builtins.type):
+    DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor = ...
+    AT_UNKNOWN = AnnoType.V(0)
+    AT_CLASS = AnnoType.V(1)
+    """Classification with class id, not implemented."""
+
+    AT_DET_BOX = AnnoType.V(2)
+    """Detection w. bounding box."""
+
+    AT_SEG_POLYGON = AnnoType.V(3)
+    """Semantic Segmentation w. polygons."""
+
+    AT_SEG_MASK = AnnoType.V(4)
+    """Instance Segmentation w. mask."""
+
+
+AT_UNKNOWN = AnnoType.V(0)
+AT_CLASS = AnnoType.V(1)
+"""Classification with class id, not implemented."""
+
+AT_DET_BOX = AnnoType.V(2)
+"""Detection w. bounding box."""
+
+AT_SEG_POLYGON = AnnoType.V(3)
+"""Semantic Segmentation w. polygons."""
+
+AT_SEG_MASK = AnnoType.V(4)
+"""Instance Segmentation w. mask."""
+
+global___AnnoType = AnnoType
+
 
-NO_ANNOTATION = LabelFormat.V(0)
-PASCAL_VOC = LabelFormat.V(1)
-IF_ARK = LabelFormat.V(2)
-global___LabelFormat = LabelFormat
+class ConfusionMatrixType(_ConfusionMatrixType, metaclass=_ConfusionMatrixTypeEnumTypeWrapper):
+    pass
+class _ConfusionMatrixType:
+    V = typing.NewType('V', builtins.int)
+class _ConfusionMatrixTypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_ConfusionMatrixType.V], builtins.type):
+    DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor = ...
+    NotSet = ConfusionMatrixType.V(0)
+    TP = ConfusionMatrixType.V(1)
+    FP = ConfusionMatrixType.V(2)
+    FN = ConfusionMatrixType.V(3)
+    TN = ConfusionMatrixType.V(4)
+    Unknown = ConfusionMatrixType.V(5)
+    MTP = ConfusionMatrixType.V(11)
+    """Matched True Positive, only for gt."""
+
+    IGNORED = ConfusionMatrixType.V(12)
+
+NotSet = ConfusionMatrixType.V(0)
+TP = ConfusionMatrixType.V(1)
+FP = ConfusionMatrixType.V(2)
+FN = ConfusionMatrixType.V(3)
+TN = ConfusionMatrixType.V(4)
+Unknown = ConfusionMatrixType.V(5)
+MTP = ConfusionMatrixType.V(11)
+"""Matched True Positive, only for gt."""
+
+IGNORED = ConfusionMatrixType.V(12)
+global___ConfusionMatrixType = ConfusionMatrixType
+
+
+class EvaluationState(_EvaluationState, metaclass=_EvaluationStateEnumTypeWrapper):
+    pass
+class _EvaluationState:
+    V = typing.NewType('V', builtins.int)
+class _EvaluationStateEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_EvaluationState.V], builtins.type):
+    DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor = ...
+ ES_NOT_SET = EvaluationState.V(0) + """evaluate not started""" + + ES_READY = EvaluationState.V(1) + """evaluation result ready to use""" + + ES_NO_GT_OR_PRED = EvaluationState.V(2) + """evaluation not finished because there's no gt or pred""" + + ES_EXCEEDS_LIMIT = EvaluationState.V(3) + """evaluation not finished because there're too many images or too many class ids""" + + ES_NO_CLASS_IDS = EvaluationState.V(4) + """evaluation not finished because there's no evaluate class ids""" + + +ES_NOT_SET = EvaluationState.V(0) +"""evaluate not started""" + +ES_READY = EvaluationState.V(1) +"""evaluation result ready to use""" + +ES_NO_GT_OR_PRED = EvaluationState.V(2) +"""evaluation not finished because there's no gt or pred""" + +ES_EXCEEDS_LIMIT = EvaluationState.V(3) +"""evaluation not finished because there're too many images or too many class ids""" + +ES_NO_CLASS_IDS = EvaluationState.V(4) +"""evaluation not finished because there's no evaluate class ids""" + +global___EvaluationState = EvaluationState class MirMetadatas(google.protobuf.message.Message): @@ -212,14 +344,14 @@ global___MirMetadatas = MirMetadatas class MetadataAttributes(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... - DATASET_NAME_FIELD_NUMBER: builtins.int TIMESTAMP_FIELD_NUMBER: builtins.int TVT_TYPE_FIELD_NUMBER: builtins.int ASSET_TYPE_FIELD_NUMBER: builtins.int WIDTH_FIELD_NUMBER: builtins.int HEIGHT_FIELD_NUMBER: builtins.int IMAGE_CHANNELS_FIELD_NUMBER: builtins.int - dataset_name: typing.Text = ... + BYTE_SIZE_FIELD_NUMBER: builtins.int + ORIGIN_FILENAME_FIELD_NUMBER: builtins.int @property def timestamp(self) -> global___Timestamp: ... tvt_type: global___TvtType.V = ... @@ -233,18 +365,21 @@ class MetadataAttributes(google.protobuf.message.Message): image_channels: builtins.int = ... """/ (for images) channel count""" + byte_size: builtins.int = ... + origin_filename: typing.Text = ... def __init__(self, *, - dataset_name : typing.Text = ..., timestamp : typing.Optional[global___Timestamp] = ..., tvt_type : global___TvtType.V = ..., asset_type : global___AssetType.V = ..., width : builtins.int = ..., height : builtins.int = ..., image_channels : builtins.int = ..., + byte_size : builtins.int = ..., + origin_filename : typing.Text = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["timestamp",b"timestamp"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["asset_type",b"asset_type","dataset_name",b"dataset_name","height",b"height","image_channels",b"image_channels","timestamp",b"timestamp","tvt_type",b"tvt_type","width",b"width"]) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["asset_type",b"asset_type","byte_size",b"byte_size","height",b"height","image_channels",b"image_channels","origin_filename",b"origin_filename","timestamp",b"timestamp","tvt_type",b"tvt_type","width",b"width"]) -> None: ... global___MetadataAttributes = MetadataAttributes class Timestamp(google.protobuf.message.Message): @@ -252,7 +387,7 @@ class Timestamp(google.protobuf.message.Message): START_FIELD_NUMBER: builtins.int DURATION_FIELD_NUMBER: builtins.int start: builtins.int = ... - """/ start time stamp""" + """/ start time stamp, use int32 as int64 is not correctly parsed.""" duration: builtins.float = ... 
"""/ duration (in seconds), for images, it's always 0""" @@ -268,34 +403,40 @@ global___Timestamp = Timestamp class MirAnnotations(google.protobuf.message.Message): """/ ========== annotations.mir ==========""" DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... - class TaskAnnotationsEntry(google.protobuf.message.Message): + class ImageCksEntry(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... KEY_FIELD_NUMBER: builtins.int VALUE_FIELD_NUMBER: builtins.int key: typing.Text = ... @property - def value(self) -> global___SingleTaskAnnotations: ... + def value(self) -> global___SingleImageCks: ... def __init__(self, *, key : typing.Text = ..., - value : typing.Optional[global___SingleTaskAnnotations] = ..., + value : typing.Optional[global___SingleImageCks] = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... - TASK_ANNOTATIONS_FIELD_NUMBER: builtins.int - HEAD_TASK_ID_FIELD_NUMBER: builtins.int + GROUND_TRUTH_FIELD_NUMBER: builtins.int + PREDICTION_FIELD_NUMBER: builtins.int + IMAGE_CKS_FIELD_NUMBER: builtins.int + @property + def ground_truth(self) -> global___SingleTaskAnnotations: ... @property - def task_annotations(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, global___SingleTaskAnnotations]: - """/ key: task id, value: annotations of that single task""" + def prediction(self) -> global___SingleTaskAnnotations: ... + @property + def image_cks(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, global___SingleImageCks]: + """key: asset id, value: cks and image quality, from pred and gt""" pass - head_task_id: typing.Text = ... def __init__(self, *, - task_annotations : typing.Optional[typing.Mapping[typing.Text, global___SingleTaskAnnotations]] = ..., - head_task_id : typing.Text = ..., + ground_truth : typing.Optional[global___SingleTaskAnnotations] = ..., + prediction : typing.Optional[global___SingleTaskAnnotations] = ..., + image_cks : typing.Optional[typing.Mapping[typing.Text, global___SingleImageCks]] = ..., ) -> None: ... - def ClearField(self, field_name: typing_extensions.Literal["head_task_id",b"head_task_id","task_annotations",b"task_annotations"]) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["ground_truth",b"ground_truth","prediction",b"prediction"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["ground_truth",b"ground_truth","image_cks",b"image_cks","prediction",b"prediction"]) -> None: ... global___MirAnnotations = MirAnnotations class SingleTaskAnnotations(google.protobuf.message.Message): @@ -315,36 +456,169 @@ class SingleTaskAnnotations(google.protobuf.message.Message): def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... + class MapIdColorEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: builtins.int = ... + @property + def value(self) -> global___IntPoint: ... + def __init__(self, + *, + key : builtins.int = ..., + value : typing.Optional[global___IntPoint] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... 
+ def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... + IMAGE_ANNOTATIONS_FIELD_NUMBER: builtins.int + TASK_ID_FIELD_NUMBER: builtins.int + TYPE_FIELD_NUMBER: builtins.int + TASK_CLASS_IDS_FIELD_NUMBER: builtins.int + MAP_ID_COLOR_FIELD_NUMBER: builtins.int + EVAL_CLASS_IDS_FIELD_NUMBER: builtins.int + MODEL_FIELD_NUMBER: builtins.int + EXECUTOR_CONFIG_FIELD_NUMBER: builtins.int @property def image_annotations(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, global___SingleImageAnnotations]: """/ key: image id, value: annotations of that single image""" pass + task_id: typing.Text = ... + type: global___AnnoType.V = ... + @property + def task_class_ids(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: + """Set of all shown class ids.""" + pass + @property + def map_id_color(self) -> google.protobuf.internal.containers.MessageMap[builtins.int, global___IntPoint]: ... + @property + def eval_class_ids(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: + """meta infos of this SingleTaskAnnotations""" + pass + @property + def model(self) -> global___ModelMeta: + """model meta info associated with this single_task_annotations""" + pass + executor_config: typing.Text = ... + """executor config used to generate this single task annotations""" + def __init__(self, *, image_annotations : typing.Optional[typing.Mapping[typing.Text, global___SingleImageAnnotations]] = ..., + task_id : typing.Text = ..., + type : global___AnnoType.V = ..., + task_class_ids : typing.Optional[typing.Iterable[builtins.int]] = ..., + map_id_color : typing.Optional[typing.Mapping[builtins.int, global___IntPoint]] = ..., + eval_class_ids : typing.Optional[typing.Iterable[builtins.int]] = ..., + model : typing.Optional[global___ModelMeta] = ..., + executor_config : typing.Text = ..., ) -> None: ... - def ClearField(self, field_name: typing_extensions.Literal["image_annotations",b"image_annotations"]) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["model",b"model"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["eval_class_ids",b"eval_class_ids","executor_config",b"executor_config","image_annotations",b"image_annotations","map_id_color",b"map_id_color","model",b"model","task_class_ids",b"task_class_ids","task_id",b"task_id","type",b"type"]) -> None: ... global___SingleTaskAnnotations = SingleTaskAnnotations class SingleImageAnnotations(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... - ANNOTATIONS_FIELD_NUMBER: builtins.int + BOXES_FIELD_NUMBER: builtins.int + POLYGONS_FIELD_NUMBER: builtins.int + MASK_FIELD_NUMBER: builtins.int + IMG_CLASS_IDS_FIELD_NUMBER: builtins.int + @property + def boxes(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___ObjectAnnotation]: ... + @property + def polygons(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___ObjectAnnotation]: ... @property - def annotations(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Annotation]: ... + def mask(self) -> global___MaskAnnotation: ... 
+ @property + def img_class_ids(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: + """Set of class ids shown in this image.""" + pass def __init__(self, *, - annotations : typing.Optional[typing.Iterable[global___Annotation]] = ..., + boxes : typing.Optional[typing.Iterable[global___ObjectAnnotation]] = ..., + polygons : typing.Optional[typing.Iterable[global___ObjectAnnotation]] = ..., + mask : typing.Optional[global___MaskAnnotation] = ..., + img_class_ids : typing.Optional[typing.Iterable[builtins.int]] = ..., ) -> None: ... - def ClearField(self, field_name: typing_extensions.Literal["annotations",b"annotations"]) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["mask",b"mask"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["boxes",b"boxes","img_class_ids",b"img_class_ids","mask",b"mask","polygons",b"polygons"]) -> None: ... global___SingleImageAnnotations = SingleImageAnnotations -class Annotation(google.protobuf.message.Message): +class SingleImageCks(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... + class CksEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: typing.Text = ... + value: typing.Text = ... + def __init__(self, + *, + key : typing.Text = ..., + value : typing.Text = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... + + CKS_FIELD_NUMBER: builtins.int + IMAGE_QUALITY_FIELD_NUMBER: builtins.int + @property + def cks(self) -> google.protobuf.internal.containers.ScalarMap[typing.Text, typing.Text]: ... + image_quality: builtins.float = ... + def __init__(self, + *, + cks : typing.Optional[typing.Mapping[typing.Text, typing.Text]] = ..., + image_quality : builtins.float = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["cks",b"cks","image_quality",b"image_quality"]) -> None: ... +global___SingleImageCks = SingleImageCks + +class MaskAnnotation(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... + SEMANTIC_MASK_FIELD_NUMBER: builtins.int + INSTANCE_MASK_FIELD_NUMBER: builtins.int + OBJECT_IDS_FIELD_NUMBER: builtins.int + semantic_mask: builtins.bytes = ... + """PNG image with 3 channels where each pixel corresponds to a class_id.""" + + instance_mask: builtins.bytes = ... + """PNG image with 3 channels where each pixel corresponds to an object_id.""" + + @property + def object_ids(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: ... + def __init__(self, + *, + semantic_mask : builtins.bytes = ..., + instance_mask : builtins.bytes = ..., + object_ids : typing.Optional[typing.Iterable[builtins.int]] = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["instance_mask",b"instance_mask","object_ids",b"object_ids","semantic_mask",b"semantic_mask"]) -> None: ... +global___MaskAnnotation = MaskAnnotation + +class ObjectAnnotation(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... + class TagsEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: typing.Text = ... + value: typing.Text = ... 
+ def __init__(self, + *, + key : typing.Text = ..., + value : typing.Text = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... + INDEX_FIELD_NUMBER: builtins.int BOX_FIELD_NUMBER: builtins.int CLASS_ID_FIELD_NUMBER: builtins.int SCORE_FIELD_NUMBER: builtins.int + ANNO_QUALITY_FIELD_NUMBER: builtins.int + TAGS_FIELD_NUMBER: builtins.int + CM_FIELD_NUMBER: builtins.int + DET_LINK_ID_FIELD_NUMBER: builtins.int + CLASS_NAME_FIELD_NUMBER: builtins.int + POLYGON_FIELD_NUMBER: builtins.int index: builtins.int = ... """Index of this annotation in current single image, may be different from the index in repeated field.""" @@ -352,16 +626,32 @@ class Annotation(google.protobuf.message.Message): def box(self) -> global___Rect: ... class_id: builtins.int = ... score: builtins.float = ... + anno_quality: builtins.float = ... + @property + def tags(self) -> google.protobuf.internal.containers.ScalarMap[typing.Text, typing.Text]: ... + cm: global___ConfusionMatrixType.V = ... + det_link_id: builtins.int = ... + class_name: typing.Text = ... + """for data parsed from outside, e.g. inference.""" + + @property + def polygon(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___IntPoint]: ... def __init__(self, *, index : builtins.int = ..., box : typing.Optional[global___Rect] = ..., class_id : builtins.int = ..., score : builtins.float = ..., + anno_quality : builtins.float = ..., + tags : typing.Optional[typing.Mapping[typing.Text, typing.Text]] = ..., + cm : global___ConfusionMatrixType.V = ..., + det_link_id : builtins.int = ..., + class_name : typing.Text = ..., + polygon : typing.Optional[typing.Iterable[global___IntPoint]] = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["box",b"box"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["box",b"box","class_id",b"class_id","index",b"index","score",b"score"]) -> None: ... -global___Annotation = Annotation + def ClearField(self, field_name: typing_extensions.Literal["anno_quality",b"anno_quality","box",b"box","class_id",b"class_id","class_name",b"class_name","cm",b"cm","det_link_id",b"det_link_id","index",b"index","polygon",b"polygon","score",b"score","tags",b"tags"]) -> None: ... +global___ObjectAnnotation = ObjectAnnotation class Rect(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... @@ -369,74 +659,119 @@ class Rect(google.protobuf.message.Message): Y_FIELD_NUMBER: builtins.int W_FIELD_NUMBER: builtins.int H_FIELD_NUMBER: builtins.int + ROTATE_ANGLE_FIELD_NUMBER: builtins.int x: builtins.int = ... y: builtins.int = ... w: builtins.int = ... h: builtins.int = ... + rotate_angle: builtins.float = ... + """unit in pi.""" + def __init__(self, *, x : builtins.int = ..., y : builtins.int = ..., w : builtins.int = ..., h : builtins.int = ..., + rotate_angle : builtins.float = ..., ) -> None: ... - def ClearField(self, field_name: typing_extensions.Literal["h",b"h","w",b"w","x",b"x","y",b"y"]) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["h",b"h","rotate_angle",b"rotate_angle","w",b"w","x",b"x","y",b"y"]) -> None: ... global___Rect = Rect class MirKeywords(google.protobuf.message.Message): """/ ========== keywords.mir ==========""" DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... 
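# A hedged sketch of one ObjectAnnotation with the new rotated-box, quality
# and tag fields; the class id, tag and values are invented for illustration:
#   anno = mirpb.ObjectAnnotation()
#   anno.class_id = 2
#   anno.score = 0.87
#   anno.anno_quality = 0.9
#   anno.tags['difficult'] = '0'
#   anno.box.x, anno.box.y, anno.box.w, anno.box.h = 10, 20, 100, 50
#   anno.box.rotate_angle = 0.25  # in units of pi, per the stub comment above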
- class KeywordsEntry(google.protobuf.message.Message): + class CkIdxEntry(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... KEY_FIELD_NUMBER: builtins.int VALUE_FIELD_NUMBER: builtins.int key: typing.Text = ... @property - def value(self) -> global___Keywords: ... + def value(self) -> global___AssetAnnoIndex: ... def __init__(self, *, key : typing.Text = ..., - value : typing.Optional[global___Keywords] = ..., + value : typing.Optional[global___AssetAnnoIndex] = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... - class IndexPredifinedKeyidsEntry(google.protobuf.message.Message): + PRED_IDX_FIELD_NUMBER: builtins.int + GT_IDX_FIELD_NUMBER: builtins.int + CK_IDX_FIELD_NUMBER: builtins.int + @property + def pred_idx(self) -> global___CiTagToIndex: + """ci to assets, generated from preds""" + pass + @property + def gt_idx(self) -> global___CiTagToIndex: + """ci to assets, generated from gt""" + pass + @property + def ck_idx(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, global___AssetAnnoIndex]: + """key: ck main key, value: assets and assets with sub keys, from (mir_annotations.image_cks) pred and gt""" + pass + def __init__(self, + *, + pred_idx : typing.Optional[global___CiTagToIndex] = ..., + gt_idx : typing.Optional[global___CiTagToIndex] = ..., + ck_idx : typing.Optional[typing.Mapping[typing.Text, global___AssetAnnoIndex]] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["gt_idx",b"gt_idx","pred_idx",b"pred_idx"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["ck_idx",b"ck_idx","gt_idx",b"gt_idx","pred_idx",b"pred_idx"]) -> None: ... +global___MirKeywords = MirKeywords + +class CiTagToIndex(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... + class CisEntry(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... KEY_FIELD_NUMBER: builtins.int VALUE_FIELD_NUMBER: builtins.int key: builtins.int = ... @property - def value(self) -> global___Assets: ... + def value(self) -> global___MapStringToInt32List: ... def __init__(self, *, key : builtins.int = ..., - value : typing.Optional[global___Assets] = ..., + value : typing.Optional[global___MapStringToInt32List] = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... - KEYWORDS_FIELD_NUMBER: builtins.int - INDEX_PREDIFINED_KEYIDS_FIELD_NUMBER: builtins.int + class TagsEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: typing.Text = ... + @property + def value(self) -> global___AssetAnnoIndex: ... + def __init__(self, + *, + key : typing.Text = ..., + value : typing.Optional[global___AssetAnnoIndex] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... 
+ + CIS_FIELD_NUMBER: builtins.int + TAGS_FIELD_NUMBER: builtins.int @property - def keywords(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, global___Keywords]: - """key: asset hash, value: keywords list - cnt: count of keywords - """ + def cis(self) -> google.protobuf.internal.containers.MessageMap[builtins.int, global___MapStringToInt32List]: + """key: ci, value: annos""" pass @property - def index_predifined_keyids(self) -> google.protobuf.internal.containers.MessageMap[builtins.int, global___Assets]: - """key: class id, value: assert ids""" + def tags(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, global___AssetAnnoIndex]: + """key: ck main key, value: annos and annos with sub keys""" pass def __init__(self, *, - keywords : typing.Optional[typing.Mapping[typing.Text, global___Keywords]] = ..., - index_predifined_keyids : typing.Optional[typing.Mapping[builtins.int, global___Assets]] = ..., + cis : typing.Optional[typing.Mapping[builtins.int, global___MapStringToInt32List]] = ..., + tags : typing.Optional[typing.Mapping[typing.Text, global___AssetAnnoIndex]] = ..., ) -> None: ... - def ClearField(self, field_name: typing_extensions.Literal["index_predifined_keyids",b"index_predifined_keyids","keywords",b"keywords"]) -> None: ... -global___MirKeywords = MirKeywords + def ClearField(self, field_name: typing_extensions.Literal["cis",b"cis","tags",b"tags"]) -> None: ... +global___CiTagToIndex = CiTagToIndex -class Assets(google.protobuf.message.Message): +class StringList(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... ASSET_IDS_FIELD_NUMBER: builtins.int @property @@ -446,27 +781,96 @@ class Assets(google.protobuf.message.Message): asset_ids : typing.Optional[typing.Iterable[typing.Text]] = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["asset_ids",b"asset_ids"]) -> None: ... -global___Assets = Assets +global___StringList = StringList -class Keywords(google.protobuf.message.Message): +class MapStringToInt32List(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... - PREDIFINED_KEYIDS_FIELD_NUMBER: builtins.int - CUSTOMIZED_KEYWORDS_FIELD_NUMBER: builtins.int + class KeyIdsEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: typing.Text = ... + @property + def value(self) -> global___Int32List: ... + def __init__(self, + *, + key : typing.Text = ..., + value : typing.Optional[global___Int32List] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... + + KEY_IDS_FIELD_NUMBER: builtins.int @property - def predifined_keyids(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: - """predefined: managed id-keyword map""" + def key_ids(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, global___Int32List]: ... + def __init__(self, + *, + key_ids : typing.Optional[typing.Mapping[typing.Text, global___Int32List]] = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["key_ids",b"key_ids"]) -> None: ... +global___MapStringToInt32List = MapStringToInt32List + +class Int32List(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... 
+ IDS_FIELD_NUMBER: builtins.int + @property + def ids(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: ... + def __init__(self, + *, + ids : typing.Optional[typing.Iterable[builtins.int]] = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["ids",b"ids"]) -> None: ... +global___Int32List = Int32List + +class AssetAnnoIndex(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... + class AssetAnnosEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: typing.Text = ... + @property + def value(self) -> global___Int32List: ... + def __init__(self, + *, + key : typing.Text = ..., + value : typing.Optional[global___Int32List] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... + + class SubIndexesEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: typing.Text = ... + @property + def value(self) -> global___MapStringToInt32List: ... + def __init__(self, + *, + key : typing.Text = ..., + value : typing.Optional[global___MapStringToInt32List] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... + + ASSET_ANNOS_FIELD_NUMBER: builtins.int + SUB_INDEXES_FIELD_NUMBER: builtins.int + @property + def asset_annos(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, global___Int32List]: + """key: asset id, value: annotation indexes""" pass @property - def customized_keywords(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[typing.Text]: - """customized: arbitrary user defined keywords""" + def sub_indexes(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, global___MapStringToInt32List]: + """key: ck value, value: asset and it's annotation indexes""" pass def __init__(self, *, - predifined_keyids : typing.Optional[typing.Iterable[builtins.int]] = ..., - customized_keywords : typing.Optional[typing.Iterable[typing.Text]] = ..., + asset_annos : typing.Optional[typing.Mapping[typing.Text, global___Int32List]] = ..., + sub_indexes : typing.Optional[typing.Mapping[typing.Text, global___MapStringToInt32List]] = ..., ) -> None: ... - def ClearField(self, field_name: typing_extensions.Literal["customized_keywords",b"customized_keywords","predifined_keyids",b"predifined_keyids"]) -> None: ... -global___Keywords = Keywords + def ClearField(self, field_name: typing_extensions.Literal["asset_annos",b"asset_annos","sub_indexes",b"sub_indexes"]) -> None: ... +global___AssetAnnoIndex = AssetAnnoIndex class MirTasks(google.protobuf.message.Message): """/ ========== tasks.mir ==========""" @@ -501,7 +905,7 @@ global___MirTasks = MirTasks class Task(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... - class UnknownTypesEntry(google.protobuf.message.Message): + class NewTypesEntry(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... 
KEY_FIELD_NUMBER: builtins.int VALUE_FIELD_NUMBER: builtins.int @@ -519,10 +923,11 @@ class Task(google.protobuf.message.Message): TASK_ID_FIELD_NUMBER: builtins.int TIMESTAMP_FIELD_NUMBER: builtins.int MODEL_FIELD_NUMBER: builtins.int - UNKNOWN_TYPES_FIELD_NUMBER: builtins.int RETURN_CODE_FIELD_NUMBER: builtins.int RETURN_MSG_FIELD_NUMBER: builtins.int EVALUATION_FIELD_NUMBER: builtins.int + NEW_TYPES_FIELD_NUMBER: builtins.int + NEW_TYPES_ADDED_FIELD_NUMBER: builtins.int SERIALIZED_TASK_PARAMETERS_FIELD_NUMBER: builtins.int SERIALIZED_EXECUTOR_CONFIG_FIELD_NUMBER: builtins.int SRC_REVS_FIELD_NUMBER: builtins.int @@ -542,16 +947,19 @@ class Task(google.protobuf.message.Message): @property def model(self) -> global___ModelMeta: - """/ (special for training task): result model for cmd train""" - pass - @property - def unknown_types(self) -> google.protobuf.internal.containers.ScalarMap[typing.Text, builtins.int]: - """/ (special for import task): unknown types for cmd import""" + """/ (for training task): result model for cmd train""" pass return_code: builtins.int = ... return_msg: typing.Text = ... @property def evaluation(self) -> global___Evaluation: ... + @property + def new_types(self) -> google.protobuf.internal.containers.ScalarMap[typing.Text, builtins.int]: + """/ (for import task): new types for cmd import, key: class name, value: asset count""" + pass + new_types_added: builtins.bool = ... + """/ (for import task): reason for new types, True: added, False: ignored""" + serialized_task_parameters: typing.Text = ... serialized_executor_config: typing.Text = ... src_revs: typing.Text = ... @@ -564,10 +972,11 @@ class Task(google.protobuf.message.Message): task_id : typing.Text = ..., timestamp : builtins.int = ..., model : typing.Optional[global___ModelMeta] = ..., - unknown_types : typing.Optional[typing.Mapping[typing.Text, builtins.int]] = ..., return_code : builtins.int = ..., return_msg : typing.Text = ..., evaluation : typing.Optional[global___Evaluation] = ..., + new_types : typing.Optional[typing.Mapping[typing.Text, builtins.int]] = ..., + new_types_added : builtins.bool = ..., serialized_task_parameters : typing.Text = ..., serialized_executor_config : typing.Text = ..., src_revs : typing.Text = ..., @@ -575,14 +984,32 @@ class Task(google.protobuf.message.Message): executor : typing.Text = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["evaluation",b"evaluation","model",b"model"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["dst_rev",b"dst_rev","evaluation",b"evaluation","executor",b"executor","model",b"model","name",b"name","return_code",b"return_code","return_msg",b"return_msg","serialized_executor_config",b"serialized_executor_config","serialized_task_parameters",b"serialized_task_parameters","src_revs",b"src_revs","task_id",b"task_id","timestamp",b"timestamp","type",b"type","unknown_types",b"unknown_types"]) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["dst_rev",b"dst_rev","evaluation",b"evaluation","executor",b"executor","model",b"model","name",b"name","new_types",b"new_types","new_types_added",b"new_types_added","return_code",b"return_code","return_msg",b"return_msg","serialized_executor_config",b"serialized_executor_config","serialized_task_parameters",b"serialized_task_parameters","src_revs",b"src_revs","task_id",b"task_id","timestamp",b"timestamp","type",b"type"]) -> None: ... 
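# For reference, a sketch of how an import task might use the renamed fields
# above (class names and counts are invented):
#   task = mirpb.Task()
#   task.new_types['cat'] = 15   # key: class name, value: asset count
#   task.new_types['dog'] = 7
#   task.new_types_added = True  # True: names were added, False: ignored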
global___Task = Task class ModelMeta(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... + class StagesEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: typing.Text = ... + @property + def value(self) -> global___ModelStage: ... + def __init__(self, + *, + key : typing.Text = ..., + value : typing.Optional[global___ModelStage] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... + MODEL_HASH_FIELD_NUMBER: builtins.int MEAN_AVERAGE_PRECISION_FIELD_NUMBER: builtins.int CONTEXT_FIELD_NUMBER: builtins.int + STAGES_FIELD_NUMBER: builtins.int + BEST_STAGE_NAME_FIELD_NUMBER: builtins.int + CLASS_NAMES_FIELD_NUMBER: builtins.int model_hash: typing.Text = ... """/ hash for models.tar.gz""" @@ -592,18 +1019,47 @@ class ModelMeta(google.protobuf.message.Message): context: typing.Text = ... """/ context generated by train command""" + @property + def stages(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, global___ModelStage]: ... + best_stage_name: typing.Text = ... + @property + def class_names(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[typing.Text]: ... def __init__(self, *, model_hash : typing.Text = ..., mean_average_precision : builtins.float = ..., context : typing.Text = ..., + stages : typing.Optional[typing.Mapping[typing.Text, global___ModelStage]] = ..., + best_stage_name : typing.Text = ..., + class_names : typing.Optional[typing.Iterable[typing.Text]] = ..., ) -> None: ... - def ClearField(self, field_name: typing_extensions.Literal["context",b"context","mean_average_precision",b"mean_average_precision","model_hash",b"model_hash"]) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["best_stage_name",b"best_stage_name","class_names",b"class_names","context",b"context","mean_average_precision",b"mean_average_precision","model_hash",b"model_hash","stages",b"stages"]) -> None: ... global___ModelMeta = ModelMeta +class ModelStage(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... + STAGE_NAME_FIELD_NUMBER: builtins.int + FILES_FIELD_NUMBER: builtins.int + TIMESTAMP_FIELD_NUMBER: builtins.int + MAP_FIELD_NUMBER: builtins.int + stage_name: typing.Text = ... + @property + def files(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[typing.Text]: ... + timestamp: builtins.int = ... + mAP: builtins.float = ... + def __init__(self, + *, + stage_name : typing.Text = ..., + files : typing.Optional[typing.Iterable[typing.Text]] = ..., + timestamp : builtins.int = ..., + mAP : builtins.float = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["files",b"files","mAP",b"mAP","stage_name",b"stage_name","timestamp",b"timestamp"]) -> None: ... +global___ModelStage = ModelStage + class Evaluation(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... - class DatasetEvaluationsEntry(google.protobuf.message.Message): + class SubCksEntry(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... 
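# An illustrative use of the new multi-stage model metadata above; the stage
# name, file list and metric values are placeholders:
#   meta = mirpb.ModelMeta(model_hash='0123abcd', best_stage_name='epoch-50')
#   stage = meta.stages['epoch-50']  # message-map access creates the entry
#   stage.stage_name = 'epoch-50'
#   stage.files[:] = ['model-50.params']
#   stage.mAP = 0.61
#   meta.class_names[:] = ['cat', 'dog']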
KEY_FIELD_NUMBER: builtins.int VALUE_FIELD_NUMBER: builtins.int @@ -619,44 +1075,53 @@ class Evaluation(google.protobuf.message.Message): def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... CONFIG_FIELD_NUMBER: builtins.int - DATASET_EVALUATIONS_FIELD_NUMBER: builtins.int + DATASET_EVALUATION_FIELD_NUMBER: builtins.int + MAIN_CK_FIELD_NUMBER: builtins.int + SUB_CKS_FIELD_NUMBER: builtins.int + STATE_FIELD_NUMBER: builtins.int @property def config(self) -> global___EvaluateConfig: ... @property - def dataset_evaluations(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, global___SingleDatasetEvaluation]: - """key: prediction dataset id, value: evaluation result for ground truth and prediction dataset""" - pass + def dataset_evaluation(self) -> global___SingleDatasetEvaluation: ... + @property + def main_ck(self) -> global___SingleDatasetEvaluation: ... + @property + def sub_cks(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, global___SingleDatasetEvaluation]: ... + state: global___EvaluationState.V = ... def __init__(self, *, config : typing.Optional[global___EvaluateConfig] = ..., - dataset_evaluations : typing.Optional[typing.Mapping[typing.Text, global___SingleDatasetEvaluation]] = ..., + dataset_evaluation : typing.Optional[global___SingleDatasetEvaluation] = ..., + main_ck : typing.Optional[global___SingleDatasetEvaluation] = ..., + sub_cks : typing.Optional[typing.Mapping[typing.Text, global___SingleDatasetEvaluation]] = ..., + state : global___EvaluationState.V = ..., ) -> None: ... - def HasField(self, field_name: typing_extensions.Literal["config",b"config"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["config",b"config","dataset_evaluations",b"dataset_evaluations"]) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["config",b"config","dataset_evaluation",b"dataset_evaluation","main_ck",b"main_ck"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["config",b"config","dataset_evaluation",b"dataset_evaluation","main_ck",b"main_ck","state",b"state","sub_cks",b"sub_cks"]) -> None: ... global___Evaluation = Evaluation class EvaluateConfig(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... - GT_DATASET_ID_FIELD_NUMBER: builtins.int - PRED_DATASET_IDS_FIELD_NUMBER: builtins.int CONF_THR_FIELD_NUMBER: builtins.int IOU_THRS_INTERVAL_FIELD_NUMBER: builtins.int NEED_PR_CURVE_FIELD_NUMBER: builtins.int - gt_dataset_id: typing.Text = ... - @property - def pred_dataset_ids(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[typing.Text]: ... + CLASS_IDS_FIELD_NUMBER: builtins.int + MAIN_CK_FIELD_NUMBER: builtins.int conf_thr: builtins.float = ... iou_thrs_interval: typing.Text = ... need_pr_curve: builtins.bool = ... + @property + def class_ids(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: ... + main_ck: typing.Text = ... def __init__(self, *, - gt_dataset_id : typing.Text = ..., - pred_dataset_ids : typing.Optional[typing.Iterable[typing.Text]] = ..., conf_thr : builtins.float = ..., iou_thrs_interval : typing.Text = ..., need_pr_curve : builtins.bool = ..., + class_ids : typing.Optional[typing.Iterable[builtins.int]] = ..., + main_ck : typing.Text = ..., ) -> None: ... 
- def ClearField(self, field_name: typing_extensions.Literal["conf_thr",b"conf_thr","gt_dataset_id",b"gt_dataset_id","iou_thrs_interval",b"iou_thrs_interval","need_pr_curve",b"need_pr_curve","pred_dataset_ids",b"pred_dataset_ids"]) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["class_ids",b"class_ids","conf_thr",b"conf_thr","iou_thrs_interval",b"iou_thrs_interval","main_ck",b"main_ck","need_pr_curve",b"need_pr_curve"]) -> None: ... global___EvaluateConfig = EvaluateConfig class SingleDatasetEvaluation(google.protobuf.message.Message): @@ -677,13 +1142,9 @@ class SingleDatasetEvaluation(google.protobuf.message.Message): def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... CONF_THR_FIELD_NUMBER: builtins.int - GT_DATASET_ID_FIELD_NUMBER: builtins.int - PRED_DATASET_ID_FIELD_NUMBER: builtins.int IOU_EVALUATIONS_FIELD_NUMBER: builtins.int IOU_AVERAGED_EVALUATION_FIELD_NUMBER: builtins.int conf_thr: builtins.float = ... - gt_dataset_id: typing.Text = ... - pred_dataset_id: typing.Text = ... @property def iou_evaluations(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, global___SingleIouEvaluation]: """key: string of iou threshold""" @@ -695,13 +1156,11 @@ class SingleDatasetEvaluation(google.protobuf.message.Message): def __init__(self, *, conf_thr : builtins.float = ..., - gt_dataset_id : typing.Text = ..., - pred_dataset_id : typing.Text = ..., iou_evaluations : typing.Optional[typing.Mapping[typing.Text, global___SingleIouEvaluation]] = ..., iou_averaged_evaluation : typing.Optional[global___SingleIouEvaluation] = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["iou_averaged_evaluation",b"iou_averaged_evaluation"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["conf_thr",b"conf_thr","gt_dataset_id",b"gt_dataset_id","iou_averaged_evaluation",b"iou_averaged_evaluation","iou_evaluations",b"iou_evaluations","pred_dataset_id",b"pred_dataset_id"]) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["conf_thr",b"conf_thr","iou_averaged_evaluation",b"iou_averaged_evaluation","iou_evaluations",b"iou_evaluations"]) -> None: ... global___SingleDatasetEvaluation = SingleDatasetEvaluation class SingleIouEvaluation(google.protobuf.message.Message): @@ -712,56 +1171,35 @@ class SingleIouEvaluation(google.protobuf.message.Message): VALUE_FIELD_NUMBER: builtins.int key: builtins.int = ... @property - def value(self) -> global___SingleTopicEvaluation: ... + def value(self) -> global___SingleEvaluationElement: ... def __init__(self, *, key : builtins.int = ..., - value : typing.Optional[global___SingleTopicEvaluation] = ..., - ) -> None: ... - def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... - - class TopicEvaluationsEntry(google.protobuf.message.Message): - DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... - KEY_FIELD_NUMBER: builtins.int - VALUE_FIELD_NUMBER: builtins.int - key: typing.Text = ... - @property - def value(self) -> global___SingleTopicEvaluation: ... - def __init__(self, - *, - key : typing.Text = ..., - value : typing.Optional[global___SingleTopicEvaluation] = ..., + value : typing.Optional[global___SingleEvaluationElement] = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... 
def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... CI_EVALUATIONS_FIELD_NUMBER: builtins.int CI_AVERAGED_EVALUATION_FIELD_NUMBER: builtins.int - TOPIC_EVALUATIONS_FIELD_NUMBER: builtins.int @property - def ci_evaluations(self) -> google.protobuf.internal.containers.MessageMap[builtins.int, global___SingleTopicEvaluation]: + def ci_evaluations(self) -> google.protobuf.internal.containers.MessageMap[builtins.int, global___SingleEvaluationElement]: """key: class ids""" pass @property - def ci_averaged_evaluation(self) -> global___SingleTopicEvaluation: + def ci_averaged_evaluation(self) -> global___SingleEvaluationElement: """evaluations averaged by class ids""" pass - @property - def topic_evaluations(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, global___SingleTopicEvaluation]: - """key: topic names""" - pass def __init__(self, *, - ci_evaluations : typing.Optional[typing.Mapping[builtins.int, global___SingleTopicEvaluation]] = ..., - ci_averaged_evaluation : typing.Optional[global___SingleTopicEvaluation] = ..., - topic_evaluations : typing.Optional[typing.Mapping[typing.Text, global___SingleTopicEvaluation]] = ..., + ci_evaluations : typing.Optional[typing.Mapping[builtins.int, global___SingleEvaluationElement]] = ..., + ci_averaged_evaluation : typing.Optional[global___SingleEvaluationElement] = ..., ) -> None: ... def HasField(self, field_name: typing_extensions.Literal["ci_averaged_evaluation",b"ci_averaged_evaluation"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["ci_averaged_evaluation",b"ci_averaged_evaluation","ci_evaluations",b"ci_evaluations","topic_evaluations",b"topic_evaluations"]) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["ci_averaged_evaluation",b"ci_averaged_evaluation","ci_evaluations",b"ci_evaluations"]) -> None: ... global___SingleIouEvaluation = SingleIouEvaluation -class SingleTopicEvaluation(google.protobuf.message.Message): +class SingleEvaluationElement(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... AP_FIELD_NUMBER: builtins.int AR_FIELD_NUMBER: builtins.int @@ -786,99 +1224,232 @@ class SingleTopicEvaluation(google.protobuf.message.Message): pr_curve : typing.Optional[typing.Iterable[global___FloatPoint]] = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["ap",b"ap","ar",b"ar","fn",b"fn","fp",b"fp","pr_curve",b"pr_curve","tp",b"tp"]) -> None: ... -global___SingleTopicEvaluation = SingleTopicEvaluation +global___SingleEvaluationElement = SingleEvaluationElement + +class IntPoint(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... + X_FIELD_NUMBER: builtins.int + Y_FIELD_NUMBER: builtins.int + Z_FIELD_NUMBER: builtins.int + x: builtins.int = ... + y: builtins.int = ... + z: builtins.int = ... + def __init__(self, + *, + x : builtins.int = ..., + y : builtins.int = ..., + z : builtins.int = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["x",b"x","y",b"y","z",b"z"]) -> None: ... +global___IntPoint = IntPoint class FloatPoint(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... X_FIELD_NUMBER: builtins.int Y_FIELD_NUMBER: builtins.int + Z_FIELD_NUMBER: builtins.int x: builtins.float = ... y: builtins.float = ... + z: builtins.float = ... 
def __init__(self, *, x : builtins.float = ..., y : builtins.float = ..., + z : builtins.float = ..., ) -> None: ... - def ClearField(self, field_name: typing_extensions.Literal["x",b"x","y",b"y"]) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["x",b"x","y",b"y","z",b"z"]) -> None: ... global___FloatPoint = FloatPoint class MirContext(google.protobuf.message.Message): """/ ========== context.mir ==========""" DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... - class PredefinedKeyidsCntEntry(google.protobuf.message.Message): + class CksCntEntry(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... KEY_FIELD_NUMBER: builtins.int VALUE_FIELD_NUMBER: builtins.int - key: builtins.int = ... - value: builtins.int = ... + key: typing.Text = ... + @property + def value(self) -> global___SingleMapCount: ... def __init__(self, *, - key : builtins.int = ..., - value : builtins.int = ..., + key : typing.Text = ..., + value : typing.Optional[global___SingleMapCount] = ..., ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... - class ProjectPredefinedKeyidsCntEntry(google.protobuf.message.Message): + IMAGES_CNT_FIELD_NUMBER: builtins.int + CKS_CNT_FIELD_NUMBER: builtins.int + TOTAL_ASSET_MBYTES_FIELD_NUMBER: builtins.int + PRED_STATS_FIELD_NUMBER: builtins.int + GT_STATS_FIELD_NUMBER: builtins.int + images_cnt: builtins.int = ... + """/ total images count""" + + @property + def cks_cnt(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, global___SingleMapCount]: + """/ from pred and gt""" + pass + total_asset_mbytes: builtins.int = ... + @property + def pred_stats(self) -> global___AnnoStats: ... + @property + def gt_stats(self) -> global___AnnoStats: ... + def __init__(self, + *, + images_cnt : builtins.int = ..., + cks_cnt : typing.Optional[typing.Mapping[typing.Text, global___SingleMapCount]] = ..., + total_asset_mbytes : builtins.int = ..., + pred_stats : typing.Optional[global___AnnoStats] = ..., + gt_stats : typing.Optional[global___AnnoStats] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["gt_stats",b"gt_stats","pred_stats",b"pred_stats"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["cks_cnt",b"cks_cnt","gt_stats",b"gt_stats","images_cnt",b"images_cnt","pred_stats",b"pred_stats","total_asset_mbytes",b"total_asset_mbytes"]) -> None: ... +global___MirContext = MirContext + +class SingleMapCount(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... + class SubCntEntry(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... KEY_FIELD_NUMBER: builtins.int VALUE_FIELD_NUMBER: builtins.int - key: builtins.int = ... + key: typing.Text = ... value: builtins.int = ... def __init__(self, *, - key : builtins.int = ..., + key : typing.Text = ..., value : builtins.int = ..., ) -> None: ... def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... - class CustomizedKeywordsCntEntry(google.protobuf.message.Message): + CNT_FIELD_NUMBER: builtins.int + SUB_CNT_FIELD_NUMBER: builtins.int + cnt: builtins.int = ... + @property + def sub_cnt(self) -> google.protobuf.internal.containers.ScalarMap[typing.Text, builtins.int]: ... 
+ def __init__(self, + *, + cnt : builtins.int = ..., + sub_cnt : typing.Optional[typing.Mapping[typing.Text, builtins.int]] = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["cnt",b"cnt","sub_cnt",b"sub_cnt"]) -> None: ... +global___SingleMapCount = SingleMapCount + +class AnnoStats(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... + class TagsCntEntry(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... KEY_FIELD_NUMBER: builtins.int VALUE_FIELD_NUMBER: builtins.int key: typing.Text = ... - value: builtins.int = ... + @property + def value(self) -> global___SingleMapCount: ... def __init__(self, *, key : typing.Text = ..., - value : builtins.int = ..., + value : typing.Optional[global___SingleMapCount] = ..., ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... - IMAGES_CNT_FIELD_NUMBER: builtins.int - NEGATIVE_IMAGES_CNT_FIELD_NUMBER: builtins.int - PROJECT_NEGATIVE_IMAGES_CNT_FIELD_NUMBER: builtins.int - PREDEFINED_KEYIDS_CNT_FIELD_NUMBER: builtins.int - PROJECT_PREDEFINED_KEYIDS_CNT_FIELD_NUMBER: builtins.int - CUSTOMIZED_KEYWORDS_CNT_FIELD_NUMBER: builtins.int - images_cnt: builtins.int = ... - """/ total images count""" - - negative_images_cnt: builtins.int = ... - """/ total negative images count (images without any annotations)""" - - project_negative_images_cnt: builtins.int = ... - """/ total negative images count (images without any project class names)""" + class ClassIdsCntEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: builtins.int = ... + value: builtins.int = ... + def __init__(self, + *, + key : builtins.int = ..., + value : builtins.int = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... + TOTAL_CNT_FIELD_NUMBER: builtins.int + POSITIVE_ASSET_CNT_FIELD_NUMBER: builtins.int + NEGATIVE_ASSET_CNT_FIELD_NUMBER: builtins.int + TAGS_CNT_FIELD_NUMBER: builtins.int + CLASS_IDS_CNT_FIELD_NUMBER: builtins.int + EVAL_CLASS_IDS_FIELD_NUMBER: builtins.int + total_cnt: builtins.int = ... + positive_asset_cnt: builtins.int = ... + negative_asset_cnt: builtins.int = ... 
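# A small sketch of the reworked per-dataset statistics (numbers invented):
#   ctx = mirpb.MirContext(images_cnt=100, total_asset_mbytes=42)
#   ctx.pred_stats.total_cnt = 250          # total annotations
#   ctx.pred_stats.positive_asset_cnt = 90  # assets with annotations
#   ctx.pred_stats.negative_asset_cnt = 10  # assets without annotations
#   ctx.pred_stats.class_ids_cnt[1] = 60    # key: class id, value: asset count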
@property - def predefined_keyids_cnt(self) -> google.protobuf.internal.containers.ScalarMap[builtins.int, builtins.int]: - """/ key: class id, value: images count""" + def tags_cnt(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, global___SingleMapCount]: + """key: main tag name, value: main tag count and sub tag names and counts""" pass @property - def project_predefined_keyids_cnt(self) -> google.protobuf.internal.containers.ScalarMap[builtins.int, builtins.int]: - """/ key: class id (only in this project), value: images count""" + def class_ids_cnt(self) -> google.protobuf.internal.containers.ScalarMap[builtins.int, builtins.int]: + """key: class ids, value: asset count for this class id""" pass @property - def customized_keywords_cnt(self) -> google.protobuf.internal.containers.ScalarMap[typing.Text, builtins.int]: - """/ key: customized keywords, value: images count""" + def eval_class_ids(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: + """Shortcut of class_ids for evaluation (dup. field as in SingleTaskAnnotations).""" pass def __init__(self, *, - images_cnt : builtins.int = ..., - negative_images_cnt : builtins.int = ..., - project_negative_images_cnt : builtins.int = ..., - predefined_keyids_cnt : typing.Optional[typing.Mapping[builtins.int, builtins.int]] = ..., - project_predefined_keyids_cnt : typing.Optional[typing.Mapping[builtins.int, builtins.int]] = ..., - customized_keywords_cnt : typing.Optional[typing.Mapping[typing.Text, builtins.int]] = ..., + total_cnt : builtins.int = ..., + positive_asset_cnt : builtins.int = ..., + negative_asset_cnt : builtins.int = ..., + tags_cnt : typing.Optional[typing.Mapping[typing.Text, global___SingleMapCount]] = ..., + class_ids_cnt : typing.Optional[typing.Mapping[builtins.int, builtins.int]] = ..., + eval_class_ids : typing.Optional[typing.Iterable[builtins.int]] = ..., ) -> None: ... - def ClearField(self, field_name: typing_extensions.Literal["customized_keywords_cnt",b"customized_keywords_cnt","images_cnt",b"images_cnt","negative_images_cnt",b"negative_images_cnt","predefined_keyids_cnt",b"predefined_keyids_cnt","project_negative_images_cnt",b"project_negative_images_cnt","project_predefined_keyids_cnt",b"project_predefined_keyids_cnt"]) -> None: ... -global___MirContext = MirContext + def ClearField(self, field_name: typing_extensions.Literal["class_ids_cnt",b"class_ids_cnt","eval_class_ids",b"eval_class_ids","negative_asset_cnt",b"negative_asset_cnt","positive_asset_cnt",b"positive_asset_cnt","tags_cnt",b"tags_cnt","total_cnt",b"total_cnt"]) -> None: ... +global___AnnoStats = AnnoStats + +class ExportConfig(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... + ASSET_FORMAT_FIELD_NUMBER: builtins.int + ASSET_DIR_FIELD_NUMBER: builtins.int + ASSET_INDEX_FILE_FIELD_NUMBER: builtins.int + ASSET_INDEX_PREFIX_FIELD_NUMBER: builtins.int + MEDIA_LOCATION_FIELD_NUMBER: builtins.int + NEED_SUB_FOLDER_FIELD_NUMBER: builtins.int + ANNO_FORMAT_FIELD_NUMBER: builtins.int + GT_DIR_FIELD_NUMBER: builtins.int + GT_INDEX_FILE_FIELD_NUMBER: builtins.int + GT_INDEX_PREFIX_FIELD_NUMBER: builtins.int + PRED_DIR_FIELD_NUMBER: builtins.int + PRED_INDEX_FILE_FIELD_NUMBER: builtins.int + PRED_INDEX_PREFIX_FIELD_NUMBER: builtins.int + TVT_INDEX_DIR_FIELD_NUMBER: builtins.int + asset_format: global___AssetFormat.V = ... + """Asset config.""" + + asset_dir: typing.Text = ... + asset_index_file: typing.Text = ... + asset_index_prefix: typing.Text = ... 
+ """Index file writes abs path. In TMI case, path should be converted, e.g. /in/assets.""" + + media_location: typing.Text = ... + need_sub_folder: builtins.bool = ... + anno_format: global___AnnoFormat.V = ... + """Annotation config.""" + + gt_dir: typing.Text = ... + gt_index_file: typing.Text = ... + gt_index_prefix: typing.Text = ... + pred_dir: typing.Text = ... + pred_index_file: typing.Text = ... + pred_index_prefix: typing.Text = ... + tvt_index_dir: typing.Text = ... + def __init__(self, + *, + asset_format : global___AssetFormat.V = ..., + asset_dir : typing.Text = ..., + asset_index_file : typing.Text = ..., + asset_index_prefix : typing.Text = ..., + media_location : typing.Text = ..., + need_sub_folder : builtins.bool = ..., + anno_format : global___AnnoFormat.V = ..., + gt_dir : typing.Text = ..., + gt_index_file : typing.Text = ..., + gt_index_prefix : typing.Text = ..., + pred_dir : typing.Text = ..., + pred_index_file : typing.Text = ..., + pred_index_prefix : typing.Text = ..., + tvt_index_dir : typing.Text = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["anno_format",b"anno_format","asset_dir",b"asset_dir","asset_format",b"asset_format","asset_index_file",b"asset_index_file","asset_index_prefix",b"asset_index_prefix","gt_dir",b"gt_dir","gt_index_file",b"gt_index_file","gt_index_prefix",b"gt_index_prefix","media_location",b"media_location","need_sub_folder",b"need_sub_folder","pred_dir",b"pred_dir","pred_index_file",b"pred_index_file","pred_index_prefix",b"pred_index_prefix","tvt_index_dir",b"tvt_index_dir"]) -> None: ... +global___ExportConfig = ExportConfig diff --git a/ymir/command/mir/tools/annotations.py b/ymir/command/mir/tools/annotations.py index b15d5ac39b..c5584914a0 100644 --- a/ymir/command/mir/tools/annotations.py +++ b/ymir/command/mir/tools/annotations.py @@ -1,9 +1,15 @@ from collections import defaultdict +import enum +import io +import json import logging import os -from typing import Dict, List, Optional, Tuple +from PIL import Image, UnidentifiedImageError +from typing import Any, Callable, Dict, List, Set, Tuple, Union -import xml.dom.minidom +from google.protobuf.json_format import ParseDict +import xmltodict +import yaml from mir.tools import class_ids from mir.tools.code import MirCode @@ -12,153 +18,329 @@ from mir.protos import mir_command_pb2 as mirpb -def _get_dom_xml_tag_node(node: xml.dom.minidom.Element, tag_name: str) -> Optional[xml.dom.minidom.Element]: - """ - suppose we have the following xml: - ``` - - tag1_value - tag2_value - - ``` - and we have node point to , we can use this function to get node tag1 and tag2 \n - if tag not found, returns None - """ - tag_nodes = node.getElementsByTagName(tag_name) - if len(tag_nodes) > 0 and len(tag_nodes[0].childNodes) > 0: - return tag_nodes[0] - return None - - -def _get_dom_xml_tag_data(node: xml.dom.minidom.Element, tag_name: str) -> str: - """ - suppose we have the following xml: - ``` - - tag1_value - tag2_value - - ``` - and we have node point to , we can use this function to get tag1_value and tag2_value \n - if tag not found, returns empty str - """ - tag_node = _get_dom_xml_tag_node(node, tag_name) - if tag_node and len(tag_node.childNodes) > 0: - return tag_node.childNodes[0].data - return '' - - -def _xml_obj_to_annotation(obj: xml.dom.minidom.Element, - class_type_manager: class_ids.ClassIdManager) -> mirpb.Annotation: - """ - generate mirpb.Annotation instance from object node in coco and pascal annotation xml file - """ - name = 
_xml_obj_to_type_name(obj) - bndbox_node = _get_dom_xml_tag_node(obj, "bndbox") - if not bndbox_node: - raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message='found no value for bndbox') - - xmin = int(float(_get_dom_xml_tag_data(bndbox_node, "xmin"))) - ymin = int(float(_get_dom_xml_tag_data(bndbox_node, "ymin"))) - xmax = int(float(_get_dom_xml_tag_data(bndbox_node, "xmax"))) - ymax = int(float(_get_dom_xml_tag_data(bndbox_node, "ymax"))) - width = xmax - xmin + 1 - height = ymax - ymin + 1 - - # there's no `score` key in original voc format, we add it here to support box conf score - score_str = _get_dom_xml_tag_data(obj, 'score') - score = float(score_str) if score_str else 2.0 - - annotation = mirpb.Annotation() - annotation.class_id = class_type_manager.id_and_main_name_for_name(name)[0] - annotation.box.x = xmin - annotation.box.y = ymin - annotation.box.w = width - annotation.box.h = height - annotation.score = score - return annotation +class UnknownTypesStrategy(str, enum.Enum): + STOP = 'stop' + IGNORE = 'ignore' + ADD = 'add' + + +def parse_anno_format(anno_format_str: str) -> "mirpb.AnnoFormat.V": + _anno_dict: Dict[str, mirpb.AnnoFormat.V] = { + # compatible with legacy format. + "voc": mirpb.AnnoFormat.AF_DET_PASCAL_VOC, + "ark": mirpb.AnnoFormat.AF_DET_ARK_JSON, + "ls_json": mirpb.AnnoFormat.AF_DET_LS_JSON, + "det-voc": mirpb.AnnoFormat.AF_DET_PASCAL_VOC, + "det-ark": mirpb.AnnoFormat.AF_DET_ARK_JSON, + "det-ls-json": mirpb.AnnoFormat.AF_DET_LS_JSON, + "seg-poly": mirpb.AnnoFormat.AF_SEG_POLYGON, + "seg-mask": mirpb.AnnoFormat.AF_SEG_MASK, + } + return _anno_dict.get(anno_format_str.lower(), mirpb.AnnoFormat.AF_NO_ANNOTATION) -def _xml_obj_to_type_name(obj: xml.dom.minidom.Element) -> str: - return _get_dom_xml_tag_data(obj, "name").lower() +def parse_anno_type(anno_type_str: str) -> "mirpb.AnnoType.V": + _anno_dict: Dict[str, mirpb.AnnoType.V] = { + "det-box": mirpb.AnnoType.AT_DET_BOX, + "seg-poly": mirpb.AnnoType.AT_SEG_POLYGON, + "seg-mask": mirpb.AnnoType.AT_SEG_MASK, + } + return _anno_dict.get(anno_type_str.lower(), mirpb.AnnoType.AT_UNKNOWN) -def import_annotations(mir_metadatas: mirpb.MirMetadatas, mir_annotation: mirpb.MirAnnotations, - in_sha1_file: str, mir_root: str, - annotations_dir_path: str, task_id: str, phase: str) -> Tuple[int, Dict[str, int]]: - """ - imports annotations +def _annotation_parse_func(anno_type: "mirpb.AnnoType.V") -> Callable: + _func_dict: Dict["mirpb.AnnoType.V", Callable] = { + mirpb.AnnoType.AT_DET_BOX: _import_annotations_voc_xml, + mirpb.AnnoType.AT_SEG_POLYGON: _import_annotations_voc_xml, + mirpb.AnnoType.AT_SEG_MASK: _import_annotations_seg_mask, + } + if anno_type not in _func_dict: + raise NotImplementedError + return _func_dict[anno_type] - Args: - mir_annotation (mirpb.MirAnnotations): data buf for annotations.mir - mir_keywords (mirpb.MirKeywords): data buf for keywords.mir - in_sha1_file (str): path to sha1 file - mir_root (str): path to mir repo - annotations_dir_path (str): path to annotations root - task_id (str): task id - phase (str): process phase - Returns: - Tuple[int, Dict[str, int]]: return code and unknown type names - """ - unknown_types_and_count: Dict[str, int] = defaultdict(int) +def _object_dict_to_annotation(object_dict: dict, cid: int) -> mirpb.ObjectAnnotation: + # Fill shared fields. 
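# (Illustrative: object_dict is one xmltodict-parsed <object> node, e.g.
#  {'name': 'cat', 'confidence': '0.83',
#   'bndbox': {'xmin': '10', 'ymin': '20', 'xmax': '109', 'ymax': '69'}};
#  xmltodict yields leaf values as strings, hence the float()/int() casts.)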
+ annotation = mirpb.ObjectAnnotation() + annotation.class_id = cid + annotation.score = float(object_dict.get('confidence', '-1.0')) + annotation.anno_quality = float(object_dict.get('box_quality', '-1.0')) + tags = object_dict.get('tags', {}) # tags could be None + if tags: + annotation.tags.update(tags) + + if object_dict.get('bndbox'): + bndbox_dict: Dict[str, Any] = object_dict['bndbox'] + xmin = int(float(bndbox_dict['xmin'])) + ymin = int(float(bndbox_dict['ymin'])) + xmax = int(float(bndbox_dict['xmax'])) + ymax = int(float(bndbox_dict['ymax'])) + width = xmax - xmin + 1 + height = ymax - ymin + 1 + + annotation.box.x = xmin + annotation.box.y = ymin + annotation.box.w = width + annotation.box.h = height + annotation.box.rotate_angle = float(bndbox_dict.get('rotate_angle', '0.0')) + elif object_dict.get('polygon'): + raise NotImplementedError + else: + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message='no value for bndbox or polygon') + return annotation - if not in_sha1_file: - logging.error("empty sha1_file") - return MirCode.RC_CMD_INVALID_ARGS, unknown_types_and_count + +def import_annotations(mir_annotation: mirpb.MirAnnotations, mir_root: str, prediction_dir_path: str, + groundtruth_dir_path: str, map_hashed_filename: Dict[str, str], + unknown_types_strategy: UnknownTypesStrategy, anno_type: "mirpb.AnnoType.V", + phase: str) -> Dict[str, int]: + anno_import_result: Dict[str, int] = defaultdict(int) # read type_id_name_dict and type_name_id_dict - class_type_manager = class_ids.ClassIdManager(mir_root=mir_root) - logging.info("loaded type id and names: %d", class_type_manager.size()) - - image_annotations = mir_annotation.task_annotations[task_id].image_annotations - - assethash_filename_list: List[Tuple[str, str]] = [] # hash id and main file name - with open(in_sha1_file, "r") as in_file: - for line in in_file.readlines(): - line_components = line.strip().split('\t') - if not line_components or len(line_components) < 2: - logging.warning("incomplete line: %s", line) - continue - asset_hash, file_name = line_components[0], line_components[1] - if asset_hash not in mir_metadatas.attributes: - continue - main_file_name = os.path.splitext(os.path.basename(file_name))[0] - assethash_filename_list.append((asset_hash, main_file_name)) - - total_assethash_count = len(assethash_filename_list) - logging.info(f"wrting {total_assethash_count} annotations") - - counter = 0 - missing_annotations_counter = 0 - for asset_hash, main_file_name in assethash_filename_list: - # for each asset, import it's annotations - annotation_file = os.path.join(annotations_dir_path, main_file_name + '.xml') if annotations_dir_path else None - if not annotation_file or not os.path.isfile(annotation_file): - missing_annotations_counter += 1 + class_type_manager = class_ids.load_or_create_userlabels(mir_root=mir_root) + logging.info("loaded type id and names: %d", len(class_type_manager.all_ids())) + + if prediction_dir_path: + logging.info(f"writing prediction in {prediction_dir_path}") + _import_annotations_from_dir( + map_hashed_filename=map_hashed_filename, + mir_annotation=mir_annotation, + annotations_dir_path=prediction_dir_path, + class_type_manager=class_type_manager, + unknown_types_strategy=unknown_types_strategy, + accu_new_class_names=anno_import_result, + image_annotations=mir_annotation.prediction, + anno_type=anno_type, + ) + _import_annotation_meta(class_type_manager=class_type_manager, + annotations_dir_path=prediction_dir_path, 
task_annotations=mir_annotation.prediction) + PhaseLoggerCenter.update_phase(phase=phase, local_percent=0.5) + + if groundtruth_dir_path: + logging.info(f"writing ground-truth in {groundtruth_dir_path}") + _import_annotations_from_dir( + map_hashed_filename=map_hashed_filename, + mir_annotation=mir_annotation, + annotations_dir_path=groundtruth_dir_path, + class_type_manager=class_type_manager, + unknown_types_strategy=unknown_types_strategy, + accu_new_class_names=anno_import_result, + image_annotations=mir_annotation.ground_truth, + anno_type=anno_type, + ) + PhaseLoggerCenter.update_phase(phase=phase, local_percent=1.0) + + if unknown_types_strategy == UnknownTypesStrategy.STOP and anno_import_result: + raise MirRuntimeError(error_code=MirCode.RC_CMD_UNKNOWN_TYPES, + error_message=f"{list(anno_import_result.keys())}") + + return anno_import_result + + +def _import_annotations_from_dir(map_hashed_filename: Dict[str, str], mir_annotation: mirpb.MirAnnotations, + annotations_dir_path: str, class_type_manager: class_ids.UserLabels, + unknown_types_strategy: UnknownTypesStrategy, accu_new_class_names: Dict[str, int], + image_annotations: mirpb.SingleTaskAnnotations, anno_type: "mirpb.AnnoType.V") -> None: + # temp solution: set to seg type if SegmentationClass and labelmap.txt exist. + # will be removed once seg type can be passed via web. + if (os.path.isdir(os.path.join(annotations_dir_path, "SegmentationClass")) + and os.path.isfile(os.path.join(annotations_dir_path, "labelmap.txt"))): + anno_type = mirpb.AnnoType.AT_SEG_MASK + + image_annotations.type = anno_type + _annotation_parse_func(anno_type)( + map_hashed_filename=map_hashed_filename, + mir_annotation=mir_annotation, + annotations_dir_path=annotations_dir_path, + class_type_manager=class_type_manager, + unknown_types_strategy=unknown_types_strategy, + accu_new_class_names=accu_new_class_names, + image_annotations=image_annotations, + ) + + logging.warning(f"imported {len(image_annotations.image_annotations)} / {len(map_hashed_filename)} annotations") + + +def _import_annotations_seg_mask(map_hashed_filename: Dict[str, str], mir_annotation: mirpb.MirAnnotations, + annotations_dir_path: str, class_type_manager: class_ids.UserLabels, + unknown_types_strategy: UnknownTypesStrategy, accu_new_class_names: Dict[str, int], + image_annotations: mirpb.SingleTaskAnnotations) -> None: + # format ref: + # https://github.com/acesso-io/techcore-cvat/tree/develop/cvat/apps/dataset_manager/formats#segmentation-mask-import + # each line represents one label & color mapping, e.g. "ego vehicle:0,181,0::" or "road:07::"; "..." lines are place-holders. + label_map_file = os.path.join(annotations_dir_path, 'labelmap.txt') + if not os.path.isfile(label_map_file): + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message="labelmap.txt is required.") + with open(label_map_file) as f: + records = f.readlines() + + # parse labelmap.txt into map_cname_color. + map_cname_color: Dict[str, Tuple[int, int, int]] = {} + for record in records: + record = record.strip() + if ':' not in record or not record: + logging.info("place-holder line, skipping.") + continue + + record_split = record.split(':') + if len(record_split) != 4: + logging.info(f"invalid labelmap line: {record}") + continue + pos_ints: List[int] = [int(x) for x in record_split[1].split(',')] + if len(pos_ints) == 1: # single channel to 3 channels. 
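# (Illustrative labelmap.txt lines, following the CVAT format referenced
#  above; "person" is an invented class name:
#    background:0,0,0::  -> skipped as background
#    person:0,181,0::    -> color (0, 181, 0)
#    road:07::           -> single channel, expanded to (7, 7, 7))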
+ pos_ints = [pos_ints[0], pos_ints[0], pos_ints[0]] + if len(pos_ints) != 3: + logging.info(f"invalid labelmap color idx: {pos_ints}") + continue + if pos_ints == [0, 0, 0]: + logging.info("ignore background color.") + continue + _, cname = class_type_manager.id_and_main_name_for_name(name=record_split[0]) + map_cname_color[cname] = (pos_ints[0], pos_ints[1], pos_ints[2]) + + # batch add all names, including unknown/known names. + if unknown_types_strategy == UnknownTypesStrategy.ADD: + class_type_manager.add_main_names(list(map_cname_color.keys())) + + # build color map, map all unknown classes to background (0, 0, 0). + map_color_pixel: Dict[Tuple[int, int, int], Tuple[int, int, int]] = {(0, 0, 0): (0, 0, 0)} + map_color_cid: Dict[Tuple[int, int, int], int] = {} + for name, color in map_cname_color.items(): + cid, cname = class_type_manager.id_and_main_name_for_name(name=name) + if cname not in accu_new_class_names: + accu_new_class_names[cname] = 0 + + if cid >= 0: + point = mirpb.IntPoint() + point.x, point.y, point.z = color + image_annotations.map_id_color[cid].CopyFrom(point) + map_color_pixel[color] = color + map_color_cid[color] = cid else: - # if have annotation file, import annotations and predefined key ids - dom_tree = xml.dom.minidom.parse(annotation_file) - if not dom_tree: - logging.error(f"cannot open annotation_file: {annotation_file}") - return MirCode.RC_CMD_INVALID_ARGS, unknown_types_and_count - - collection = dom_tree.documentElement - objects = collection.getElementsByTagName("object") - for idx, obj in enumerate(objects): - type_name = _xml_obj_to_type_name(obj) - if class_type_manager.has_name(type_name): - annotation = _xml_obj_to_annotation(obj, class_type_manager) - annotation.index = idx - image_annotations[asset_hash].annotations.append(annotation) - else: - unknown_types_and_count[type_name] += 1 - - counter += 1 - if counter % 5000 == 0: - PhaseLoggerCenter.update_phase(phase=phase, local_percent=(counter / total_assethash_count)) - - if missing_annotations_counter > 0: - logging.warning(f"asset count that have no annotations: {missing_annotations_counter}") - - return MirCode.RC_OK, unknown_types_and_count + map_color_pixel[color] = (0, 0, 0) + + semantic_mask_dir = os.path.join(annotations_dir_path, "SegmentationClass") + unexpected_color: Set[Tuple[int, int, int]] = set() + for asset_hash, main_file_name in map_hashed_filename.items(): + # for each asset, import its annotations + annotation_file = os.path.join(semantic_mask_dir, main_file_name + '.png') + if not os.path.isfile(annotation_file): + continue + try: + mask_image = Image.open(annotation_file) + except (UnidentifiedImageError, OSError) as e: + logging.info(f"{type(e).__name__}: {e}\nannotation_file: {annotation_file}\n") + continue + asset_type_str: str = mask_image.format.lower() + if asset_type_str != 'png': + logging.error(f"cannot import annotation_file: {annotation_file} as type: {asset_type_str}") + continue + + mask_image = mask_image.convert('RGB') + img_class_ids: Set[int] = set() + width, height = mask_image.size + for x in range(width): + for y in range(height): + color = mask_image.getpixel((x, y)) + if color not in map_color_pixel: + unexpected_color.add(color) + + # map_color_cid (known class names) is subset of map_color_pixel (including known/unknown). + if color in map_color_cid: + img_class_ids.add(map_color_cid[color]) + elif color != (0, 0, 0): # map unknown color to (0,0,0). 
+                    mask_image.putpixel((x, y), (0, 0, 0))
+
+        with io.BytesIO() as output:
+            mask_image.save(output, format="PNG")
+            image_annotations.image_annotations[asset_hash].mask.semantic_mask = output.getvalue()
+        image_annotations.image_annotations[asset_hash].img_class_ids[:] = list(img_class_ids)
+
+    if unexpected_color:
+        logging.info(f"found colors not in labelmap.txt: {unexpected_color}")
+
+
+def _import_annotations_voc_xml(map_hashed_filename: Dict[str, str], mir_annotation: mirpb.MirAnnotations,
+                                annotations_dir_path: str, class_type_manager: class_ids.UserLabels,
+                                unknown_types_strategy: UnknownTypesStrategy, accu_new_class_names: Dict[str, int],
+                                image_annotations: mirpb.SingleTaskAnnotations) -> None:
+    add_if_not_found = (unknown_types_strategy == UnknownTypesStrategy.ADD)
+    for asset_hash, main_file_name in map_hashed_filename.items():
+        # for each asset, import its annotations
+        annotation_file = os.path.join(annotations_dir_path, main_file_name + '.xml')
+        if not os.path.isfile(annotation_file):
+            continue
+
+        with open(annotation_file, 'r') as f:
+            annos_xml_str = f.read()
+        if not annos_xml_str:
+            logging.error(f"cannot open annotation_file: {annotation_file}")
+            continue
+
+        annos_dict: dict = xmltodict.parse(annos_xml_str)['annotation']
+        # cks
+        cks = annos_dict.get('cks', {})  # cks could be None
+        if cks:
+            mir_annotation.image_cks[asset_hash].cks.update(cks)
+        mir_annotation.image_cks[asset_hash].image_quality = float(annos_dict.get('image_quality', '-1.0'))
+
+        # annotations and tags
+        objects: Union[List[dict], dict] = annos_dict.get('object', [])
+        if isinstance(objects, dict):
+            # when there's only ONE object node in xml, it will be parsed to a dict, not a list
+            objects = [objects]
+
+        anno_idx = 0
+        for object_dict in objects:
+            cid, new_type_name = class_type_manager.id_and_main_name_for_name(name=object_dict['name'])
+
+            # check if we have already seen this class_name.
+            if new_type_name in accu_new_class_names:
+                accu_new_class_names[new_type_name] += 1
+            else:
+                # for an unseen class_name, only a negative cid matters.
+                if cid < 0:
+                    if add_if_not_found:
+                        cid, _ = class_type_manager.add_main_name(main_name=new_type_name)
+                    accu_new_class_names[new_type_name] = 0
+
+            if cid >= 0:
+                annotation = _object_dict_to_annotation(object_dict, cid)
+                annotation.index = anno_idx
+                image_annotations.image_annotations[asset_hash].boxes.append(annotation)
+                anno_idx += 1
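As an editorial illustration, a minimal VOC XML file this importer would accept could look as follows. The `cks` and `image_quality` tags are taken from the parser above; the `bndbox` fields are assumed to follow the usual VOC convention consumed by `_object_dict_to_annotation` (defined earlier in this file):

```xml
<annotation>
    <cks>
        <weather>sunny</weather>
        <camera>front</camera>
    </cks>
    <image_quality>0.83</image_quality>
    <object>
        <name>cat</name>
        <bndbox>
            <xmin>10</xmin>
            <ymin>20</ymin>
            <xmax>110</xmax>
            <ymax>220</ymax>
        </bndbox>
    </object>
</annotation>
```

Note that with a single `<object>` node, xmltodict yields a dict rather than a list, which is exactly what the `isinstance(objects, dict)` branch above normalizes.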
+
+
+def _import_annotation_meta(class_type_manager: class_ids.UserLabels, annotations_dir_path: str,
+                            task_annotations: mirpb.SingleTaskAnnotations) -> None:
+    annotation_meta_path = os.path.join(annotations_dir_path, 'meta.yaml')
+    if not os.path.isfile(annotation_meta_path):
+        return
+
+    try:
+        with open(annotation_meta_path, 'r') as f:
+            annotation_meta_dict = yaml.safe_load(f)
+    except Exception:
+        annotation_meta_dict = None
+    if not isinstance(annotation_meta_dict, dict):
+        raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_META_YAML_FILE,
+                              error_message='Invalid meta.yaml')
+
+    # model
+    if 'model' in annotation_meta_dict:
+        ParseDict(annotation_meta_dict['model'], task_annotations.model)
+
+    # eval_class_ids
+    eval_class_names = annotation_meta_dict.get('eval_class_names') or task_annotations.model.class_names
+    task_annotations.eval_class_ids[:] = set(
+        class_type_manager.id_for_names(list(eval_class_names), drop_unknown_names=True)[0])
+
+    # executor_config
+    if 'executor_config' in annotation_meta_dict:
+        task_annotations.executor_config = json.dumps(annotation_meta_dict['executor_config'])
+
+
+def copy_annotations_pred_meta(src_task_annotations: mirpb.SingleTaskAnnotations,
+                               dst_task_annotations: mirpb.SingleTaskAnnotations) -> None:
+    dst_task_annotations.eval_class_ids[:] = src_task_annotations.eval_class_ids
+    dst_task_annotations.executor_config = src_task_annotations.executor_config
+    dst_task_annotations.model.CopyFrom(src_task_annotations.model)
diff --git a/ymir/command/mir/tools/checker.py b/ymir/command/mir/tools/checker.py
index bd340b77c0..e95f919d72 100644
--- a/ymir/command/mir/tools/checker.py
+++ b/ymir/command/mir/tools/checker.py
@@ -4,7 +4,7 @@
 import sys
 from typing import Callable, List
 
-from mir.tools import class_ids, mir_repo_utils
+from mir.tools import mir_repo_utils
 from mir.tools.code import MirCode
 
 
@@ -33,8 +33,7 @@ class Prerequisites(enum.IntEnum):
     Prerequisites.IS_OUTSIDE_MIR_REPO: 'mir_root is already a mir repo',
     Prerequisites.IS_DIRTY: 'mir repo is clean (need dirty)',
     Prerequisites.IS_CLEAN: 'mir repo is dirty (need clean)',
-    Prerequisites.HAVE_LABELS: f"can not find {class_ids.ids_file_name()}",
-    Prerequisites.HAVE_NO_LABELS: f"already have {class_ids.ids_file_name()}",
+    Prerequisites.HAVE_LABELS: "can not find userlabel file",
 }
 
 
@@ -86,10 +85,5 @@ def _check_is_clean(mir_root: str) -> int:
 
 
 def _check_have_labels(mir_root: str) -> int:
-    have_labels = os.path.isfile(class_ids.ids_file_path(mir_root))
+    have_labels = os.path.isfile(os.path.join(mir_root, '.mir', 'labels.yaml'))
     return MirCode.RC_OK if have_labels else MirCode.RC_CMD_INVALID_MIR_REPO
-
-
-def _check_have_no_labels(mir_root: str) -> int:
-    have_labels = os.path.isfile(class_ids.ids_file_path(mir_root))
-    return MirCode.RC_OK if not have_labels else MirCode.RC_CMD_INVALID_MIR_REPO
diff --git a/ymir/command/mir/tools/class_ids.py b/ymir/command/mir/tools/class_ids.py
index 37c2593809..981636d360 100644
--- a/ymir/command/mir/tools/class_ids.py
+++ b/ymir/command/mir/tools/class_ids.py
@@ -1,44 +1,48 @@
+from datetime import datetime
 import os
-from typing import Any, Dict, List, Optional, Set, Tuple
+from typing import Dict, Iterator, List, Optional, Set, Tuple, Union
-from pydantic import BaseModel, validator, root_validator
+import fasteners  # type: ignore
+from mir.version import YMIR_VERSION, ymir_salient_version
+from pydantic import BaseModel, root_validator, validator, validate_model
 import yaml
 
-from mir.tools import utils as mir_utils
 
-EXPECTED_FILE_VERSION = 1
-
-
-class _SingleLabel(BaseModel):
-    id: int
+class SingleLabel(BaseModel):
+    id: int = -1
     name: str
+    create_time: datetime = datetime(year=2022, month=1, day=1)
+    update_time: datetime = datetime(year=2022, month=1, day=1)
     aliases: List[str] = []
 
     @validator('name')
     def _strip_and_lower_name(cls, v: str) -> str:
-        return v.strip().lower()
+        return _normalize_and_check_name(v)
 
     @validator('aliases', each_item=True)
     def _strip_and_lower_alias(cls, v: str) -> str:
-        return v.strip().lower()
+        return _normalize_and_check_name(v)
 
 
-class _LabelStorage(BaseModel):
-    version: int = EXPECTED_FILE_VERSION
-    labels: List[_SingleLabel] = []
-    _label_to_ids: Dict[str, Tuple[int, Optional[str]]] = {}
-    _id_to_labels: Dict[int, str] = {}
+class LabelStorage(BaseModel):
+    labels: List[SingleLabel] = []
+    ymir_version: str = YMIR_VERSION
 
-    @validator('version')
-    def _check_version(cls, v: int) -> int:
-        if v != EXPECTED_FILE_VERSION:
-            raise ValueError(f"incorrect version: {v}, needed {EXPECTED_FILE_VERSION}")
+    # protected: validators
+    @validator('ymir_version')
+    def _check_version(cls, v: str) -> str:
+        expected_sversion = ymir_salient_version(YMIR_VERSION)
+        current_sversion = ymir_salient_version(v)
+        if current_sversion != expected_sversion:
+            raise ValueError(f"version mismatch: {v} ({current_sversion}) vs {YMIR_VERSION} ({expected_sversion})")
         return v
 
     @validator('labels')
-    def _check_labels(cls, labels: List[_SingleLabel]) -> List[_SingleLabel]:
+    def _check_labels(cls, labels: List[SingleLabel]) -> List[SingleLabel]:
         label_names_set: Set[str] = set()
         for idx, label in enumerate(labels):
+            if label.id < 0:
+                label.id = idx
             if label.id != idx:
                 raise ValueError(f"invalid label id: {label.id}, expected {idx}")
@@ -53,170 +57,262 @@ def _check_labels(cls, labels: List[_SingleLabel]) -> List[_SingleLabel]:
             label_names_set.update(name_and_aliases_set)
         return labels
 
+
+class UserLabels(LabelStorage):
+    _id_to_name: Dict[int, str] = {}
+    _name_aliases_to_id: Dict[str, int] = {}
+    storage_file: Optional[str] = None
+
     @root_validator
     def _generate_dicts(cls, values: dict) -> dict:
-        labels: List[_SingleLabel] = values.get('labels', [])
-        label_to_ids: Dict[str, Tuple[int, Optional[str]]] = {}
-        id_to_labels: Dict[int, str] = {}
-        for label in labels:
-            _set_if_not_exists(k=label.name, v=(label.id, None), d=label_to_ids, error_message_prefix='duplicated name')
-            # key: aliases
+        # source priority: storage_file > labels.
+        # in most cases, UserLabels is bound to a storage_file.
+        storage_file = values.get("storage_file")
+        if storage_file and os.path.isfile(storage_file):
+            with open(storage_file, 'r') as f:
+                file_obj = yaml.safe_load(f)
+            if file_obj is None:
+                file_obj = {}
+            values["labels"] = LabelStorage(**file_obj).labels
+
+        name_aliases_to_id: Dict[str, int] = {}
+        id_to_name: Dict[int, str] = {}
+        for label in values['labels']:
+            name_aliases_to_id[label.name] = label.id
             for label_alias in label.aliases:
-                _set_if_not_exists(k=label_alias,
-                                   v=(label.id, label.name),
-                                   d=label_to_ids,
-                                   error_message_prefix='duplicated alias')
+                name_aliases_to_id[label_alias] = label.id
 
-            # self._type_id_name_dict
-            _set_if_not_exists(k=label.id, v=label.name, d=id_to_labels, error_message_prefix='duplicated id')
+            id_to_name[label.id] = label.name
 
-        values['_label_to_ids'] = label_to_ids
-        values['_id_to_labels'] = id_to_labels
+        values['_name_aliases_to_id'] = name_aliases_to_id
+        values['_id_to_name'] = id_to_name
         return values
 
+    def __reload(self) -> None:
+        if not (self.storage_file and os.path.isfile(self.storage_file)):
+            raise RuntimeError("cannot reload with empty storage_file.")
+
+        *_, validation_error = validate_model(self.__class__, self.__dict__)
+        if validation_error:
+            raise validation_error
+
+    def __save(self) -> None:
+        if not self.storage_file:
+            raise RuntimeError("empty storage_file.")
+
+        with open(self.storage_file, 'w') as f:
+            yaml.safe_dump(self.dict(), f)
+
+    def _add_new_cname(self, name: str, exist_ok: bool = True) -> Tuple[int, str]:
+        name = _normalize_and_check_name(name)
+        if name in self._name_aliases_to_id:
+            if not exist_ok:
+                raise ValueError(f"{name} already exists in userlabels.")
+            cid = self._name_aliases_to_id[name]
+            return (cid, self._id_to_name[cid])
+
+        current_datetime = datetime.now()
+        added_class_id = len(self.labels)
+        self.labels.append(
+            SingleLabel(
+                id=added_class_id,
+                name=name,
+                create_time=current_datetime,
+                update_time=current_datetime,
+            ))
+
+        # update lookup dicts.
+        self._name_aliases_to_id[name] = added_class_id
+        self._id_to_name[added_class_id] = name
+
+        return added_class_id, name
+
+    class Config:
+        fields = {'labels': {'include': True}, 'ymir_version': {'include': True}}
+
+    # public interfaces.
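As an editorial aside before the public interface methods that follow in this hunk, here is a minimal usage sketch (hypothetical path; `load_or_create_userlabels` is defined at the end of this file):

```python
from mir.tools import class_ids

# the first call creates an empty labels.yaml; the second call binds to it,
# which is required before new names can be added:
class_ids.load_or_create_userlabels(label_storage_file='/tmp/labels.yaml', create_ok=True)
user_labels = class_ids.load_or_create_userlabels(label_storage_file='/tmp/labels.yaml')

user_labels.add_main_names(['person', 'cat'])  # -> [(0, 'person'), (1, 'cat')]

# lookups normalize (strip + lower) first; unknown names map to id -1:
assert user_labels.id_and_main_name_for_name(' Cat ') == (1, 'cat')
assert user_labels.id_and_main_name_for_name('dog') == (-1, 'dog')
```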
+ def id_and_main_name_for_name(self, name: str) -> Tuple[int, str]: + name = _normalize_and_check_name(name) + id = self._name_aliases_to_id.get(name, -1) + name = self._id_to_name.get(id, name) + return (id, name) + + def id_for_names(self, + names: Union[str, List[str]], + drop_unknown_names: bool = False, + raise_if_unknown: bool = False) -> Tuple[List[int], List[str]]: + if isinstance(names, str): + names = [names] + + class_ids: List[int] = [] + unknown_names: List[str] = [] + for name in names: + class_id, cname = self.id_and_main_name_for_name(name=name) + if class_id >= 0: + class_ids.append(class_id) + else: + unknown_names.append(cname) + if not drop_unknown_names: + class_ids.append(class_id) -def ids_file_name() -> str: - return 'labels.yaml' - - -def ids_file_path(mir_root: str) -> str: - return os.path.join(mir_utils.repo_dot_mir_path(mir_root=mir_root), ids_file_name()) - - -def create_empty_if_not_exists(mir_root: str) -> None: - file_path = ids_file_path(mir_root=mir_root) - if os.path.isfile(file_path): - return - - label_storage = _LabelStorage() - with open(file_path, 'w') as f: - yaml.safe_dump(label_storage.dict(), f) - - -class ClassIdManagerError(BaseException): - pass - - -class ClassIdManager(object): - """ - a query tool for label storage file - """ - __slots__ = ("_storage_file_path", "_label_storage") - - # life cycle - def __init__(self, mir_root: str) -> None: - super().__init__() - - # it will have value iff successfully loaded - self._storage_file_path = '' + if raise_if_unknown and unknown_names: + raise ValueError(f"unknown class found: {unknown_names}") - self.__load(ids_file_path(mir_root=mir_root)) + return class_ids, unknown_names - # private: load and unload - def __load(self, file_path: str) -> bool: - if not file_path: - raise ClassIdManagerError('empty path received') - if self._storage_file_path: - raise ClassIdManagerError(f"already loaded from: {self._storage_file_path}") + def main_name_for_id(self, class_id: int) -> str: + if class_id not in self._id_to_name: + raise ValueError(f"copy: unknown src class id: {class_id}") + return self._id_to_name[class_id] - with open(file_path, 'r') as f: - file_obj = yaml.safe_load(f) - if file_obj is None: - file_obj = {} + def main_name_for_ids(self, class_ids: List[int]) -> List[str]: + return [self.main_name_for_id(class_id) for class_id in class_ids] - self._label_storage = _LabelStorage(**file_obj) - # save `self._storage_file_path` as a flag of successful loading - self._storage_file_path = file_path - return True + def all_main_names(self) -> List[str]: + return list(self._id_to_name.values()) - # public: general - def id_and_main_name_for_name(self, name: str) -> Tuple[int, Optional[str]]: - """ - returns type id and main type name for main type name or alias + def all_main_name_aliases(self) -> List[str]: + return list(self._name_aliases_to_id.keys()) - Args: - name (str): main type name or alias + def all_ids(self) -> List[int]: + return list(self._id_to_name.keys()) - Raises: - ClassIdManagerError: if not loaded, or name is empty + def has_name(self, name: str) -> bool: + return self.id_for_names(name)[0][0] >= 0 - Returns: - Tuple[int, Optional[str]]: (type id, main type name), - if name not found, returns -1, None - """ - name = name.strip().lower() - if not self._storage_file_path: - raise ClassIdManagerError("not loade") - if not name: - raise ClassIdManagerError("empty name") + def has_id(self, cid: int) -> bool: + return cid in self._id_to_name - if name not in 
self._label_storage._label_to_ids:
-            return -1, None
+    def add_main_name(self, main_name: str) -> Tuple[int, str]:
+        return self.add_main_names([main_name])[0]
 
-        return self._label_storage._label_to_ids[name]
+    def add_main_names(self, main_names: List[str]) -> List[Tuple[int, str]]:
+        # reload is only triggered at saving; this is not read safe: main_name may
+        # already have been added in another process.
+        self.__reload()
+        ret_val: List[Tuple[int, str]] = []
 
-    def main_name_for_id(self, type_id: int) -> Optional[str]:
+        # shortcut, return if all names are known.
+        for main_name in main_names:
+            class_id, main_name = self.id_and_main_name_for_name(main_name)
+            if class_id < 0:
+                break
+            ret_val.append((class_id, main_name))
+        if len(ret_val) == len(main_names):  # all known names.
+            return ret_val
+
+        if not self.storage_file:
+            raise RuntimeError("empty storage_file.")
+
+        ret_val.clear()
+        with fasteners.InterProcessLock(path=os.path.realpath(self.storage_file) + '.lock'):
+            for main_name in main_names:
+                added_class_id, main_name = self._add_new_cname(name=main_name)
+                ret_val.append((added_class_id, main_name))
+            self.__save()
+        return ret_val
+
+    def upsert_labels(self, new_labels: "UserLabels", check_only: bool = False) -> "UserLabels":
         """
-        get main type name for type id, if not found, returns None
-
-        Args:
-            type_id (int): type id
-
-        Returns:
-            Optional[str]: corresponding main type name, if not found, returns None
+        update or insert new_labels, return labels that failed to be added
         """
-        return self._label_storage._id_to_labels.get(type_id, None)
+        if not self.storage_file:
+            raise RuntimeError("empty storage_file.")
+
+        self.__reload()
+        with fasteners.InterProcessLock(path=os.path.realpath(self.storage_file) + '.lock'):
+            current_time = datetime.now()
+
+            conflict_labels = []
+            for label in new_labels.labels:
+                new_label = SingleLabel.parse_obj(label.dict())
+                idx = self.id_and_main_name_for_name(label.name)[0]
+
+                # in case any alias is in other labels.
+                conflict_alias = []
+                for alias in label.aliases:
+                    alias_idx = self.id_and_main_name_for_name(alias)[0]
+                    if alias_idx >= 0 and alias_idx != idx:
+                        conflict_alias.append(alias)
+                if conflict_alias:
+                    new_label.id = -1
+                    conflict_labels.append(new_label)
+                    continue
+
+                new_label.update_time = current_time
+                if idx >= 0:  # update alias.
+                    new_label.id = idx
+                    new_label.create_time = self.labels[idx].create_time
+                    self.labels[idx] = new_label
+                else:  # insert new record.
+                    new_label.id = len(self.labels)
+                    new_label.create_time = current_time
+                    self.labels.append(new_label)
+
+            if not (check_only or conflict_labels):
+                self.__save()
+
+        return UserLabels(labels=conflict_labels)
+
+    def find_dups(self, new_labels: Union[str, List, "UserLabels"]) -> List[str]:
+        if isinstance(new_labels, str):
+            new_set = {new_labels}
+        elif isinstance(new_labels, list):
+            new_set = set(new_labels)
+        elif isinstance(new_labels, type(self)):
+            new_set = set(new_labels.all_main_name_aliases())
+        else:
+            raise ValueError(f"unsupported type for find_dups: {type(new_labels)}")
+        return list(set(self.all_main_name_aliases()) & new_set)
+
+    # keyword: {"name": "dog", "aliases": ["puppy", "pup", "canine"]}
+    def filter_labels(
+        self,
+        required_name_aliaes: List[str] = None,
+        required_ids: List[int] = None,
+    ) -> Iterator[SingleLabel]:
+        if required_name_aliaes and required_ids:
+            raise ValueError("required_name_aliaes and required_ids cannot both be set.")
+        if required_name_aliaes:
+            required_ids = self.id_for_names(names=required_name_aliaes, raise_if_unknown=True)[0]
+
+        for label in self.labels:
+            if required_ids is None or label.id in required_ids:
+                yield label
 
-    def id_for_names(self, names: List[str]) -> Tuple[List[int], List[str]]:
-        """
-        return all type ids for names
 
-        Args:
-            names (List[str]): main type names or alias
+def ids_file_name() -> str:
+    return 'labels.yaml'
 
-        Returns:
-            Tuple[List[int], List[str]]: corresponding type ids and unknown names
-        """
-        class_ids = []
-        unknown_names = []
-        for name in names:
-            class_id = self.id_and_main_name_for_name(name=name)[0]
-            class_ids.append(class_id)
-            if class_id < 0:
-                unknown_names.append(name)
 
+def ids_file_path(mir_root: str) -> str:
+    mir_dir = os.path.join(mir_root, '.mir')
+    os.makedirs(mir_dir, exist_ok=True)
+    return os.path.join(mir_dir, ids_file_name())
 
-        return class_ids, unknown_names
 
-    def all_main_names(self) -> List[str]:
-        """
-        Returns:
-            List[str]: all main names, if not loaded, returns empty list
-        """
-        return list(self._label_storage._id_to_labels.values())
+def load_or_create_userlabels(mir_root: Optional[str] = None,
+                              label_storage_file: Optional[str] = None,
+                              create_ok: bool = False) -> UserLabels:
+    if mir_root:
+        if label_storage_file:
+            raise RuntimeError("mir_root and label_storage_file cannot both be set.")
+        label_storage_file = ids_file_path(mir_root=mir_root)
 
-    def all_ids(self) -> List[int]:
-        """
-        Returns:
-            List[int]: all class_ids, if not loaded, returns empty list
-        """
-        return list(self._label_storage._id_to_labels.keys())
+    if not label_storage_file:
+        raise ValueError("empty label_storage_file")
 
-    def size(self) -> int:
-        """
-        Returns:
-            int: size of all type ids and main names, if not loaded, returns 0
-        """
-        return len(self._label_storage._id_to_labels)
+    if os.path.isfile(label_storage_file):
+        return UserLabels(storage_file=label_storage_file)
 
-    def has_name(self, name: str) -> bool:
-        return name.strip().lower() in self._label_storage._label_to_ids
+    if not create_ok:
+        raise RuntimeError("label file missing in mir_root.")
 
-    def has_id(self, type_id: int) -> bool:
-        return type_id in self._label_storage._id_to_labels
+    user_labels = UserLabels()
+    with open(label_storage_file, 'w') as f:
+        yaml.safe_dump(user_labels.dict(), f)
+    return user_labels
 
 
-def _set_if_not_exists(k: Any, v: Any, d: dict, error_message_prefix: str) -> None:
-    if k in d:
-        raise ClassIdManagerError(f"{error_message_prefix}: {k}")
-    d[k] = v
+def _normalize_and_check_name(name: str) -> str:
+    name = name.lower().strip()
+    if not name:
+        raise ValueError("got empty normalized name")
+    return name
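For reference, a labels.yaml written by this module would look roughly like the sketch below. This is illustrative only: yaml.safe_dump sorts keys alphabetically, timestamps are whatever datetime.now() returned, and the version string is whatever YMIR_VERSION is at write time.

```yaml
labels:
- aliases: []
  create_time: 2022-09-01 10:00:00.000000
  id: 0
  name: person
  update_time: 2022-09-01 10:00:00.000000
- aliases:
  - kitty
  create_time: 2022-09-01 10:00:00.000000
  id: 1
  name: cat
  update_time: 2022-09-01 10:00:00.000000
ymir_version: x.y.z  # whatever YMIR_VERSION is at write time
```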
name") + return name diff --git a/ymir/command/mir/tools/code.py b/ymir/command/mir/tools/code.py index 18edef10ec..111e0e1dc4 100644 --- a/ymir/command/mir/tools/code.py +++ b/ymir/command/mir/tools/code.py @@ -1,4 +1,8 @@ from enum import IntEnum +from functools import wraps +import logging +import time +from typing import Dict, Callable class MirCode(IntEnum): @@ -14,4 +18,21 @@ class MirCode(IntEnum): RC_CMD_INVALID_MIR_REPO = 160009 RC_CMD_INVALID_FILE = 160010 RC_CMD_NO_RESULT = 160011 # no result for training, mining and infer + RC_CMD_OPENPAI_ERROR = 160012 + RC_CMD_NO_ANNOTATIONS = 160013 + RC_CMD_CAN_NOT_CALC_CONFUSION_MATRIX = 160014 + RC_CMD_INVALID_MODEL_PACKAGE_VERSION = 160015 + RC_CMD_INVALID_META_YAML_FILE = 160016 RC_CMD_ERROR_UNKNOWN = 169999 + + +def time_it(f: Callable) -> Callable: + @wraps(f) + def wrapper(*args: tuple, **kwargs: Dict) -> Callable: + _start = time.time() + _ret = f(*args, **kwargs) + _cost = time.time() - _start + logging.info(f"|-{f.__name__} costs {_cost:.2f}s({_cost / 60:.2f}m).") + return _ret + + return wrapper diff --git a/ymir/command/mir/tools/command_run_in_out.py b/ymir/command/mir/tools/command_run_in_out.py index c895374a02..8ae8d8ac7e 100644 --- a/ymir/command/mir/tools/command_run_in_out.py +++ b/ymir/command/mir/tools/command_run_in_out.py @@ -6,7 +6,7 @@ import traceback from typing import Any, Callable, Set -from mir.tools import mir_repo_utils, mir_storage_ops, phase_logger, revs_parser, utils +from mir.tools import mir_repo_utils, mir_storage_ops, phase_logger, revs_parser from mir.tools.code import MirCode from mir.tools.errors import MirRuntimeError from mir.protos import mir_command_pb2 as mirpb @@ -17,7 +17,6 @@ def _get_task_name(dst_rev: str) -> str: return revs_parser.parse_single_arg_rev(dst_rev, need_tid=True).tid if dst_rev else 'default_task' -@utils.time_it def _commit_error(code: int, error_msg: str, mir_root: str, src_revs: str, dst_rev: str, predefined_task: Any) -> None: if not src_revs: raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, @@ -42,7 +41,10 @@ def _commit_error(code: int, error_msg: str, mir_root: str, src_revs: str, dst_r mir_storage_ops.MirStorageOps.save_and_commit(mir_root=mir_root, mir_branch=dst_typ_rev_tid.rev, his_branch=src_typ_rev_tid.rev, - mir_datas={}, + mir_datas={ + mirpb.MirStorage.MIR_METADATAS: mirpb.MirMetadatas(), + mirpb.MirStorage.MIR_ANNOTATIONS: mirpb.MirAnnotations() + }, task=predefined_task) @@ -56,7 +58,9 @@ def _cleanup_dir_sub_items(dir: str, ignored_items: Set[str]) -> None: continue item_path = os.path.join(dir, item) - if os.path.isdir(item_path): + if os.path.islink(item_path): + os.unlink(item_path) + elif os.path.isdir(item_path): shutil.rmtree(item_path) elif os.path.isfile(item_path): os.remove(item_path) @@ -66,8 +70,13 @@ def _cleanup(work_dir: str) -> None: if not work_dir: return - _cleanup_dir_sub_items(work_dir, ignored_items={'out'}) + _cleanup_dir_sub_items(work_dir, ignored_items={'in', 'out'}) + _cleanup_dir_sub_items( + os.path.join(work_dir, 'in'), + ignored_items={ + 'config.yaml', # training, mining & infer executor config file + }) _cleanup_dir_sub_items( os.path.join(work_dir, 'out'), ignored_items={ @@ -76,6 +85,8 @@ def _cleanup(work_dir: str) -> None: 'monitor-log.txt', # monitor detail file 'tensorboard', # default root directory for tensorboard event files 'ymir-executor-out.log', # container output + 'infer-result.json', # infer result file + 'result.yaml', # mining result file }) @@ -116,17 +127,17 @@ def wrapper(mir_root: str, 
src_revs: str, dst_rev: str, work_dir: str, *args: tu mir_logger.update_percent_info(local_percent=1, task_state=phase_logger.PhaseStateEnum.DONE) # no need to call _commit_error, already committed inside command run function else: + mir_logger.update_percent_info(local_percent=1, + task_state=phase_logger.PhaseStateEnum.ERROR, + state_code=ret, + state_content=state_message, + trace_message='') _commit_error(code=ret, error_msg=state_message, mir_root=mir_root, src_revs=src_revs, dst_rev=dst_rev, predefined_task=None) - mir_logger.update_percent_info(local_percent=1, - task_state=phase_logger.PhaseStateEnum.ERROR, - state_code=ret, - state_content=state_message, - trace_message='') logging.info(f"command done: {dst_rev}, return code: {ret}") @@ -136,6 +147,11 @@ def wrapper(mir_root: str, src_revs: str, dst_rev: str, work_dir: str, *args: tu # if MirContainerError, MirRuntimeError and BaseException occured # exception saved in exc + mir_logger.update_percent_info(local_percent=1, + task_state=phase_logger.PhaseStateEnum.ERROR, + state_code=error_code, + state_content=state_message, + trace_message=trace_message) if needs_new_commit: _commit_error(code=error_code, error_msg=trace_message, @@ -143,16 +159,12 @@ def wrapper(mir_root: str, src_revs: str, dst_rev: str, work_dir: str, *args: tu src_revs=src_revs, dst_rev=dst_rev, predefined_task=predefined_task) - mir_logger.update_percent_info(local_percent=1, - task_state=phase_logger.PhaseStateEnum.ERROR, - state_code=error_code, - state_content=state_message, - trace_message=trace_message) logging.info(f"command failed: {dst_rev}; exc: {exc}") logging.info(f"trace: {trace_message}") - _cleanup(work_dir=work_dir) + # should not cleanup task env if failed. + # _cleanup(work_dir=work_dir) raise exc diff --git a/ymir/command/mir/tools/context.py b/ymir/command/mir/tools/context.py deleted file mode 100644 index 821f633679..0000000000 --- a/ymir/command/mir/tools/context.py +++ /dev/null @@ -1,46 +0,0 @@ -import os -from typing import List - -import yaml - -from mir.tools import class_ids -from mir.tools import utils as mir_utils - - -def context_file_path_from_mir_root(mir_root: str) -> str: - return os.path.join(mir_utils.repo_dot_mir_path(mir_root=mir_root), 'context.yaml') - - -# save and load -def load(mir_root: str) -> List[int]: - context_file_path = context_file_path_from_mir_root(mir_root) - if not os.path.isfile(context_file_path): - return [] - - with open(context_file_path, 'r') as f: - context_obj = yaml.safe_load(f) - return context_obj.get('project', {}).get('class_ids', []) - - -def save(mir_root: str, project_class_ids: List[int]) -> None: - context_file_path = context_file_path_from_mir_root(mir_root) - - with open(context_file_path, 'w') as f: - yaml.safe_dump({'project': {'class_ids': project_class_ids}}, f) - - -# general -def check_class_ids(mir_root: str, current_class_ids: List[int]) -> bool: - """ - check `current_class_ids` matches mir repo's project class ids settings - - if mir repo has project class ids settings, this function returns True if they are equal - - if mir repo has no project class ids settings, this function always returns True, meaning they are always matched - """ - project_class_ids = load(mir_root=mir_root) - if not project_class_ids: - # if this mir repo not binded to project, treat as equal - return True - user_class_ids = class_ids.ClassIdManager(mir_root).all_ids() - return set(current_class_ids) <= set(user_class_ids) diff --git a/ymir/command/mir/tools/data_exporter.py 
b/ymir/command/mir/tools/data_exporter.py deleted file mode 100644 index 36e2e4e7cc..0000000000 --- a/ymir/command/mir/tools/data_exporter.py +++ /dev/null @@ -1,429 +0,0 @@ -""" -exports the assets and annotations from mir format to ark-training-format -""" - -from collections.abc import Collection -from enum import Enum -import logging -import json -import os -from typing import Any, Callable, Dict, List, Optional, Set -import uuid -import xml.etree.ElementTree as ElementTree - -from mir.protos import mir_command_pb2 as mirpb -from mir.tools import class_ids, mir_storage_ops -from mir.tools import utils as mir_utils -from mir.tools.code import MirCode -from mir.tools.errors import MirRuntimeError - - -class ExportError(Exception): - """ - exception type raised by function `export` - """ - pass - - -class ExportFormat(str, Enum): - EXPORT_FORMAT_UNKNOWN = 'unknown' - EXPORT_FORMAT_NO_ANNOTATION = 'none' - EXPORT_FORMAT_ARK = 'ark' - EXPORT_FORMAT_VOC = 'voc' - EXPORT_FORMAT_LS_JSON = 'ls_json' # label studio json format - - -def check_support_format(anno_format: str) -> bool: - return anno_format in support_format_type() - - -def support_format_type() -> List[str]: - return [f.value for f in ExportFormat] - - -def format_type_from_str(anno_format: str) -> ExportFormat: - return ExportFormat(anno_format.lower()) - - -def format_file_ext(anno_format: ExportFormat) -> str: - _format_ext_map = { - ExportFormat.EXPORT_FORMAT_ARK: '.txt', - ExportFormat.EXPORT_FORMAT_VOC: '.xml', - ExportFormat.EXPORT_FORMAT_LS_JSON: '.json', - } - return _format_ext_map[anno_format] - - -def _rel_annotation_path_for_asset(rel_asset_path: str, format_type: ExportFormat) -> str: - rel_asset_path_without_ext = os.path.splitext(rel_asset_path)[0] - return f"{rel_asset_path_without_ext}{format_file_ext(format_type)}" - - -def format_file_output_func(anno_format: ExportFormat) -> Callable: - _format_func_map = { - ExportFormat.EXPORT_FORMAT_ARK: _single_image_annotations_to_ark, - ExportFormat.EXPORT_FORMAT_VOC: _single_image_annotations_to_voc, - ExportFormat.EXPORT_FORMAT_LS_JSON: _single_image_annotations_to_ls_json, - } - return _format_func_map[anno_format] - - -# public: export -def export(mir_root: str, - assets_location: str, - class_type_ids: Dict[int, int], - asset_ids: Set[str], - asset_dir: str, - annotation_dir: str, - need_ext: bool, - need_id_sub_folder: bool, - base_branch: str, - base_task_id: str, - format_type: ExportFormat, - index_file_path: str = '', - index_assets_prefix: str = '', - index_annotations_prefix: str = '') -> bool: - """ - export assets and annotations - - Args: - mir_root (str): path to mir repo root directory - assets_location (str): path to assets storage directory - class_type_ids (Dict[int, int]): class ids (and it's mapping value) - all objects within this dict keys will be exported, if None, export everything; - asset_ids (Set[str]): export asset ids - asset_dir (str): asset directory - annotation_dir (str): annotation directory, if format_type is NO_ANNOTATION, this could be None - need_ext (bool): if true, all export assets will have it's type as ext, jpg, png, etc. 
- need_id_sub_folder (bool): if True, use last 2 chars of asset id as a sub folder name - base_branch (str): data branch - format_type (ExportFormat): format type, NONE means exports no annotations - index_file_path (str): path to index file, if None, generates no index file - index_assets_prefix (str): prefix path added to each asset index path - index_annotations_prefix (str): prefix path added to each annotation index path - - Raises: - MirRuntimeError - - Returns: - bool: returns True if success - """ - if not mir_root: - raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, - error_message="invalid mir_repo") - - if not check_support_format(format_type): - raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, - error_message=f"invalid --format: {format_type}") - - # export assets - os.makedirs(asset_dir, exist_ok=True) - asset_result = mir_utils.store_assets_to_dir(asset_ids=list(asset_ids), - out_root=asset_dir, - sub_folder=".", - asset_location=assets_location, - overwrite=False, - create_prefix=need_id_sub_folder, - need_suffix=need_ext) - - # export annotations - if format_type != ExportFormat.EXPORT_FORMAT_NO_ANNOTATION: - [mir_metadatas, mir_annotations] = mir_storage_ops.MirStorageOps.load_multiple_storages( - mir_root=mir_root, - mir_branch=base_branch, - mir_task_id=base_task_id, - ms_list=[mirpb.MirStorage.MIR_METADATAS, mirpb.MirStorage.MIR_ANNOTATIONS]) - - # add all annotations to assets_to_det_annotations_dict - # key: asset_id as str, value: annotations as List[mirpb.Annotation] - assets_to_det_annotations_dict = _annotations_by_assets( - mir_annotations=mir_annotations, - class_type_ids=set(class_type_ids.keys()) if class_type_ids else None, - base_task_id=mir_annotations.head_task_id) - - _export_detect_annotations_to_path(asset_ids=list(asset_ids), - format_type=format_type, - mir_metadatas=mir_metadatas, - annotations_dict=assets_to_det_annotations_dict, - class_type_mapping=class_type_ids, - dest_path=annotation_dir, - mir_root=mir_root, - assert_id_filename_map=asset_result) - - # generate index file - if index_file_path: - _generate_asset_index_file(asset_rel_paths=asset_result.values(), - index_assets_prefix=index_assets_prefix, - index_annotations_prefix=index_annotations_prefix, - index_file_path=index_file_path, - format_type=format_type) - - return True - - -def _generate_asset_index_file(asset_rel_paths: Collection, - index_assets_prefix: str, - index_annotations_prefix: str, - index_file_path: str, - format_type: ExportFormat, - overwrite: bool = True, - image_exts: tuple = ('.jpg', '.jpeg', '.png')) -> None: - """ - generate index file for export result - - if format_type == NO_ANNOTATION, index file contains only asset paths - - if not, index file contains both asset and annotation paths, separated by `\t` - - Args: - asset_rel_paths (Collection): the relative asset paths, element type: str - index_assets_prefix (str): prefix path added in front of each element in asset_rel_paths - index_annotations_prefix (str): prefix path added in front of each annotations - index_file_path (str): index file save path - format_type (ExporterFormat): format type - override (bool): if True, override if file already exists, if False, raise Exception when already exists - - Raise: - MirRuntimeError: if index file already exists, and override set to False - """ - if not asset_rel_paths or not index_file_path: - raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, - error_message='empty asset_rel_paths or index_file_path') - if 
os.path.exists(index_file_path): - if overwrite: - logging.warning(f"index file already exists, overwriting: {index_file_path}") - else: - raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, - error_message=f"index file already exists: {index_file_path}") - - with open(index_file_path, 'w') as f: - for item in asset_rel_paths: - if os.path.splitext(item)[1] not in image_exts: - logging.warning(f"unsupported image ext in path: {item}") - continue - - asset_path = os.path.join(index_assets_prefix, item) - if format_type == ExportFormat.EXPORT_FORMAT_NO_ANNOTATION: - annotation_path = '' - else: - annotation_rel_path = _rel_annotation_path_for_asset(rel_asset_path=item, format_type=format_type) - annotation_path = os.path.join(index_annotations_prefix, annotation_rel_path) - f.write(f"{asset_path}\t{annotation_path}\n") - - -# private: export annotations: general -def _annotations_by_assets(mir_annotations: mirpb.MirAnnotations, class_type_ids: Optional[Set[int]], - base_task_id: str) -> Dict[str, List[mirpb.Annotation]]: - """ - get annotations by assets - - Args: - mir_annotations (mirpb.MirAnnotations): annotations - class_type_ids (Optional[Set[int]]): only type ids within it could be output, if None, no id filter applied - base_task_id (str): base task id - - Returns: - Dict, key: asset id, value: List[mirpb.Annotation] - """ - assets_to_det_annotations_dict = {} # type: Dict[str, List[mirpb.Annotation]] - - if base_task_id not in mir_annotations.task_annotations: - raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_MIR_REPO, - error_message=f"base task id: {base_task_id} not in mir_annotations") - - task_annotations = mir_annotations.task_annotations[base_task_id] - for asset_id, image_annotations in task_annotations.image_annotations.items(): - matched_annotations = [ - annotation for annotation in image_annotations.annotations - if (not class_type_ids or (annotation.class_id in class_type_ids)) - ] - assets_to_det_annotations_dict[asset_id] = matched_annotations - - return assets_to_det_annotations_dict - - -def _export_detect_annotations_to_path(asset_ids: List[str], format_type: ExportFormat, - mir_metadatas: mirpb.MirMetadatas, - annotations_dict: Dict[str, List[mirpb.Annotation]], - class_type_mapping: Optional[Dict[int, int]], dest_path: str, mir_root: str, - assert_id_filename_map: Dict[str, str]) -> None: - if not asset_ids: - raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message='empty asset_ids') - if not mir_metadatas: - raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message='invalid mir_metadatas') - - os.makedirs(dest_path, exist_ok=True) - - cls_id_mgr = class_ids.ClassIdManager(mir_root=mir_root) - - missing_counter = 0 - empty_counter = 0 - for asset_id in asset_ids: - if asset_id not in mir_metadatas.attributes: - raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, - error_message=f"can not find asset id: {asset_id} in mir_metadatas") - attrs = mir_metadatas.attributes[asset_id] - - if asset_id not in annotations_dict: - missing_counter += 1 - annotations = [] - else: - annotations = annotations_dict[asset_id] - if len(annotations) == 0: - empty_counter += 1 - - format_func = format_file_output_func(anno_format=format_type) - asset_file_name = assert_id_filename_map[asset_id] - anno_str = format_func(asset_id=asset_id, - attrs=attrs, - annotations=annotations, - class_type_mapping=class_type_mapping, - cls_id_mgr=cls_id_mgr, - asset_filename=asset_file_name) - - annotation_file_path = os.path.join( - 
dest_path, _rel_annotation_path_for_asset(rel_asset_path=asset_file_name, format_type=format_type)) - os.makedirs(os.path.dirname(annotation_file_path), exist_ok=True) - with open(annotation_file_path, 'w') as f: - f.write(anno_str) - - logging.info(f"missing annotations: {missing_counter}, " - f"empty annotations: {empty_counter} out of {len(asset_ids)} assets") - - -def _single_image_annotations_to_ark(asset_id: str, attrs: Any, annotations: List[mirpb.Annotation], - class_type_mapping: Optional[Dict[int, int]], cls_id_mgr: class_ids.ClassIdManager, - asset_filename: str) -> str: - output_str = "" - for annotation in annotations: - mapped_id = class_type_mapping[annotation.class_id] if class_type_mapping else annotation.class_id - output_str += f"{mapped_id}, {annotation.box.x}, {annotation.box.y}, " - output_str += f"{annotation.box.x + annotation.box.w - 1}, {annotation.box.y + annotation.box.h - 1}\n" - return output_str - - -def _single_image_annotations_to_voc(asset_id: str, attrs: Any, annotations: List[mirpb.Annotation], - class_type_mapping: Optional[Dict[int, int]], cls_id_mgr: class_ids.ClassIdManager, - asset_filename: str) -> str: - # annotation - annotation_node = ElementTree.Element('annotation') - - # annotation: folder - folder_node = ElementTree.SubElement(annotation_node, 'folder') - folder_node.text = 'folder' - - # annotation: filename - filename_node = ElementTree.SubElement(annotation_node, 'filename') - filename_node.text = asset_filename - - # annotation: source - source_node = ElementTree.SubElement(annotation_node, 'source') - - # annotation: source: database - database_node = ElementTree.SubElement(source_node, 'database') - database_node.text = attrs.dataset_name or 'unknown' - - # annotation: source: annotation - annotation2_node = ElementTree.SubElement(source_node, 'annotation') - annotation2_node.text = 'unknown' - - # annotation: source: image - image_node = ElementTree.SubElement(source_node, 'image') - image_node.text = 'unknown' - - # annotation: size - size_node = ElementTree.SubElement(annotation_node, 'size') - - # annotation: size: width - width_node = ElementTree.SubElement(size_node, 'width') - width_node.text = str(attrs.width) - - # annotation: size: height - height_node = ElementTree.SubElement(size_node, 'height') - height_node.text = str(attrs.height) - - # annotation: size: depth - depth_node = ElementTree.SubElement(size_node, 'depth') - depth_node.text = str(attrs.image_channels) - - # annotation: segmented - segmented_node = ElementTree.SubElement(annotation_node, 'segmented') - segmented_node.text = '0' - - # annotation: object(s) - for annotation in annotations: - object_node = ElementTree.SubElement(annotation_node, 'object') - - name_node = ElementTree.SubElement(object_node, 'name') - name_node.text = cls_id_mgr.main_name_for_id(annotation.class_id) or 'unknown' - - pose_node = ElementTree.SubElement(object_node, 'pose') - pose_node.text = 'unknown' - - truncated_node = ElementTree.SubElement(object_node, 'truncated') - truncated_node.text = 'unknown' - - occluded_node = ElementTree.SubElement(object_node, 'occluded') - occluded_node.text = '0' - - bndbox_node = ElementTree.SubElement(object_node, 'bndbox') - - xmin_node = ElementTree.SubElement(bndbox_node, 'xmin') - xmin_node.text = str(annotation.box.x) - - ymin_node = ElementTree.SubElement(bndbox_node, 'ymin') - ymin_node.text = str(annotation.box.y) - - xmax_node = ElementTree.SubElement(bndbox_node, 'xmax') - xmax_node.text = str(annotation.box.x + annotation.box.w - 1) - - 
ymax_node = ElementTree.SubElement(bndbox_node, 'ymax') - ymax_node.text = str(annotation.box.y + annotation.box.h - 1) - - difficult_node = ElementTree.SubElement(object_node, 'difficult') - difficult_node.text = '0' - - return ElementTree.tostring(element=annotation_node, encoding='unicode') - - -def _single_image_annotations_to_ls_json(asset_id: str, attrs: Any, annotations: List[mirpb.Annotation], - class_type_mapping: Optional[Dict[int, int]], - cls_id_mgr: class_ids.ClassIdManager, asset_filename: str) -> str: - out_type = "predictions" # out_type: annotation type - "annotations" or "predictions" - to_name = 'image' # to_name: object name from Label Studio labeling config - from_name = 'label' # control tag name from Label Studio labeling config - task: Dict = { - out_type: [{ - "result": [], - "ground_truth": False, - }], - "data": { - "image": asset_filename - } - } - - for annotation in annotations: - bbox_x, bbox_y = float(annotation.box.x), float(annotation.box.y) - bbox_width, bbox_height = float(annotation.box.w), float(annotation.box.h) - img_width, img_height = attrs.width, attrs.height - item = { - "id": uuid.uuid4().hex[0:10], # random id to identify this annotation. - "type": "rectanglelabels", - "value": { - # Units of image annotations in label studio is percentage of image width/height. - # https://labelstud.io/guide/predictions.html#Units-of-image-annotations - "x": bbox_x / img_width * 100, - "y": bbox_y / img_height * 100, - "width": bbox_width / img_width * 100, - "height": bbox_height / img_height * 100, - "rotation": 0, - "rectanglelabels": [cls_id_mgr.main_name_for_id(annotation.class_id) or 'unknown'] - }, - "to_name": to_name, - "from_name": from_name, - "image_rotation": 0, - "original_width": img_width, - "original_height": img_height - } - task[out_type][0]['result'].append(item) - return json.dumps(task) diff --git a/ymir/command/mir/tools/det_eval.py b/ymir/command/mir/tools/det_eval_coco.py similarity index 53% rename from ymir/command/mir/tools/det_eval.py rename to ymir/command/mir/tools/det_eval_coco.py index b07c11237d..61c1463c46 100644 --- a/ymir/command/mir/tools/det_eval.py +++ b/ymir/command/mir/tools/det_eval_coco.py @@ -1,157 +1,100 @@ from collections import defaultdict -from typing import Any, List, Optional, Set, Union -from mir.tools.code import MirCode +from typing import Any, Dict, List, Optional, Set, Tuple, Union import numpy as np -from mir.tools import mir_storage_ops, revs_parser +from mir.tools import det_eval_utils +from mir.tools.code import MirCode from mir.tools.errors import MirRuntimeError from mir.protos import mir_command_pb2 as mirpb class MirCoco: - def __init__(self, mir_root: str, rev_tid: revs_parser.TypRevTid, conf_thr: float) -> None: - m: mirpb.MirMetadatas - a: mirpb.MirAnnotations - k: mirpb.MirKeywords - m, a, k, = mir_storage_ops.MirStorageOps.load_multiple_storages(mir_root=mir_root, - mir_branch=rev_tid.rev, - mir_task_id=rev_tid.tid, - ms_list=[ - mirpb.MirStorage.MIR_METADATAS, - mirpb.MirStorage.MIR_ANNOTATIONS, - mirpb.MirStorage.MIR_KEYWORDS, - ]) - if len(m.attributes) == 0: - raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, - error_message='no assets in evaluated dataset') - if len(a.task_annotations[a.head_task_id].image_annotations) == 0: - raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, - error_message='no annotations in evaluated dataset') + def __init__(self, task_annotations: mirpb.SingleTaskAnnotations, conf_thr: Optional[float]) -> None: + """ + creates MirCoco 
instance
-        self._mir_metadatas = m
-        self._mir_annotations = a
+        Args:
+            task_annotations (mirpb.SingleTaskAnnotations): pred or gt annotations
+            conf_thr (Optional[float]): lower bound of annotation confidence score
+                only annotations with confidence no less than conf_thr will be used;
+                if you wish to use all annotations, set conf_thr to None
+        """
+        if len(task_annotations.image_annotations) == 0:
+            raise MirRuntimeError(error_code=MirCode.RC_CMD_NO_ANNOTATIONS,
+                                  error_message='no annotations in evaluated dataset')
 
         # ordered list of asset / image ids
-        self._ordered_asset_ids = sorted(list(self._mir_metadatas.attributes.keys()))
-        # key: asset id, value: index in `self._ordered_asset_ids`
-        self._asset_id_to_ordered_idxes = {asset_id: idx for idx, asset_id in enumerate(self._ordered_asset_ids)}
-        # ordered list of class / category ids
-        self._ordered_class_ids = sorted(list(k.index_predifined_keyids.keys()))
-
-        self.img_cat_to_annotations = defaultdict(list)
-        annos = self._get_annotations(asset_idxes=self.get_asset_idxes(),
-                                      class_ids=self.get_class_ids(),
-                                      conf_thr=conf_thr)
-        for anno in annos:
-            self.img_cat_to_annotations[anno['asset_idx'], anno['class_id']].append(anno)
-
-        self.dataset_id = rev_tid.rev_tid
-
-    def load_dts_from_gt(self, mir_root: str, rev_tids: List[revs_parser.TypRevTid],
-                         conf_thr: float) -> List['MirCoco']:
-        gt_asset_ids_set = set(self.get_asset_ids())
-        mir_dts: List['MirCoco'] = []
-        for rev_tid in rev_tids:
-            mir_dt = MirCoco(mir_root=mir_root, rev_tid=rev_tid, conf_thr=conf_thr)
-            if set(mir_dt.mir_metadatas.attributes.keys()) != gt_asset_ids_set:
-                raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS,
-                                      error_message='prediction and ground truth have different assets')
-
-            mir_dts.append(mir_dt)
-        return mir_dts
-
-    @property
-    def mir_metadatas(self) -> mirpb.MirMetadatas:
-        return self._mir_metadatas
-
-    @property
-    def mir_annotations(self) -> mirpb.MirAnnotations:
-        return self._mir_annotations
-
-    def _get_annotations(self, asset_idxes: List[int], class_ids: List[int], conf_thr: float) -> List[dict]:
-        """
-        get all annotations list for asset ids and class ids
+        self.asset_ids = list(task_annotations.image_annotations.keys())
 
-        if asset_idxes and class_ids provided, only returns filtered annotations
+        self.img_cat_to_annotations = self._aggregate_annotations(single_task_annotations=task_annotations,
+                                                                  conf_thr=conf_thr)
+
+    def _aggregate_annotations(self, single_task_annotations: mirpb.SingleTaskAnnotations,
+                               conf_thr: Optional[float]) -> Dict[Tuple[str, int], List[dict]]:
+        """
+        aggregates annotations with confidence >= conf_thr into a dict with key: (asset id, class id)
 
         Args:
-            asset_idxes (List[int]): asset ids, if not provided, returns annotations for all images
-            class_ids (List[int]): class ids, if not provided, returns annotations for all classe
-            conf_thr (float): confidence threshold of bbox
+            single_task_annotations (mirpb.SingleTaskAnnotations): annotations
+            conf_thr (float): confidence threshold of bbox, set to None if you want all annotations
 
         Returns:
-            a list of annotations and asset ids
+            annotations dict with key: (asset id, class id), value: annotations list,
             each element is a dict, and has following keys and values:
-            asset_id: str, image / asset id
-            asset_idx: int, position of asset id in `self.get_asset_ids()`
-            id: int, id for a single annotation
-            class_id: int, category / class id
+            id: int, global id for a single annotation
             area: int, area of bbox
             bbox: List[int], bounding box, xywh
            score: float, confidence
of bbox iscrowd: always 0 because mir knows nothing about it + ignore: always 0 + pb_index_id: annotation.index in mir_annotations file """ - result_annotations_list: List[dict] = [] - - single_task_annotations = self._mir_annotations.task_annotations[self._mir_annotations.head_task_id] - if not asset_idxes: - asset_idxes = self.get_asset_idxes() + img_cat_to_annotations: Dict[Tuple[str, int], List[dict]] = defaultdict(list) annotation_idx = 1 - for asset_idx in asset_idxes: - asset_id = self._ordered_asset_ids[asset_idx] + for asset_id in self.asset_ids: if asset_id not in single_task_annotations.image_annotations: continue single_image_annotations = single_task_annotations.image_annotations[asset_id] - for annotation in single_image_annotations.annotations: - if class_ids and annotation.class_id not in class_ids: - continue - if annotation.score < conf_thr: + for annotation in single_image_annotations.boxes: + if conf_thr is not None and annotation.score < conf_thr: continue annotation_dict = { - 'asset_id': asset_id, - 'asset_idx': asset_idx, 'id': annotation_idx, - 'class_id': annotation.class_id, 'area': annotation.box.w * annotation.box.h, 'bbox': [annotation.box.x, annotation.box.y, annotation.box.w, annotation.box.h], 'score': annotation.score, 'iscrowd': 0, 'ignore': 0, + 'pb_index_id': annotation.index, } - result_annotations_list.append(annotation_dict) + img_cat_to_annotations[asset_id, annotation.class_id].append(annotation_dict) annotation_idx += 1 - return result_annotations_list - - def get_asset_ids(self) -> List[str]: - return self._ordered_asset_ids - - def get_asset_idxes(self) -> List[int]: - return list(range(len(self._ordered_asset_ids))) + return img_cat_to_annotations - def get_class_ids(self) -> List[int]: - return self._ordered_class_ids - -class MirDetEval: - def __init__(self, coco_gt: MirCoco, coco_dt: MirCoco, params: 'Params' = None): - self.cocoGt = coco_gt # ground truth COCO API - self.cocoDt = coco_dt # detections COCO API - self.evalImgs: list = [] # per-image per-category evaluation results [KxAxI] elements +class CocoDetEval: + def __init__(self, coco_gt: MirCoco, coco_dt: MirCoco, params: 'Params'): + self.evalImgs: dict = {} # per-image per-category evaluation results [KxAxI] elements self.eval: dict = {} # accumulated evaluation results - self._gts: dict = coco_gt.img_cat_to_annotations # gt for evaluation - self._dts: dict = coco_dt.img_cat_to_annotations # dt for evaluation - self.params = params or Params() # parameters + self.params = params self.stats: np.ndarray = np.zeros(1) # result summarization self.ious: dict = { } # key: (asset id, class id), value: ious ndarray of ith dt (sorted by score, desc) and jth gt - self.params.imgIdxes = coco_gt.get_asset_idxes() - self.params.catIds = coco_gt.get_class_ids() + + self._gts = defaultdict(list, coco_gt.img_cat_to_annotations) + self._dts = defaultdict(list, coco_dt.img_cat_to_annotations) + self._asset_ids: List[str] = sorted(set(coco_gt.asset_ids) | set(coco_dt.asset_ids)) + + self._coco_gt = coco_gt + self._coco_dt = coco_dt + + self.match_result = det_eval_utils.DetEvalMatchResult() def evaluate(self) -> None: ''' @@ -159,7 +102,6 @@ def evaluate(self) -> None: Returns: None SideEffects: - self.params.catIds / imgIdxes: duplicated class and asset ids will be removed self.params.maxDets: will be sorted self.ious: will be cauculated self.evalImgs: will be cauculated @@ -171,28 +113,28 @@ def evaluate(self) -> None: catIds = p.catIds # self.ious: key: (img_idx, class_id), value: ious ndarray of 
len(dts) * len(gts) - self.ious = {(imgIdx, catId): self.computeIoU(imgIdx, catId) for imgIdx in p.imgIdxes for catId in catIds} + self.ious = {(asset_id, catId): self.computeIoU(asset_id, catId) + for asset_id in self._asset_ids for catId in catIds} maxDet = p.maxDets[-1] - self.evalImgs = [ - self.evaluateImg(imgIdx, catId, areaRng, maxDet) for catId in catIds for areaRng in p.areaRng - for imgIdx in p.imgIdxes - ] + self.evalImgs = {(asset_id, cIdx, aIdx): self.evaluateImg(asset_id, catId, areaRng, maxDet) + for cIdx, catId in enumerate(catIds) for aIdx, areaRng in enumerate(p.areaRng) + for asset_id in self._asset_ids} - def computeIoU(self, imgIdx: int, catId: int) -> Union[np.ndarray, list]: + def computeIoU(self, asset_id: str, catId: int) -> Union[np.ndarray, list]: """ compute ious of detections and ground truth boxes of single image and class /category Args: - imgIdx (int): asset / image ordered idx + asset_id (str): asset id catId (int): category / class id Returns: ious ndarray of detections and ground truth boxes of single image and category ious[i][j] means the iou i-th detection (sorted by score, desc) and j-th ground truth box """ - gt = self._gts[imgIdx, catId] - dt = self._dts[imgIdx, catId] + gt = self._gts[asset_id, catId] + dt = self._dts[asset_id, catId] if len(gt) == 0 and len(dt) == 0: return [] @@ -232,12 +174,12 @@ def _single_iou(d_box: List[int], g_box: List[int], iscrowd: int) -> float: ious[d_idx, g_idx] = _single_iou(d_box, g_box, iscrowd[g_idx]) return ious - def evaluateImg(self, imgIdx: int, catId: int, aRng: Any, maxDet: int) -> Optional[dict]: + def evaluateImg(self, asset_id: str, catId: int, aRng: Any, maxDet: int) -> Optional[dict]: ''' perform evaluation for single category and image Args: - imgIdx (int): image / asset ordered index + asset_id (str): asset id catId (int): category / class id aRng (List[float]): area range (lower and upper bound) maxDet (int): @@ -245,8 +187,8 @@ def evaluateImg(self, imgIdx: int, catId: int, aRng: Any, maxDet: int) -> Option Returns: dict (single image results) ''' - gt = self._gts[imgIdx, catId] - dt = self._dts[imgIdx, catId] + gt = self._gts[asset_id, catId] + dt = self._dts[asset_id, catId] if len(gt) == 0 and len(dt) == 0: return None @@ -263,7 +205,8 @@ def evaluateImg(self, imgIdx: int, catId: int, aRng: Any, maxDet: int) -> Option dt = [dt[i] for i in dtind[0:maxDet]] iscrowd = [int(o['iscrowd']) for o in gt] # load computed ious - ious = self.ious[imgIdx, catId][:, gtind] if len(self.ious[imgIdx, catId]) > 0 else self.ious[imgIdx, catId] + ious = self.ious[asset_id, catId][:, gtind] if len(self.ious[asset_id, catId]) > 0 else self.ious[asset_id, + catId] p = self.params T = len(p.iouThrs) @@ -298,17 +241,17 @@ def evaluateImg(self, imgIdx: int, catId: int, aRng: Any, maxDet: int) -> Option dtIg[tind, dind] = gtIg[m] dtm[tind, dind] = gt[m]['id'] gtm[tind, m] = d['id'] + + self.match_result.add_match(asset_id=asset_id, + iou_thr=t, + gt_pb_idx=gt[m]['pb_index_id'], + pred_pb_idx=d['pb_index_id']) + # set unmatched detections outside of area range to ignore a = np.array([d['area'] < aRng[0] or d['area'] > aRng[1] for d in dt]).reshape((1, len(dt))) dtIg = np.logical_or(dtIg, np.logical_and(dtm == 0, np.repeat(a, T, 0))) # store results for given image and category return { - 'image_id': imgIdx, - 'category_id': catId, - 'aRng': aRng, - 'maxDet': maxDet, - 'dtIds': [d['id'] for d in dt], - 'gtIds': [g['id'] for g in gt], 'dtMatches': dtm, 'gtMatches': gtm, 'dtScores': [d['score'] for d in dt], @@ -345,21 
+288,16 @@ def accumulate(self, p: 'Params' = None) -> None: setK: set = set(catIds) setA: Set[tuple] = set(map(tuple, self.params.areaRng)) setM: set = set(self.params.maxDets) - setI: set = set(self.params.imgIdxes) # get inds to evaluate k_list = [n for n, k in enumerate(p.catIds) if k in setK] m_list = [m for n, m in enumerate(p.maxDets) if m in setM] a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA] - i_list = [n for n, i in enumerate(p.imgIdxes) if i in setI] - I0 = len(self.params.imgIdxes) - A0 = len(self.params.areaRng) # retrieve E at each category, area range, and max number of detections - for k, k0 in enumerate(k_list): - Nk = k0 * A0 * I0 - for a, a0 in enumerate(a_list): - Na = a0 * I0 + for k, _ in enumerate(k_list): + for a, _ in enumerate(a_list): + # Na = a0 * I0 for m, maxDet in enumerate(m_list): - E = [self.evalImgs[Nk + Na + i] for i in i_list] + E = [self.evalImgs.get((asset_id, k, a), None) for asset_id in self._asset_ids] E = [e for e in E if e is not None] if len(E) == 0: continue @@ -432,7 +370,7 @@ def accumulate(self, p: 'Params' = None) -> None: 'all_fns': all_fns, } - def get_evaluation_result(self) -> mirpb.SingleDatasetEvaluation: + def get_evaluation_result(self, area_ranges_index: int, max_dets_index: int) -> mirpb.SingleDatasetEvaluation: if not self.eval: raise ValueError('Please run accumulate() first') @@ -441,29 +379,28 @@ def get_evaluation_result(self) -> mirpb.SingleDatasetEvaluation: # iou evaluations for iou_thr_index, iou_thr in enumerate(self.params.iouThrs): - iou_evaluation = self._get_iou_evaluation_result(iou_thr_index=iou_thr_index) + iou_evaluation = self._get_iou_evaluation_result(area_ranges_index=area_ranges_index, + max_dets_index=max_dets_index, + iou_thr_index=iou_thr_index) evaluation_result.iou_evaluations[f"{iou_thr:.2f}"].CopyFrom(iou_evaluation) - # average evaluation - evaluation_result.iou_averaged_evaluation.CopyFrom(self._get_iou_evaluation_result()) - return evaluation_result - def _get_iou_evaluation_result(self, iou_thr_index: int = None) -> mirpb.SingleIouEvaluation: + def _get_iou_evaluation_result(self, + area_ranges_index: int, + max_dets_index: int, + iou_thr_index: int = None) -> mirpb.SingleIouEvaluation: iou_evaluation = mirpb.SingleIouEvaluation() # ci evaluations: category / class ids for class_id_index, class_id in enumerate(self.params.catIds): - topic_evaluation = self._get_topic_evaluation_result(iou_thr_index, class_id_index) - iou_evaluation.ci_evaluations[class_id].CopyFrom(topic_evaluation) - # class average - topic_evaluation = self._get_topic_evaluation_result(iou_thr_index, None) - iou_evaluation.ci_averaged_evaluation.CopyFrom(topic_evaluation) + ee = self._get_evaluation_element(iou_thr_index, class_id_index, area_ranges_index, max_dets_index) + iou_evaluation.ci_evaluations[class_id].CopyFrom(ee) return iou_evaluation - def _get_topic_evaluation_result(self, iou_thr_index: Optional[int], - class_id_index: Optional[int]) -> mirpb.SingleTopicEvaluation: + def _get_evaluation_element(self, iou_thr_index: Optional[int], class_id_index: Optional[int], + area_ranges_index: int, max_dets_index: int) -> mirpb.SingleEvaluationElement: def _get_tp_tn_or_fn(iou_thr_index: Optional[int], class_id_index: Optional[int], area_ranges_index: int, max_dets_index: int, array: np.ndarray) -> int: """ @@ -480,11 +417,7 @@ def _get_tp_tn_or_fn(iou_thr_index: Optional[int], class_id_index: Optional[int] array = np.sum(array[:, :, area_ranges_index, max_dets_index], axis=1) return 
int(array[0]) - topic_evaluation = mirpb.SingleTopicEvaluation() - - # from _summarize - area_ranges_index = 0 # area range: 'all' - max_dets_index = len(self.params.maxDets) - 1 # last max det number + ee = mirpb.SingleEvaluationElement() # average precision # precision dims: iouThrs * recThrs * catIds * areaRanges * maxDets @@ -496,7 +429,7 @@ def _get_tp_tn_or_fn(iou_thr_index: Optional[int], class_id_index: Optional[int] else: precisions = precisions[:, :, :, area_ranges_index, max_dets_index] precisions[precisions <= -1] = 0 - topic_evaluation.ap = np.mean(precisions) if len(precisions) > 0 else -1 + ee.ap = np.mean(precisions) if len(precisions) > 0 else -1 # average recall # recall dims: iouThrs * catIds * areaRanges * maxDets @@ -508,134 +441,114 @@ def _get_tp_tn_or_fn(iou_thr_index: Optional[int], class_id_index: Optional[int] else: recalls = recalls[:, :, area_ranges_index, max_dets_index] recalls[recalls <= -1] = 0 - topic_evaluation.ar = np.mean(recalls) if len(recalls) > 0 else -1 + ee.ar = np.mean(recalls) if len(recalls) > 0 else -1 # true positive - topic_evaluation.tp = _get_tp_tn_or_fn(iou_thr_index=iou_thr_index, - class_id_index=class_id_index, - area_ranges_index=area_ranges_index, - max_dets_index=max_dets_index, - array=self.eval['all_tps']) + ee.tp = _get_tp_tn_or_fn(iou_thr_index=iou_thr_index, + class_id_index=class_id_index, + area_ranges_index=area_ranges_index, + max_dets_index=max_dets_index, + array=self.eval['all_tps']) # false positive - topic_evaluation.fp = _get_tp_tn_or_fn(iou_thr_index=iou_thr_index, - class_id_index=class_id_index, - area_ranges_index=area_ranges_index, - max_dets_index=max_dets_index, - array=self.eval['all_fps']) + ee.fp = _get_tp_tn_or_fn(iou_thr_index=iou_thr_index, + class_id_index=class_id_index, + area_ranges_index=area_ranges_index, + max_dets_index=max_dets_index, + array=self.eval['all_fps']) # false negative - topic_evaluation.fn = _get_tp_tn_or_fn(iou_thr_index=iou_thr_index, - class_id_index=class_id_index, - area_ranges_index=area_ranges_index, - max_dets_index=max_dets_index, - array=self.eval['all_fns']) + ee.fn = _get_tp_tn_or_fn(iou_thr_index=iou_thr_index, + class_id_index=class_id_index, + area_ranges_index=area_ranges_index, + max_dets_index=max_dets_index, + array=self.eval['all_fns']) # pr curve - if self.params.need_pr_curve and iou_thr_index is not None and class_id_index is not None: - precisions = self.eval['precision'][iou_thr_index, :, class_id_index, area_ranges_index, max_dets_index] - for recall_thr_index, recall_thr in enumerate(self.params.recThrs): - pr_point = mirpb.FloatPoint(x=recall_thr, y=precisions[recall_thr_index]) - topic_evaluation.pr_curve.append(pr_point) - - return topic_evaluation - - def summarize(self) -> None: - ''' - Compute and display summary metrics for evaluation results. 
- Note this functin can *only* be applied on the default parameter setting - ''' - def _summarize(ap: int = 1, iouThr: float = None, areaRng: str = 'all', maxDets: int = 100) -> float: - p = self.params - - aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng] # areaRanges index - mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets] # maxDets index - if ap == 1: - # dimension of precision: [TxRxKxAxM] iouThrs * recThrs * catIds * areaRanges * maxDets - s = self.eval['precision'] - # IoU - if iouThr is not None: - t = np.where(iouThr == p.iouThrs)[0] - s = s[t] - s = s[:, :, :, aind, mind] + if self.params.need_pr_curve: + # self.eval['precision'] dims: iouThrs * recThrs * catIds * areaRanges * maxDets + # precisions dims: iouThrs * recThrs * catIds + precisions = self.eval['precision'][:, :, :, area_ranges_index, max_dets_index] + scores = self.eval['scores'][:, :, :, area_ranges_index, max_dets_index] + + # TODO: hotfix, need to test with 3rd party pr curve result + precisions = np.maximum(0, precisions) + scores = np.maximum(0, scores) + + # from dims: iouThrs * recThrs * catIds + # to dims: recThrs * catIds + if iou_thr_index is not None: + precisions = precisions[iou_thr_index, :, :] + scores = scores[iou_thr_index, :, :] else: - # dimension of recall: [TxKxAxM] iouThrs * catIds * areaRanges * maxDets - s = self.eval['recall'] - if iouThr is not None: - t = np.where(iouThr == p.iouThrs)[0] - s = s[t] - s = s[:, :, aind, mind] - if len(s[s > -1]) == 0: - mean_s = -1 + precisions = np.mean(precisions, axis=0) + scores = np.mean(scores, axis=0) + + # from dims: recThrs * catIds + # to dims: recThrs + if class_id_index is not None: + precisions = precisions[:, class_id_index] + scores = scores[:, class_id_index] else: - mean_s = np.mean(s[s > -1]) - return mean_s - - def _summarizeDets() -> np.ndarray: - stats = np.zeros((12, )) - stats[0] = _summarize(1) - stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[2]) - stats[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[2]) - stats[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[2]) - stats[4] = _summarize(1, areaRng='medium', maxDets=self.params.maxDets[2]) - stats[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[2]) - stats[6] = _summarize(0, maxDets=self.params.maxDets[0]) - stats[7] = _summarize(0, maxDets=self.params.maxDets[1]) - stats[8] = _summarize(0, maxDets=self.params.maxDets[2]) - stats[9] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[2]) - stats[10] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[2]) - stats[11] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[2]) - return stats + precisions = np.mean(precisions, axis=1) + scores = np.mean(scores, axis=1) - if not self.eval: - raise Exception('Please run accumulate() first') - self.stats = _summarizeDets() + for recall_thr_index, recall_thr in enumerate(self.params.recThrs): + pr_point = mirpb.FloatPoint(x=recall_thr, y=precisions[recall_thr_index], z=scores[recall_thr_index]) + ee.pr_curve.append(pr_point) + + return ee class Params: def __init__(self) -> None: self.iouType = 'bbox' self.catIds: List[int] = [] - self.imgIdxes: List[int] = [] # np.arange causes trouble. 
the data point on arange is slightly larger than the true value self.iouThrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True) # iou threshold self.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True) # recall threshold - self.maxDets = [1, 10, 100] - self.areaRng: List[list] = [[0**2, 1e5**2], [0**2, 32**2], [32**2, 96**2], [96**2, 1e5**2]] # area range + self.maxDets = [100] # only one maxDet, origin: [1, 10, 100] + # [[0**2, 1e5**2], [0**2, 32**2], [32**2, 96**2], [96**2, 1e5**2]] # area range + self.areaRng: List[list] = [[0**2, 1e5**2]] # use all. self.areaRngLbl = ['all', 'small', 'medium', 'large'] # area range label self.confThr = 0.3 # confidence threshold self.need_pr_curve = False -def det_evaluate(mir_dts: List[MirCoco], mir_gt: MirCoco, config: mirpb.EvaluateConfig) -> mirpb.Evaluation: - iou_thr_from, iou_thr_to, iou_thr_step = [float(v) for v in config.iou_thrs_interval.split(':')] - for thr in [config.conf_thr, iou_thr_from, iou_thr_to, iou_thr_step]: - if thr < 0 or thr > 1: - raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, - error_message='invalid conf_thr, iou_thr_from, iou_thr_to or iou_thr_step') - if iou_thr_from >= iou_thr_to: - raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, - error_message='invalid iou_thr_from or iou_thr_to') +def det_evaluate(prediction: mirpb.SingleTaskAnnotations, ground_truth: mirpb.SingleTaskAnnotations, + config: mirpb.EvaluateConfig) -> mirpb.Evaluation: + evaluation = mirpb.Evaluation() + evaluation.config.CopyFrom(config) + params = Params() params.confThr = config.conf_thr - params.iouThrs = np.linspace(start=iou_thr_from, - stop=iou_thr_to, - num=int(np.round((iou_thr_to - iou_thr_from) / iou_thr_step)), - endpoint=False) + params.iouThrs = det_eval_utils.get_iou_thrs_array(config.iou_thrs_interval) params.need_pr_curve = config.need_pr_curve + params.catIds = list(config.class_ids) - evaluation = mirpb.Evaluation() - evaluation.config.CopyFrom(config) + area_ranges_index = 0 # area range: 'all' + max_dets_index = len(params.maxDets) - 1 # last max det number + + mir_gt = MirCoco(task_annotations=ground_truth, conf_thr=None) + mir_dt = MirCoco(task_annotations=prediction, conf_thr=config.conf_thr) + + evaluator = CocoDetEval(coco_gt=mir_gt, coco_dt=mir_dt, params=params) + evaluator.evaluate() + evaluator.accumulate() + + det_eval_utils.write_confusion_matrix(gt_annotations=ground_truth, + pred_annotations=prediction, + class_ids=params.catIds, + conf_thr=config.conf_thr, + match_result=evaluator.match_result, + iou_thr=params.iouThrs[0]) - for mir_dt in mir_dts: - evaluator = MirDetEval(coco_gt=mir_gt, coco_dt=mir_dt, params=params) - evaluator.evaluate() - evaluator.accumulate() + single_dataset_evaluation = evaluator.get_evaluation_result(area_ranges_index=area_ranges_index, + max_dets_index=max_dets_index) + det_eval_utils.calc_averaged_evaluations(dataset_evaluation=single_dataset_evaluation, class_ids=params.catIds) - single_dataset_evaluation = evaluator.get_evaluation_result() - single_dataset_evaluation.conf_thr = config.conf_thr - single_dataset_evaluation.gt_dataset_id = mir_gt.dataset_id - single_dataset_evaluation.pred_dataset_id = mir_dt.dataset_id - evaluation.dataset_evaluations[mir_dt.dataset_id].CopyFrom(single_dataset_evaluation) + single_dataset_evaluation.conf_thr = config.conf_thr + evaluation.dataset_evaluation.CopyFrom(single_dataset_evaluation) + evaluation.state = mirpb.EvaluationState.ES_READY return evaluation diff --git 
a/ymir/command/mir/tools/det_eval_ctl_ops.py b/ymir/command/mir/tools/det_eval_ctl_ops.py new file mode 100644 index 0000000000..cd67241113 --- /dev/null +++ b/ymir/command/mir/tools/det_eval_ctl_ops.py @@ -0,0 +1,85 @@ +from functools import partial +from typing import Collection, Optional + +from mir.tools import det_eval_ops, mir_storage_ops, revs_parser, settings as mir_settings +from mir.protos import mir_command_pb2 as mirpb + + +def det_evaluate_datasets( + mir_root: str, + gt_rev_tid: revs_parser.TypRevTid, + pred_rev_tid: revs_parser.TypRevTid, + evaluate_config: mirpb.EvaluateConfig, +) -> Optional[mirpb.Evaluation]: + gt_mir_annotations: mirpb.MirAnnotations = mir_storage_ops.MirStorageOps.load_single_storage( + mir_root=mir_root, mir_branch=gt_rev_tid.rev, mir_task_id=gt_rev_tid.tid, ms=mirpb.MirStorage.MIR_ANNOTATIONS) + ground_truth = gt_mir_annotations.ground_truth + + if pred_rev_tid != gt_rev_tid: + pred_mir_annotations = mir_storage_ops.MirStorageOps.load_single_storage(mir_root=mir_root, + mir_branch=pred_rev_tid.rev, + mir_task_id=pred_rev_tid.tid, + ms=mirpb.MirStorage.MIR_ANNOTATIONS) + else: + pred_mir_annotations = gt_mir_annotations + prediction = pred_mir_annotations.prediction + + # evaluate + evaluation = det_eval_ops.det_evaluate_with_pb( + prediction=prediction, + ground_truth=ground_truth, + config=evaluate_config, + ) + if evaluation.state != mirpb.EvaluationState.ES_READY: + return None + + # evaluate with ck + if evaluate_config.main_ck: + mir_keywords: mirpb.MirKeywords = mir_storage_ops.MirStorageOps.load_single_storage( + mir_root=mir_root, + mir_branch=pred_rev_tid.rev, + mir_task_id=pred_rev_tid.tid, + ms=mirpb.MirStorage.MIR_KEYWORDS) + + if evaluate_config.main_ck not in mir_keywords.ck_idx: + return None + + ck_evaluate_config = mirpb.EvaluateConfig() + ck_evaluate_config.CopyFrom(evaluate_config) + ck_evaluate_config.need_pr_curve = False + ck_idx = mir_keywords.ck_idx[ck_evaluate_config.main_ck] + ck_evaluate_func = partial(_evaluate_on_asset_ids, ground_truth, prediction, ck_evaluate_config) + + # fill main ck. + ck_evaluate_func(ck_idx.asset_annos, evaluation.main_ck) + # fill sub ck. 
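+ # note: only the first mir_settings.DEFAULT_EVALUATE_SUB_CKS sub cks are evaluated (the loop below breaks early), presumably to bound evaluation cost on datasets with many custom keywords.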
+ for idx, (sub_ck, asset_anno_ids) in enumerate(ck_idx.sub_indexes.items()): + if idx >= mir_settings.DEFAULT_EVALUATE_SUB_CKS: + break + ck_evaluate_func(asset_anno_ids.key_ids, evaluation.sub_cks[sub_ck]) + + return evaluation + + +def _evaluate_on_asset_ids(gt: mirpb.SingleTaskAnnotations, pred: mirpb.SingleTaskAnnotations, + evaluate_config: mirpb.EvaluateConfig, asset_ids: Collection[str], + target: mirpb.SingleDatasetEvaluation) -> None: + pred = _filter_task_annotations_by_asset_ids(task_annotations=pred, asset_ids=asset_ids) + gt = _filter_task_annotations_by_asset_ids(task_annotations=gt, asset_ids=asset_ids) + evaluation = det_eval_ops.det_evaluate_with_pb( + prediction=pred, + ground_truth=gt, + config=evaluate_config, + ) + if evaluation.state == mirpb.EvaluationState.ES_READY: + target.CopyFrom(evaluation.dataset_evaluation) + + +def _filter_task_annotations_by_asset_ids(task_annotations: mirpb.SingleTaskAnnotations, + asset_ids: Collection[str]) -> mirpb.SingleTaskAnnotations: + filtered_task_annotations = mirpb.SingleTaskAnnotations() + for asset_id in asset_ids: + if asset_id not in task_annotations.image_annotations: + continue + filtered_task_annotations.image_annotations[asset_id].CopyFrom(task_annotations.image_annotations[asset_id]) + return filtered_task_annotations diff --git a/ymir/command/mir/tools/det_eval_ops.py b/ymir/command/mir/tools/det_eval_ops.py new file mode 100644 index 0000000000..03c4052191 --- /dev/null +++ b/ymir/command/mir/tools/det_eval_ops.py @@ -0,0 +1,68 @@ +import logging +import time + +from mir.tools import det_eval_coco, det_eval_voc, settings as mir_settings +from mir.tools.code import MirCode +from mir.tools.errors import MirRuntimeError +from mir.protos import mir_command_pb2 as mirpb + + +def det_evaluate_with_pb( + prediction: mirpb.SingleTaskAnnotations, + ground_truth: mirpb.SingleTaskAnnotations, + config: mirpb.EvaluateConfig, + mode: str = 'voc', # voc or coco +) -> mirpb.Evaluation: + if config.conf_thr < 0 or config.conf_thr > 1: + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message='invalid conf_thr') + + if not config.class_ids: + config.class_ids.extend(prediction.eval_class_ids) + + evaluation = mirpb.Evaluation() + evaluation.config.CopyFrom(config) + if not config.class_ids: + logging.warning('skip evaluation: no evaluate class ids') + evaluation.state = mirpb.EvaluationState.ES_NO_CLASS_IDS + return evaluation + gt_cnt = len(ground_truth.image_annotations) + pred_cnt = len(prediction.image_annotations) + if gt_cnt == 0 or pred_cnt == 0: + logging.warning('skip evaluation: no gt or pred') + evaluation.state = mirpb.EvaluationState.ES_NO_GT_OR_PRED + return evaluation + if (len(config.class_ids) > mir_settings.MAX_EVALUATION_CLASS_IDS_COUNT + or max(gt_cnt, pred_cnt) > mir_settings.MAX_EVALUATION_ASSETS_COUNT): + logging.warning(f"skip evaluation: too many class ids, gt or pred, cis: {len(config.class_ids)}, " + f"pred: {pred_cnt}, gt: {gt_cnt}") + evaluation.state = mirpb.EvaluationState.ES_EXCEEDS_LIMIT + return evaluation + + start_time = time.time() + + for image_annotations in prediction.image_annotations.values(): + for annotation in image_annotations.boxes: + annotation.cm = mirpb.ConfusionMatrixType.IGNORED + annotation.det_link_id = -1 + for image_annotations in ground_truth.image_annotations.values(): + for annotation in image_annotations.boxes: + annotation.cm = mirpb.ConfusionMatrixType.IGNORED + annotation.det_link_id = -1 + eval_model_name = det_eval_voc if mode == 'voc' else 
det_eval_coco + evaluation = eval_model_name.det_evaluate( # type: ignore + prediction=prediction, ground_truth=ground_truth, config=config) + + logging.info(f"|-det_evaluate_with_pb-eval costs {(time.time() - start_time):.2f}s.") + + _show_evaluation(evaluation=evaluation) + + return evaluation + + +def _show_evaluation(evaluation: mirpb.Evaluation) -> None: + ciae = evaluation.dataset_evaluation.iou_averaged_evaluation.ci_averaged_evaluation + logging.info(f"evaluation result: mAP: {ciae.ap}") + + for class_id, see in evaluation.dataset_evaluation.iou_averaged_evaluation.ci_evaluations.items(): + if see.ap > 0: + logging.info(f" class id: {class_id}, mAP: {see.ap}") diff --git a/ymir/command/mir/tools/det_eval_utils.py b/ymir/command/mir/tools/det_eval_utils.py new file mode 100644 index 0000000000..2cdf15af3f --- /dev/null +++ b/ymir/command/mir/tools/det_eval_utils.py @@ -0,0 +1,117 @@ +from collections import defaultdict +from typing import Collection, Dict, List, Set, Tuple + +import numpy as np + +from mir.tools.code import MirCode +from mir.tools.errors import MirRuntimeError +from mir.protos import mir_command_pb2 as mirpb + + +class _DetEvalIouMatchResult: + def __init__(self) -> None: + self._gt_pred_match: Dict[str, Set[Tuple[int, int]]] = defaultdict(set) + + def add_match(self, asset_id: str, gt_pb_idx: int, pred_pb_idx: int) -> None: + self._gt_pred_match[asset_id].add((gt_pb_idx, pred_pb_idx)) + + @property + def gt_pred_match(self) -> Dict[str, Set[Tuple[int, int]]]: + return self._gt_pred_match + + +class DetEvalMatchResult: + def __init__(self) -> None: + self._iou_matches: Dict[float, _DetEvalIouMatchResult] = defaultdict(_DetEvalIouMatchResult) + + def add_match(self, asset_id: str, iou_thr: float, gt_pb_idx: int, pred_pb_idx: int) -> None: + self._iou_matches[iou_thr].add_match(asset_id=asset_id, gt_pb_idx=gt_pb_idx, pred_pb_idx=pred_pb_idx) + + def get_asset_ids(self, iou_thr: float) -> Collection[str]: + return self._iou_matches[iou_thr].gt_pred_match.keys() if iou_thr in self._iou_matches else [] + + def get_matches(self, asset_id: str, iou_thr: float) -> Collection[Tuple[int, int]]: + return self._iou_matches[iou_thr].gt_pred_match[asset_id] + + +def get_iou_thrs_array(iou_thrs_str: str) -> np.ndarray: + iou_thrs = [float(v) for v in iou_thrs_str.split(':')] + if len(iou_thrs) == 3: + iou_thr_from, iou_thr_to, iou_thr_step = iou_thrs + elif len(iou_thrs) == 1: + iou_thr_from, iou_thr_to, iou_thr_step = iou_thrs[0], iou_thrs[0], 0 + else: + raise ValueError(f"invalid iou thrs str: {iou_thrs_str}") + for thr in [iou_thr_from, iou_thr_to, iou_thr_step]: + if thr < 0 or thr > 1: + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, + error_message='invalid iou_thr_from, iou_thr_to or iou_thr_step') + if iou_thr_from > iou_thr_to: + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, + error_message='invalid iou_thr_from or iou_thr_to') + + if iou_thr_to == iou_thr_from: + return np.array([iou_thr_from]) + return np.linspace(start=iou_thr_from, + stop=iou_thr_to, + num=int(np.round((iou_thr_to - iou_thr_from) / iou_thr_step)), + endpoint=False) + + +def calc_averaged_evaluations(dataset_evaluation: mirpb.SingleDatasetEvaluation, class_ids: Collection[int]) -> None: + for iou_evaluation in dataset_evaluation.iou_evaluations.values(): + _get_average_ee(average_ee=iou_evaluation.ci_averaged_evaluation, + ees=list(iou_evaluation.ci_evaluations.values())) + + for class_id in class_ids: + 
_get_average_ee(average_ee=dataset_evaluation.iou_averaged_evaluation.ci_evaluations[class_id], + ees=[x.ci_evaluations[class_id] for x in dataset_evaluation.iou_evaluations.values()]) + + _get_average_ee(average_ee=dataset_evaluation.iou_averaged_evaluation.ci_averaged_evaluation, + ees=[x.ci_averaged_evaluation for x in dataset_evaluation.iou_evaluations.values()]) + + +def _get_average_ee(average_ee: mirpb.SingleEvaluationElement, ees: List[mirpb.SingleEvaluationElement]) -> None: + if not ees: + return + + if len(ees) == 1: + average_ee.CopyFrom(ees[0]) + del average_ee.pr_curve[:] + return + + for ee in ees: + average_ee.ap += ee.ap + average_ee.ar += ee.ar + average_ee.tp += ee.tp + average_ee.fp += ee.fp + average_ee.fn += ee.fn + + ees_cnt = len(ees) + average_ee.ap /= ees_cnt + average_ee.ar /= ees_cnt + + +def write_confusion_matrix(gt_annotations: mirpb.SingleTaskAnnotations, pred_annotations: mirpb.SingleTaskAnnotations, + class_ids: List[int], conf_thr: float, match_result: DetEvalMatchResult, + iou_thr: float) -> None: + class_ids_set = set(class_ids) + for image_annotations in gt_annotations.image_annotations.values(): + for annotation in image_annotations.boxes: + annotation.cm = (mirpb.ConfusionMatrixType.FN + if annotation.class_id in class_ids_set else mirpb.ConfusionMatrixType.IGNORED) + annotation.det_link_id = -1 + for image_annotations in pred_annotations.image_annotations.values(): + for annotation in image_annotations.boxes: + annotation.cm = (mirpb.ConfusionMatrixType.FP if annotation.class_id in class_ids_set + and annotation.score >= conf_thr else mirpb.ConfusionMatrixType.IGNORED) + annotation.det_link_id = -1 + + for asset_id in match_result.get_asset_ids(iou_thr=iou_thr): + id_to_gts = {box.index: box for box in gt_annotations.image_annotations[asset_id].boxes} + id_to_preds = {box.index: box for box in pred_annotations.image_annotations[asset_id].boxes} + for gt_pb_index, pred_pb_index in match_result.get_matches(asset_id=asset_id, iou_thr=iou_thr): + id_to_gts[gt_pb_index].cm = mirpb.ConfusionMatrixType.MTP + id_to_gts[gt_pb_index].det_link_id = pred_pb_index + id_to_preds[pred_pb_index].cm = mirpb.ConfusionMatrixType.TP + id_to_preds[pred_pb_index].det_link_id = gt_pb_index diff --git a/ymir/command/mir/tools/det_eval_voc.py b/ymir/command/mir/tools/det_eval_voc.py new file mode 100644 index 0000000000..b5b0184ddc --- /dev/null +++ b/ymir/command/mir/tools/det_eval_voc.py @@ -0,0 +1,267 @@ +# Copyright (c) 2017-present, Facebook, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+############################################################################## +# +# Based on: +# -------------------------------------------------------- +# Fast/er R-CNN +# Licensed under The MIT License [see LICENSE for details] +# Written by Bharath Hariharan +# -------------------------------------------------------- +"""Python implementation of the PASCAL VOC devkit's AP evaluation code.""" + +from typing import Any, Dict, List + +import numpy as np + +from mir.protos import mir_command_pb2 as mirpb +from mir.tools import det_eval_utils +from mir.tools.det_eval_utils import DetEvalMatchResult + + +def _voc_ap(rec: np.ndarray, prec: np.ndarray, use_07_metric: bool) -> float: + """Compute VOC AP given precision and recall. If use_07_metric is true, uses + the VOC 07 11-point method (default:False). + """ + if use_07_metric: + # 11 point metric + ap = 0. + for t in np.arange(0., 1.1, 0.1): + if np.sum(rec >= t) == 0: + p = 0 + else: + p = np.max(prec[rec >= t]) + ap = ap + p / 11. + else: + # correct AP calculation + # first append sentinel values at the end + mrec: np.ndarray = np.concatenate(([0.], rec, [1.])) # type: ignore + mpre: np.ndarray = np.concatenate(([0.], prec, [0.])) # type: ignore + + # compute the precision envelope + for i in range(mpre.size - 1, 0, -1): + mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i]) + + # to calculate area under PR curve, look for points + # where X axis (recall) changes value + i = np.where(mrec[1:] != mrec[:-1])[0] # type: ignore + + # and sum (\Delta recall) * prec + ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) + return ap + + +def _voc_eval(class_recs: Dict[str, Dict[str, Any]], BB: np.ndarray, confidence: np.ndarray, image_ids: List[str], + pred_pb_index_ids: List[int], match_result: det_eval_utils.DetEvalMatchResult, ovthresh: float, npos: int, + use_07_metric: bool) -> Dict[str, Any]: + """ + gt: class_recs + pred: BB, confidence, image_ids, pred_pb_index_ids + """ + if len(image_ids) == 0: + return { + 'rec': [], + 'prec': [], + 'conf': [], + 'ap': 0, + 'ar': 0, + 'tp': 0, + 'fp': 0, + 'fn': 0, + } + + # `BB` and `image_ids`: sort desc by confidence + sorted_ind = np.argsort(-confidence) + BB = BB[sorted_ind, :] + image_ids = [image_ids[x] for x in sorted_ind] + pred_pb_index_ids = [pred_pb_index_ids[x] for x in sorted_ind] + + # go down dets and mark TPs and FPs + nd = len(image_ids) + tp = np.zeros(nd) # 0 or 1, tp[d] == 1 means BB[d] is true positive + fp = np.zeros(nd) # 0 or 1, tp[d] == 1 means BB[d] is false positive + for d in range(nd): + if image_ids[d] not in class_recs: + continue + + R = class_recs[image_ids[d]] # gt of that image name + bb = BB[d, :].astype(float) # single prediction box, shape: (1, 4) + ovmax = -np.inf + BBGT: np.ndarray = R['bbox'].astype(float) # gt boxes of that image name, shape: (*, 4), x1, y1, x2, y2 + + if BBGT.size > 0: + # compute overlaps + # intersection + ixmin = np.maximum(BBGT[:, 0], bb[0]) + iymin = np.maximum(BBGT[:, 1], bb[1]) + ixmax = np.minimum(BBGT[:, 2], bb[2]) + iymax = np.minimum(BBGT[:, 3], bb[3]) + iw = np.maximum(ixmax - ixmin + 1., 0.) + ih = np.maximum(iymax - iymin + 1., 0.) + inters = iw * ih + + # union + area1 = (bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) + area2 = (BBGT[:, 2] - BBGT[:, 0] + 1.) * (BBGT[:, 3] - BBGT[:, 1] + 1.) + uni = (area1 + area2 - inters) + + overlaps = inters / uni + ovmax = np.max(overlaps) + jmax = np.argmax(overlaps) + + if ovmax > ovthresh: + if not R['difficult'][jmax]: + if not R['det'][jmax]: + # pred `d` matched to gt `jmax` + tp[d] = 1. 
+ R['det'][jmax] = 1 + + match_result.add_match(asset_id=image_ids[d], + iou_thr=ovthresh, + gt_pb_idx=class_recs[image_ids[d]]['pb_index_ids'][jmax], + pred_pb_idx=pred_pb_index_ids[d]) + else: + # jmax previously matched to another + fp[d] = 1. + else: + # pred `d` not matched to anything + fp[d] = 1. + + # compute precision recall + fp = np.cumsum(fp) + tp = np.cumsum(tp) + rec = tp / float(npos) # recalls + prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps) # precisions + ap: float = _voc_ap(rec, prec, use_07_metric) + + tp_cnt = int(tp[-1]) if len(tp) > 0 else 0 + fp_cnt = int(fp[-1]) if len(fp) > 0 else 0 + fn_cnt = npos - tp_cnt + + return { + 'rec': rec, + 'prec': prec, + 'conf': confidence[sorted_ind], + 'ap': ap, + 'ar': np.mean(rec), + 'tp': tp_cnt, + 'fp': fp_cnt, + 'fn': fn_cnt, + } + + +def _get_single_evaluate_element(prediction: mirpb.SingleTaskAnnotations, ground_truth: mirpb.SingleTaskAnnotations, + match_result: det_eval_utils.DetEvalMatchResult, class_id: int, iou_thr: float, + conf_thr: float, need_pr_curve: bool) -> mirpb.SingleEvaluationElement: + # convert data structure + # convert gt, save to `class_recs` + class_recs: Dict[str, Dict[str, Any]] = {} + npos = 0 + for asset_id, image_annotations in ground_truth.image_annotations.items(): + img_gts = [x for x in image_annotations.boxes if x.class_id == class_id] + if len(img_gts) == 0: + continue + + # bbox: shape: (len(annos), 4), type: int, x1y1x2y2 + bbox = np.array([[x.box.x, x.box.y, x.box.x + x.box.w, x.box.y + x.box.h] for x in img_gts]) + difficult = np.array([False] * len(img_gts)) # shape: (len(annos),) + det = [False] * len(img_gts) # 1: have matched detections, 0: not matched yet + npos = npos + sum(~difficult) + pb_index_ids = [x.index for x in img_gts] + + class_recs[asset_id] = { + 'bbox': bbox, + 'difficult': difficult, + 'det': det, + 'pb_index_ids': pb_index_ids, + } + + # convert det + image_ids: List[str] = [] + confidence = [] + bboxes: List[List[int]] = [] + pred_pb_index_ids: List[int] = [] + for asset_id, image_annotations in prediction.image_annotations.items(): + img_preds = [x for x in image_annotations.boxes if x.class_id == class_id and x.score > conf_thr] + for annotation in img_preds: + box = annotation.box + bboxes.append([box.x, box.y, box.x + box.w, box.y + box.h]) + image_ids.extend([asset_id] * len(img_preds)) + confidence.extend([x.score for x in img_preds]) + pred_pb_index_ids.extend([x.index for x in img_preds]) + BB = np.array(bboxes) + + # voc eval + # matches: set to save match result, each element: (asset_id, gt_pb_index, pred_pb_index) + eval_result = _voc_eval(class_recs=class_recs, + BB=BB, + confidence=np.array(confidence), + image_ids=image_ids, + pred_pb_index_ids=pred_pb_index_ids, + match_result=match_result, + ovthresh=iou_thr, + npos=npos, + use_07_metric=True) + + # voc_eval to get result + see = mirpb.SingleEvaluationElement(ap=eval_result['ap'], + ar=eval_result['ar'], + tp=eval_result['tp'], + fp=eval_result['fp'], + fn=eval_result['fn']) + + if need_pr_curve: + rec = eval_result['rec'] + prec = eval_result['prec'] + conf = eval_result['conf'] + + for i in range(len(rec)): + see.pr_curve.append(mirpb.FloatPoint(x=rec[i], y=prec[i], z=conf[i])) + + return see + + +def det_evaluate(prediction: mirpb.SingleTaskAnnotations, ground_truth: mirpb.SingleTaskAnnotations, + config: mirpb.EvaluateConfig) -> mirpb.Evaluation: + evaluation = mirpb.Evaluation() + evaluation.config.CopyFrom(config) + + class_ids = list(config.class_ids) + iou_thrs = 
det_eval_utils.get_iou_thrs_array(config.iou_thrs_interval) + + single_dataset_evaluation = evaluation.dataset_evaluation + single_dataset_evaluation.conf_thr = config.conf_thr + + for iou_thr in iou_thrs: + match_result = DetEvalMatchResult() + for class_id in class_ids: + see = _get_single_evaluate_element(prediction=prediction, + ground_truth=ground_truth, + class_id=class_id, + iou_thr=iou_thr, + conf_thr=config.conf_thr, + need_pr_curve=config.need_pr_curve, + match_result=match_result) + single_dataset_evaluation.iou_evaluations[f"{iou_thr:.2f}"].ci_evaluations[class_id].CopyFrom(see) + + det_eval_utils.write_confusion_matrix(gt_annotations=ground_truth, + pred_annotations=prediction, + class_ids=class_ids, + conf_thr=config.conf_thr, + match_result=match_result, + iou_thr=iou_thrs[0]) + det_eval_utils.calc_averaged_evaluations(dataset_evaluation=single_dataset_evaluation, class_ids=class_ids) + + evaluation.state = mirpb.EvaluationState.ES_READY + return evaluation diff --git a/ymir/command/mir/tools/env_config.py b/ymir/command/mir/tools/env_config.py new file mode 100644 index 0000000000..cb89294353 --- /dev/null +++ b/ymir/command/mir/tools/env_config.py @@ -0,0 +1,79 @@ +import linecache +import os +from pydantic import BaseModel + +import yaml + +from mir import version +from mir.tools import settings as mir_settings + + +# see also: sample_executor/ef/env.py +class _EnvInputConfig(BaseModel): + root_dir: str = '/in' + assets_dir: str = '/in/assets' + annotations_dir: str = '/in/annotations' + models_dir: str = '/in/models' + training_index_file: str = '' + val_index_file: str = '' + candidate_index_file: str = '' + config_file: str = '/in/config.yaml' + + +class _EnvOutputConfig(BaseModel): + root_dir: str = '/out' + models_dir: str = '/out/models' + tensorboard_dir: str = '/out/tensorboard' + training_result_file: str = '/out/models/result.yaml' + mining_result_file: str = '/out/result.tsv' + infer_result_file: str = '/out/infer-result.json' + monitor_file: str = '/out/monitor.txt' + executor_log_file: str = '/out/ymir-executor-out.log' + + +class _EnvConfig(BaseModel): + protocol_version = version.TMI_PROTOCOL_VERSION + task_id: str = 'default-task' + run_training: bool = False + run_mining: bool = False + run_infer: bool = False + + input: _EnvInputConfig = _EnvInputConfig() + output: _EnvOutputConfig = _EnvOutputConfig() + + +def generate_training_env_config_file(task_id: str, env_config_file_path: str) -> None: + env_config = _EnvConfig() + env_config.task_id = task_id + env_config.run_training = True + env_config.input.training_index_file = '/in/train-index.tsv' + env_config.input.val_index_file = '/in/val-index.tsv' + + with open(env_config_file_path, 'w') as f: + yaml.safe_dump(env_config.dict(), f) + + +def generate_mining_infer_env_config_file(task_id: str, run_mining: bool, run_infer: bool, + env_config_file_path: str) -> None: + # TODO: separate command mining and infer + env_config = _EnvConfig() + env_config.task_id = task_id + env_config.run_mining = run_mining + env_config.run_infer = run_infer + env_config.input.candidate_index_file = '/in/candidate-index.tsv' + + with open(env_config_file_path, 'w') as f: + yaml.safe_dump(env_config.dict(), f) + + +def collect_executor_outlog_tail(work_dir: str, tail_line_count: int = 5) -> str: + out_log_path = os.path.join(work_dir, 'out', mir_settings.EXECUTOR_OUTLOG_NAME) + if not os.path.isfile(out_log_path): + return '' + + tail_lines = linecache.getlines(out_log_path)[-1 * tail_line_count:] + if not tail_lines: +
return '' + + joint_tail_lines = ''.join(tail_lines) + return f"EXECUTOR OUTLOG TAIL FROM: {out_log_path}\n{joint_tail_lines}" diff --git a/ymir/command/mir/tools/executant.py b/ymir/command/mir/tools/executant.py new file mode 100644 index 0000000000..5e6260f2eb --- /dev/null +++ b/ymir/command/mir/tools/executant.py @@ -0,0 +1,170 @@ +import logging +import os +import subprocess +from typing import Dict, List + +from mir.tools import settings as mir_settings +from mir.tools.code import MirCode +from mir.tools.errors import MirRuntimeError +from requests.exceptions import ConnectionError, HTTPError, Timeout + + +def _execute_in_openpai( + work_dir_in: str, + work_dir_out: str, + executor: str, + executant_name: str, + executor_config: Dict, + gpu_id: str, + run_as_root: bool, + task_config: Dict, +) -> int: + # openpai_host = task_config.get("openpai_host", ""), + # openpai_token = task_config.get("openpai_token", ""), + # openpai_storage = task_config.get("openpai_storage", ""), + # openpai_user = task_config.get("openpai_user", ""), + + return _execute_locally( + work_dir_in=work_dir_in, + work_dir_out=work_dir_out, + executor=executor, + executant_name=executant_name, + executor_config=executor_config, + gpu_id=gpu_id, + run_as_root=run_as_root, + task_config=task_config, + ) + + +def _get_shm_size(executor_config: Dict, shm_size_count: int) -> str: + """ + shm_size_count: shm_size_count = gpu_count if use_gpu else 1 + """ + # increase shared memory according to shm_size_count + if 'shm_size' in executor_config: + return executor_config['shm_size'] + else: + shm_size = 16 * shm_size_count + return f'{shm_size}G' + + +def _append_binds(cmd: List, bind_path: str) -> None: + if os.path.exists(bind_path) and os.path.islink(bind_path): + actual_bind_path = os.readlink(bind_path) + cmd.append(f"-v{actual_bind_path}:{actual_bind_path}") + + +def _get_docker_executable(runtime: str) -> str: + if runtime == 'nvidia': + return 'nvidia-docker' + return 'docker' + + +def _execute_locally( + work_dir_in: str, + work_dir_out: str, + executor: str, + executant_name: str, + executor_config: Dict, + gpu_id: str, + run_as_root: bool, + task_config: dict, +) -> int: + cmd = [_get_docker_executable(runtime=task_config.get('server_runtime', '')), 'run', '--rm'] + # path bindings + cmd.append(f"-v{work_dir_in}:/in:ro") + cmd.append(f"-v{work_dir_out}:/out") + # assets and tensorboard dir may be sym-links, check and mount on demand.
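+ # e.g. if {work_dir_in}/assets is a symlink to /data/assets, _append_binds also adds '-v/data/assets:/data/assets' so the link target resolves inside the container (paths here are illustrative).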
+ _append_binds(cmd, os.path.join(work_dir_in, 'assets')) + _append_binds(cmd, os.path.join(work_dir_in, 'models')) + _append_binds(cmd, os.path.join(work_dir_out, 'tensorboard')) + + # permissions and shared memory + if not run_as_root: + cmd.extend(['--user', f"{os.getuid()}:{os.getgid()}"]) + if gpu_id: + cmd.extend(['--gpus', f"\"device={gpu_id}\""]) + shm_size_count = len(gpu_id.split(',')) + else: + shm_size_count = 1 + cmd.append(f"--shm-size={_get_shm_size(executor_config=executor_config, shm_size_count=shm_size_count)}") + cmd.extend(['--name', executant_name]) + cmd.append(executor) + + out_log_path = os.path.join(work_dir_out, mir_settings.EXECUTOR_OUTLOG_NAME) + logging.info(f"starting {executant_name} docker container with cmd: {' '.join(cmd)}") + with open(out_log_path, 'a') as f: + # run and wait, if non-zero value returned, raise + subprocess.run(cmd, check=True, stdout=f, stderr=f, text=True) + + return MirCode.RC_OK + + +def prepare_executant_env(work_dir_in: str, + work_dir_out: str, + asset_cache_dir: str = None, + tensorboard_dir: str = None) -> None: + os.makedirs(work_dir_in, exist_ok=True) + # assets folder, fixed location at work_dir_in/assets. + asset_dir = os.path.join(work_dir_in, 'assets') + if asset_cache_dir: + if asset_cache_dir != asset_dir: + os.symlink(asset_cache_dir, asset_dir) + else: + os.makedirs(asset_dir, exist_ok=True) + work_dir_annotations = os.path.join(work_dir_in, 'annotations') + os.makedirs(work_dir_annotations, exist_ok=True) + work_dir_pred = os.path.join(work_dir_in, 'predictions') + os.makedirs(work_dir_pred, exist_ok=True) + work_dir_in_model = os.path.join(work_dir_in, 'models') + os.makedirs(work_dir_in_model, exist_ok=True) + + os.makedirs(work_dir_out, exist_ok=True) + out_model_dir = os.path.join(work_dir_out, 'models') + os.makedirs(out_model_dir, exist_ok=True) + # Build tensorboard folder, fixed location at work_dir_out/tensorboard + tensorboard_dir_local = os.path.join(work_dir_out, 'tensorboard') + if tensorboard_dir: + if tensorboard_dir != tensorboard_dir_local: + os.system(f"chmod -R 777 {tensorboard_dir}") + os.symlink(tensorboard_dir, tensorboard_dir_local) + else: + os.makedirs(tensorboard_dir_local, exist_ok=True) + os.system(f"chmod -R 777 {work_dir_out}") + + +def run_docker_executant(work_dir_in: str, + work_dir_out: str, + executor: str, + executant_name: str, + executor_config: Dict, + gpu_id: str, + run_as_root: bool, + task_config: Dict = {}) -> int: + if task_config.get("openpai_enable", False): + logging.info(f"Run executor task {executant_name} on OpenPai.") + try: + return _execute_in_openpai( + work_dir_in=work_dir_in, + work_dir_out=work_dir_out, + executor=executor, + executant_name=executant_name, + executor_config=executor_config, + gpu_id=gpu_id, + run_as_root=run_as_root, + task_config=task_config, + ) + except (ConnectionError, HTTPError, Timeout): + raise MirRuntimeError(error_code=MirCode.RC_CMD_OPENPAI_ERROR, error_message='OpenPai Error') + else: + logging.info(f"Run executor task {executant_name} locally.") + return _execute_locally( + work_dir_in=work_dir_in, + work_dir_out=work_dir_out, + executor=executor, + executant_name=executant_name, + executor_config=executor_config, + gpu_id=gpu_id, + run_as_root=run_as_root, + task_config=task_config, + ) diff --git a/ymir/command/mir/tools/exporter.py b/ymir/command/mir/tools/exporter.py new file mode 100644 index 0000000000..63923c2121 --- /dev/null +++ b/ymir/command/mir/tools/exporter.py @@ -0,0 +1,512 @@ +import json +import os +import shutil
+from typing import Callable, Dict, Optional, TextIO, Tuple +import uuid +import xml.etree.ElementTree as ElementTree + +from mir.tools.class_ids import UserLabels +from mir.tools.code import MirCode, time_it +from mir.tools import annotations, mir_storage +from mir.protos import mir_command_pb2 as mirpb +from mir.tools.errors import MirRuntimeError + + +def _asset_file_ext(asset_format: "mirpb.AssetType.V") -> str: + _asset_ext_map = { + mirpb.AssetType.AssetTypeImageJpeg: 'jpg', + mirpb.AssetType.AssetTypeImagePng: 'png', + mirpb.AssetType.AssetTypeImageBmp: 'bmp', + } + return _asset_ext_map.get(asset_format, "unknown") + + +def _anno_file_ext(anno_format: "mirpb.AnnoFormat.V") -> str: + _anno_ext_map = { + mirpb.AnnoFormat.AF_DET_ARK_JSON: 'txt', + mirpb.AnnoFormat.AF_DET_PASCAL_VOC: 'xml', + mirpb.AnnoFormat.AF_DET_LS_JSON: 'json', + mirpb.AnnoFormat.AF_SEG_POLYGON: 'xml', + mirpb.AnnoFormat.AF_SEG_MASK: 'png', + } + return _anno_ext_map.get(anno_format, "unknown") + + +def _format_file_output_func( + anno_format: "mirpb.AnnoFormat.V" +) -> Callable[[ + mirpb.MetadataAttributes, mirpb.SingleImageAnnotations, Optional[mirpb.SingleImageCks], Optional[Dict[ + int, int]], Optional[UserLabels], str, str +], None]: + _format_func_map = { + mirpb.AnnoFormat.AF_DET_ARK_JSON: _single_image_annotations_to_det_ark, + mirpb.AnnoFormat.AF_DET_PASCAL_VOC: _single_image_annotations_to_voc, + mirpb.AnnoFormat.AF_DET_LS_JSON: _single_image_annotations_to_det_ls_json, + mirpb.AnnoFormat.AF_SEG_POLYGON: _single_image_annotations_to_voc, + mirpb.AnnoFormat.AF_SEG_MASK: _single_image_annotations_to_seg_mask, + } + if anno_format not in _format_func_map: + raise NotImplementedError(f"unknown anno_format: {anno_format}") + return _format_func_map[anno_format] + + +def parse_asset_format(asset_format_str: str) -> "mirpb.AssetFormat.V": + _asset_dict: Dict[str, mirpb.AssetFormat.V] = { + "raw": mirpb.AssetFormat.AF_RAW, + "lmdb": mirpb.AssetFormat.AF_LMDB, + } + return _asset_dict.get(asset_format_str.lower(), mirpb.AssetFormat.AF_UNKNOWN) + + +def parse_export_type(type_str: str) -> Tuple["mirpb.AnnoFormat.V", "mirpb.AssetFormat.V"]: + if not type_str: + return (mirpb.AnnoFormat.AF_DET_PASCAL_VOC, mirpb.AssetFormat.AF_RAW) + + anno_str, asset_str = type_str.split(':') + return (annotations.parse_anno_format(anno_str), parse_asset_format(asset_str)) + + +def get_index_filename(is_asset: bool = True, + is_pred: bool = False, + tvt_type: Optional["mirpb.TvtType.V"] = None) -> str: + index_filename = "index.tsv" + if is_asset: + return index_filename + + if tvt_type: + _tvt_type_prefix: Dict["mirpb.TvtType.V", str] = { + mirpb.TvtType.TvtTypeTraining: "train", + mirpb.TvtType.TvtTypeValidation: "val", + mirpb.TvtType.TvtTypeTest: "test", + } + index_filename = f"{_tvt_type_prefix[tvt_type]}-{index_filename}" + + if is_pred: + index_filename = "pred-" + index_filename + + return index_filename + + +def _gen_abs_idx_file_path(abs_dir: str, + idx_prefix: str, + file_name: str, + file_ext: str, + need_sub_folder: bool,) -> Tuple[str, str]: + abs_path: str = mir_storage.get_asset_storage_path(location=abs_dir, + hash=file_name, + make_dirs=True, + need_sub_folder=need_sub_folder) + abs_file = f"{abs_path}.{file_ext}" + index_path: str = mir_storage.get_asset_storage_path(location=idx_prefix, + hash=file_name, + make_dirs=False, + need_sub_folder=need_sub_folder) + idx_file = f"{index_path}.{file_ext}" + return (abs_file, idx_file) + + +@time_it +def export_mirdatas_to_dir( + mir_metadatas: mirpb.MirMetadatas, + ec: 
mirpb.ExportConfig, + mir_annotations: Optional[mirpb.MirAnnotations] = None, + class_ids_mapping: Optional[Dict[int, int]] = None, + cls_id_mgr: Optional[UserLabels] = None, +) -> int: + if not (ec.asset_dir and ec.media_location and os.path.isdir(ec.media_location)): + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, + error_message=f"invalid export config {ec}") + os.makedirs(ec.asset_dir, exist_ok=True) + + if ec.asset_format == mirpb.AssetFormat.AF_LMDB: + return _export_mirdatas_to_lmdb( + mir_metadatas=mir_metadatas, + ec=ec, + mir_annotations=mir_annotations, + class_ids_mapping=class_ids_mapping, + cls_id_mgr=cls_id_mgr, + ) + elif ec.asset_format == mirpb.AssetFormat.AF_RAW: + return _export_mirdatas_to_raw( + mir_metadatas=mir_metadatas, + ec=ec, + mir_annotations=mir_annotations, + class_ids_mapping=class_ids_mapping, + cls_id_mgr=cls_id_mgr, + ) + + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, + error_message=f"unknown asset format: {ec.asset_format}") + + +def _export_mirdatas_to_raw( + mir_metadatas: mirpb.MirMetadatas, + ec: mirpb.ExportConfig, + mir_annotations: Optional[mirpb.MirAnnotations] = None, + class_ids_mapping: Optional[Dict[int, int]] = None, + cls_id_mgr: Optional[UserLabels] = None, +) -> int: + # Setup path and file handler. + ec.asset_index_file = ec.asset_index_file or os.path.join(ec.asset_dir, get_index_filename()) + ec.asset_index_prefix = ec.asset_index_prefix or ec.asset_dir + index_asset_f = open(ec.asset_index_file, 'w') + + if ec.gt_dir: + if not ec.gt_index_file: + ec.gt_index_file = os.path.join(ec.gt_dir, get_index_filename()) + os.makedirs(ec.gt_dir, exist_ok=True) + index_gt_f = open(ec.gt_index_file, 'w') + ec.gt_index_prefix = ec.gt_index_prefix or ec.gt_dir + + if ec.pred_dir: + if not ec.pred_index_file: + ec.pred_index_file = os.path.join(ec.pred_dir, get_index_filename()) + os.makedirs(ec.pred_dir, exist_ok=True) + index_pred_f = open(ec.pred_index_file, 'w') + ec.pred_index_prefix = ec.pred_index_prefix or ec.pred_dir + + index_tvt_f: Dict[Tuple[bool, "mirpb.TvtType.V"], TextIO] = {} + if ec.tvt_index_dir: + os.makedirs(ec.tvt_index_dir, exist_ok=True) + for is_pred in [True, False]: + for tvt_type in [mirpb.TvtType.TvtTypeTraining, mirpb.TvtType.TvtTypeValidation, mirpb.TvtType.TvtTypeTest]: + file_name = get_index_filename(is_asset=False, is_pred=is_pred, tvt_type=tvt_type) + index_tvt_f[(is_pred, tvt_type)] = open(os.path.join(ec.tvt_index_dir, file_name), 'w') + + for asset_id, attributes in mir_metadatas.attributes.items(): + # export asset. + asset_src_file: str = mir_storage.locate_asset_path(location=ec.media_location, hash=asset_id) + asset_abs_file, asset_idx_file = _gen_abs_idx_file_path(abs_dir=ec.asset_dir, + idx_prefix=ec.asset_index_prefix, + file_name=asset_id, + file_ext=_asset_file_ext(attributes.asset_type), + need_sub_folder=ec.need_sub_folder) + if not os.path.isfile(asset_abs_file) or os.stat(asset_src_file).st_size != os.stat(asset_abs_file).st_size: + shutil.copyfile(asset_src_file, asset_abs_file) + index_asset_f.write(f"{asset_idx_file}\n") + + if ec.anno_format == mirpb.AnnoFormat.AF_NO_ANNOTATION: + continue + + if ec.gt_dir and mir_annotations: + # export annotation file even if the annotation does not exist.
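+ # an empty SingleImageAnnotations is written when an asset has no ground truth, so every line of the gt index file keeps a valid asset/annotation path pair.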
+ if asset_id in mir_annotations.ground_truth.image_annotations: + image_annotations = mir_annotations.ground_truth.image_annotations[asset_id] + else: + image_annotations = mirpb.SingleImageAnnotations() + + gt_abs_file, gt_idx_file = _gen_abs_idx_file_path(abs_dir=ec.gt_dir, + idx_prefix=ec.gt_index_prefix, + file_name=asset_id, + file_ext=_anno_file_ext(anno_format=ec.anno_format), + need_sub_folder=ec.need_sub_folder) + _export_anno_to_file( + anno_dst_file=gt_abs_file, + anno_format=ec.anno_format, + attributes=attributes, + image_annotations=image_annotations, + image_cks=mir_annotations.image_cks[asset_id], + class_ids_mapping=class_ids_mapping, + cls_id_mgr=cls_id_mgr, + asset_filename=asset_idx_file, + ) + asset_anno_pair_line = f"{asset_idx_file}\t{gt_idx_file}\n" + index_gt_f.write(asset_anno_pair_line) + if ec.tvt_index_dir: + index_tvt_f[(False, attributes.tvt_type)].write(asset_anno_pair_line) + + if ec.pred_dir and mir_annotations: + # export annotation file even if the annotation does not exist. + if asset_id in mir_annotations.prediction.image_annotations: + image_annotations = mir_annotations.prediction.image_annotations[asset_id] + else: + image_annotations = mirpb.SingleImageAnnotations() + + pred_abs_file, pred_idx_file = _gen_abs_idx_file_path(abs_dir=ec.pred_dir, + idx_prefix=ec.pred_index_prefix, + file_name=asset_id, + file_ext=_anno_file_ext(anno_format=ec.anno_format), + need_sub_folder=ec.need_sub_folder) + _export_anno_to_file( + anno_dst_file=pred_abs_file, + anno_format=ec.anno_format, + attributes=attributes, + image_annotations=image_annotations, + image_cks=None, + class_ids_mapping=class_ids_mapping, + cls_id_mgr=cls_id_mgr, + asset_filename=asset_idx_file, + ) + asset_anno_pair_line = f"{asset_idx_file}\t{pred_idx_file}\n" + index_pred_f.write(asset_anno_pair_line) + if ec.tvt_index_dir: + index_tvt_f[(True, attributes.tvt_type)].write(asset_anno_pair_line) + + # write labelmap.txt. + if ec.gt_dir and mir_annotations and mir_annotations.ground_truth.map_id_color: + if not cls_id_mgr: + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, + error_message="cls_id_mgr is not set in exporter.") + labelmap_file = os.path.join(ec.gt_dir, 'labelmap.txt') + with open(labelmap_file, 'w') as f: + cids = sorted(mir_annotations.ground_truth.map_id_color.keys()) + for cid in cids: + point = mir_annotations.ground_truth.map_id_color[cid] + color = f"{point.x},{point.y},{point.z}" + f.write(f"{cls_id_mgr.main_name_for_id(cid)}:{color}::\n") + if ec.pred_dir and mir_annotations and mir_annotations.prediction.map_id_color: + if not cls_id_mgr: + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, + error_message="cls_id_mgr is not set in exporter.") + labelmap_file = os.path.join(ec.pred_dir, 'labelmap.txt') + with open(labelmap_file, 'w') as f: + cids = sorted(mir_annotations.prediction.map_id_color.keys()) + for cid in cids: + point = mir_annotations.prediction.map_id_color[cid] + color = f"{point.x},{point.y},{point.z}" + f.write(f"{cls_id_mgr.main_name_for_id(cid)}:{color}::\n") + + index_asset_f.close() + # Clean up.
+ if ec.gt_dir: + index_gt_f.close() + if ec.pred_dir: + index_pred_f.close() + for single_idx_f in index_tvt_f.values(): + single_idx_f.close() + + return MirCode.RC_OK + + +def _export_mirdatas_to_lmdb( + mir_metadatas: mirpb.MirMetadatas, + ec: mirpb.ExportConfig, + mir_annotations: Optional[mirpb.MirAnnotations] = None, + class_ids_mapping: Optional[Dict[int, int]] = None, + cls_id_mgr: Optional[UserLabels] = None, +) -> int: + raise NotImplementedError("LMDB format is not supported yet.") + + +def _export_anno_to_file(anno_dst_file: str, anno_format: "mirpb.AnnoFormat.V", + attributes: mirpb.MetadataAttributes, image_annotations: mirpb.SingleImageAnnotations, + image_cks: Optional[mirpb.SingleImageCks], class_ids_mapping: Optional[Dict[int, int]], + cls_id_mgr: Optional[UserLabels], asset_filename: str) -> None: + format_func = _format_file_output_func(anno_format=anno_format) + format_func(attributes, + image_annotations, + image_cks, + class_ids_mapping, + cls_id_mgr, + asset_filename, + anno_dst_file) + + +def _single_image_annotations_to_det_ark(attributes: mirpb.MetadataAttributes, + image_annotations: mirpb.SingleImageAnnotations, + image_cks: Optional[mirpb.SingleImageCks], + class_ids_mapping: Optional[Dict[int, int]], + cls_id_mgr: Optional[UserLabels], asset_filename: str, + anno_dst_file: str) -> None: + output_str = "" + for annotation in image_annotations.boxes: + if class_ids_mapping and annotation.class_id not in class_ids_mapping: + continue + + mapped_id = class_ids_mapping[annotation.class_id] if class_ids_mapping else annotation.class_id + output_str += f"{mapped_id}, {annotation.box.x}, {annotation.box.y}, " + output_str += f"{annotation.box.x + annotation.box.w - 1}, {annotation.box.y + annotation.box.h - 1}, " + output_str += f"{annotation.anno_quality}, {annotation.box.rotate_angle}\n" + + with open(anno_dst_file, 'w') as af: + af.write(output_str) + + +def _single_image_annotations_to_voc(attributes: mirpb.MetadataAttributes, + image_annotations: mirpb.SingleImageAnnotations, + image_cks: Optional[mirpb.SingleImageCks], + class_ids_mapping: Optional[Dict[int, int]], cls_id_mgr: Optional[UserLabels], + asset_filename: str, anno_dst_file: str) -> None: + if not cls_id_mgr: + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message="invalid cls_id_mgr.") + + annotations = image_annotations.boxes + + # annotation + annotation_node = ElementTree.Element('annotation') + + # annotation: folder + folder_node = ElementTree.SubElement(annotation_node, 'folder') + folder_node.text = 'folder' + + # annotation: filename + filename_node = ElementTree.SubElement(annotation_node, 'filename') + filename_node.text = asset_filename + + # annotation: source + source_node = ElementTree.SubElement(annotation_node, 'source') + + # annotation: source: annotation + annotation2_node = ElementTree.SubElement(source_node, 'annotation') + annotation2_node.text = 'unknown' + + # annotation: source: image + image_node = ElementTree.SubElement(source_node, 'image') + image_node.text = 'unknown' + + # annotation: size + size_node = ElementTree.SubElement(annotation_node, 'size') + + # annotation: size: width + width_node = ElementTree.SubElement(size_node, 'width') + width_node.text = str(attributes.width) + + # annotation: size: height + height_node = ElementTree.SubElement(size_node, 'height') + height_node.text = str(attributes.height) + + # annotation: size: depth + depth_node = ElementTree.SubElement(size_node, 'depth') + depth_node.text = str(attributes.image_channels) 
+ + # annotation: segmented + segmented_node = ElementTree.SubElement(annotation_node, 'segmented') + segmented_node.text = '0' + + # annotation: cks and sub nodes + if image_cks: + if image_cks.cks: + cks_node = ElementTree.SubElement(annotation_node, 'cks') + for k, v in image_cks.cks.items(): + ElementTree.SubElement(cks_node, k).text = v + + # annotation: image_quality + image_quality_node = ElementTree.SubElement(annotation_node, 'image_quality') + image_quality_node.text = f"{image_cks.image_quality:.4f}" + + # annotation: object(s) + for annotation in annotations: + if class_ids_mapping and annotation.class_id not in class_ids_mapping: + continue + + object_node = ElementTree.SubElement(annotation_node, 'object') + + name_node = ElementTree.SubElement(object_node, 'name') + name_node.text = cls_id_mgr.main_name_for_id(annotation.class_id) + + pose_node = ElementTree.SubElement(object_node, 'pose') + pose_node.text = 'unknown' + + truncated_node = ElementTree.SubElement(object_node, 'truncated') + truncated_node.text = 'unknown' + + occluded_node = ElementTree.SubElement(object_node, 'occluded') + occluded_node.text = '0' + + w, h = annotation.box.w, annotation.box.h + if w and h: # det box + bndbox_node = ElementTree.SubElement(object_node, 'bndbox') + + xmin_node = ElementTree.SubElement(bndbox_node, 'xmin') + xmin_node.text = str(annotation.box.x) + + ymin_node = ElementTree.SubElement(bndbox_node, 'ymin') + ymin_node.text = str(annotation.box.y) + + xmax_node = ElementTree.SubElement(bndbox_node, 'xmax') + xmax_node.text = str(annotation.box.x + w - 1) + + ymax_node = ElementTree.SubElement(bndbox_node, 'ymax') + ymax_node.text = str(annotation.box.y + h - 1) + + rotate_angle_node = ElementTree.SubElement(bndbox_node, 'rotate_angle') + rotate_angle_node.text = f"{annotation.box.rotate_angle:.4f}" + elif len(annotation.polygon) > 0: # seg polygon + raise NotImplementedError + + difficult_node = ElementTree.SubElement(object_node, 'difficult') + difficult_node.text = '0' + + if annotation.tags: # Do not add a tags node if empty, otherwise the xml parse lib will get tags: None.
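+ # illustrative example: tags={'color': 'red'} produces <tags><color>red</color></tags>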
+ tags_node = ElementTree.SubElement(object_node, 'tags') + for k, v in annotation.tags.items(): + ElementTree.SubElement(tags_node, k).text = v + + box_quality_node = ElementTree.SubElement(object_node, 'box_quality') + box_quality_node.text = f"{annotation.anno_quality:.4f}" + + if annotation.cm != mirpb.ConfusionMatrixType.NotSet: + cm_node = ElementTree.SubElement(object_node, 'cm') + cm_node.text = f"{mirpb.ConfusionMatrixType.Name(annotation.cm)}" + + confidence_node = ElementTree.SubElement(object_node, 'confidence') + confidence_node.text = f"{annotation.score:.4f}" + + with open(anno_dst_file, 'w') as af: + af.write(ElementTree.tostring(element=annotation_node, encoding='unicode')) + + +def _single_image_annotations_to_det_ls_json(attributes: mirpb.MetadataAttributes, + image_annotations: mirpb.SingleImageAnnotations, + image_cks: Optional[mirpb.SingleImageCks], + class_ids_mapping: Optional[Dict[int, int]], + cls_id_mgr: Optional[UserLabels], asset_filename: str, + anno_dst_file: str) -> None: + if not cls_id_mgr: + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message="invalid cls_id_mgr.") + + annotations = image_annotations.boxes + + out_type = "predictions" # out_type: annotation type - "annotations" or "predictions" + to_name = 'image' # to_name: object name from Label Studio labeling config + from_name = 'label' # control tag name from Label Studio labeling config + task: Dict = { + out_type: [{ + "result": [], + "ground_truth": False, + }], + "data": { + "image": asset_filename + } + } + + for annotation in annotations: + if class_ids_mapping and annotation.class_id not in class_ids_mapping: + continue + + bbox_x, bbox_y = float(annotation.box.x), float(annotation.box.y) + bbox_width, bbox_height = float(annotation.box.w), float(annotation.box.h) + img_width, img_height = attributes.width, attributes.height + item = { + "id": uuid.uuid4().hex[0:10], # random id to identify this annotation. + "type": "rectanglelabels", + "value": { + # Units of image annotations in Label Studio are percentages of image width/height.
+ # https://labelstud.io/guide/predictions.html#Units-of-image-annotations + "x": bbox_x / img_width * 100, + "y": bbox_y / img_height * 100, + "width": bbox_width / img_width * 100, + "height": bbox_height / img_height * 100, + "rotation": 0, + "rectanglelabels": [cls_id_mgr.main_name_for_id(annotation.class_id)] + }, + "to_name": to_name, + "from_name": from_name, + "image_rotation": 0, + "original_width": img_width, + "original_height": img_height + } + task[out_type][0]['result'].append(item) + + with open(anno_dst_file, 'w') as af: + af.write(json.dumps(task)) + + +def _single_image_annotations_to_seg_mask(attributes: mirpb.MetadataAttributes, + image_annotations: mirpb.SingleImageAnnotations, + image_cks: Optional[mirpb.SingleImageCks], + class_ids_mapping: Optional[Dict[int, int]], + cls_id_mgr: Optional[UserLabels], + asset_filename: str, anno_dst_file: str) -> None: + with open(anno_dst_file, 'wb') as af: + af.write(image_annotations.mask.semantic_mask) diff --git a/ymir/command/mir/tools/hash_utils.py b/ymir/command/mir/tools/hash_utils.py deleted file mode 100644 index 6175a2ba83..0000000000 --- a/ymir/command/mir/tools/hash_utils.py +++ /dev/null @@ -1,14 +0,0 @@ -import hashlib - - -def sha1sum_for_file(file_path: str) -> str: - """ - get sha1sum for file, raises FileNotFoundError if file not found - """ - h = hashlib.sha1() - with open(file_path, "rb") as f: - chunk = b'0' - while chunk != b'': - chunk = f.read(h.block_size) - h.update(chunk) - return h.hexdigest() diff --git a/ymir/command/mir/tools/metadatas.py b/ymir/command/mir/tools/metadatas.py index b0f87f0e86..dde1d1914a 100644 --- a/ymir/command/mir/tools/metadatas.py +++ b/ymir/command/mir/tools/metadatas.py @@ -2,7 +2,8 @@ import os import time from PIL import Image, ImageFile, UnidentifiedImageError -from typing import Tuple +from typing import Dict +from mir.tools import mir_storage from mir.tools.code import MirCode from mir.tools.errors import MirRuntimeError @@ -12,48 +13,6 @@ ImageFile.LOAD_TRUNCATED_IMAGES = True -def _generate_metadata_mir_pb(mir_metadatas: mirpb.MirMetadatas, dataset_name: str, sha1s: list, hashed_asset_root: str, - phase: str) -> int: - """ - generate mirpb.MirMetadatas from sha1s - """ - current_timestamp = int(time.time()) # this is a fake timestamp - timestamp = mirpb.Timestamp() - timestamp.start = current_timestamp - timestamp.duration = 0 # image has no duraton - - unknown_format_count = 0 - - sha1s_count = len(sha1s) - for idx, val in enumerate(sha1s): - metadata_attributes = mirpb.MetadataAttributes() - metadata_attributes.timestamp.CopyFrom(timestamp) - metadata_attributes.dataset_name = dataset_name - - # read file - # if any exception occured, exit without any handler - hashed_asset_path = os.path.join(hashed_asset_root, val) - asset_type, width, height, channel = _type_shape_for_asset(hashed_asset_path) - if asset_type == mirpb.AssetTypeUnknown: - logging.warning(f"ignore asset with unknown format, id: {val}") - unknown_format_count += 1 - continue - metadata_attributes.asset_type = asset_type - metadata_attributes.width = width - metadata_attributes.height = height - metadata_attributes.image_channels = channel - - mir_metadatas.attributes[val].CopyFrom(metadata_attributes) - - if idx > 0 and idx % 5000 == 0: - PhaseLoggerCenter.update_phase(phase=phase, local_percent=(idx / sha1s_count)) - - if unknown_format_count > 0: - logging.warning(f"unknown format asset count: {unknown_format_count}") - - return MirCode.RC_OK - - _ASSET_TYPE_STR_TO_ENUM_MAPPING = { 'jpeg': 
mirpb.AssetTypeImageJpeg,
    'jpg': mirpb.AssetTypeImageJpeg,
@@ -62,34 +21,39 @@ def _generate_metadata_mir_pb(mir_metadatas: mirpb.MirMetadatas, dataset_name: s
 }


-def _type_shape_for_asset(asset_path: str) -> Tuple['mirpb.AssetType.V', int, int, int]:
+def _fill_type_shape_size_for_asset(asset_path: str, metadata_attributes: mirpb.MetadataAttributes) -> None:
     if not asset_path:
         raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS,
-                              error_message='_type_shape_for_asset: empty asset_path')
+                              error_message='_fill_type_shape_size_for_asset: empty asset_path')

     try:
         asset_image = Image.open(asset_path)
         asset_type_str: str = asset_image.format.lower()  # type: ignore
-    except UnidentifiedImageError as e:
+    except (UnidentifiedImageError, OSError) as e:
         logging.info(f"{type(e).__name__}: {e} asset_path: {asset_path}")
         asset_type_str = ''  # didn't set it to 'unknown' as what i did in utils.py, because this is easy to compare

+    width, height, channel = 0, 0, 0
+    asset_type = mirpb.AssetTypeUnknown
     if asset_type_str in _ASSET_TYPE_STR_TO_ENUM_MAPPING:
         width, height = asset_image.size
         channel = len(asset_image.getbands())
-        return (_ASSET_TYPE_STR_TO_ENUM_MAPPING[asset_type_str], width, height, channel)
-    else:
-        return (mirpb.AssetTypeUnknown, 0, 0, 0)
+        asset_type = _ASSET_TYPE_STR_TO_ENUM_MAPPING[asset_type_str]
+
+    metadata_attributes.asset_type = asset_type
+    metadata_attributes.width = width
+    metadata_attributes.height = height
+    metadata_attributes.image_channels = channel
+    metadata_attributes.byte_size = os.stat(asset_path).st_size


 def import_metadatas(mir_metadatas: mirpb.MirMetadatas,
-                     dataset_name: str,
-                     in_sha1_path: str,
+                     map_hashed_filename: Dict[str, str],
                      hashed_asset_root: str,
                      phase: str = '') -> int:
     # if not enough args, abort
-    if (not in_sha1_path or not dataset_name or not hashed_asset_root):
-        logging.error('invalid in_sha1_path, dataset_name or hashed_asset_root')
+    if (not map_hashed_filename or not hashed_asset_root):
+        logging.error('invalid map_hashed_filename or hashed_asset_root')
         return MirCode.RC_CMD_INVALID_ARGS

     if not mir_metadatas:
@@ -97,24 +61,33 @@ def import_metadatas(mir_metadatas: mirpb.MirMetadatas,
         logging.error('mir_metadatas empty')
         return MirCode.RC_CMD_INVALID_MIR_REPO

-    # read sha1
-    sha1s = []
-    with open(in_sha1_path, "r") as in_file:
-        for line in in_file.readlines():
-            if not line or not line.strip():
-                continue
-            line_components = line.strip().split()
-            if not line_components[0]:
-                continue
-            sha1s.append(line_components[0])
-    if not sha1s:
-        logging.error(f"no sha1s found in {in_sha1_path}, exit")
-        return MirCode.RC_CMD_INVALID_ARGS
+    current_timestamp = int(time.time())  # this is a fake timestamp
+    timestamp = mirpb.Timestamp()
+    timestamp.start = current_timestamp
+    timestamp.duration = 0  # image has no duration
+
+    unknown_format_count = 0
+
+    sha1s_count = len(map_hashed_filename)
+    for idx, asset_id in enumerate(map_hashed_filename.keys()):
+        metadata_attributes = mirpb.MetadataAttributes()
+        metadata_attributes.timestamp.CopyFrom(timestamp)

-    # generate mir_metadatas
-    ret = _generate_metadata_mir_pb(mir_metadatas=mir_metadatas,
-                                    dataset_name=dataset_name,
-                                    sha1s=sha1s,
-                                    hashed_asset_root=hashed_asset_root,
-                                    phase=phase)
-    return ret
+        # read file
+        # if any exception occurred, exit without any handler
+        hashed_asset_path = mir_storage.locate_asset_path(location=hashed_asset_root, hash=asset_id)
+        _fill_type_shape_size_for_asset(hashed_asset_path, metadata_attributes)
+        if metadata_attributes.asset_type == mirpb.AssetTypeUnknown:
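+            # Illustrative note (paths below are hypothetical): locate_asset_path()
+            # first tries the sharded layout <hashed_asset_root>/<last 2 chars of id>/<id>,
+            # e.g. /data/assets/3f/a1b2...3f, then falls back to the flat layout
+            # <hashed_asset_root>/<id>, and raises MirRuntimeError if neither exists;
+            # assets that reach here with an unknown image format are skipped.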
logging.warning(f"ignore asset with unknown format, id: {asset_id}") + unknown_format_count += 1 + continue + metadata_attributes.origin_filename = map_hashed_filename[asset_id] + mir_metadatas.attributes[asset_id].CopyFrom(metadata_attributes) + + if idx > 0 and idx % 5000 == 0: + PhaseLoggerCenter.update_phase(phase=phase, local_percent=(idx / sha1s_count)) + + if unknown_format_count > 0: + logging.warning(f"unknown format asset count: {unknown_format_count}") + + return MirCode.RC_OK diff --git a/ymir/command/mir/tools/mir_storage.py b/ymir/command/mir/tools/mir_storage.py index a24f3f530c..4166202b03 100644 --- a/ymir/command/mir/tools/mir_storage.py +++ b/ymir/command/mir/tools/mir_storage.py @@ -1,9 +1,12 @@ +import hashlib +import os from typing import Any, List from mir.protos import mir_command_pb2 as mirpb +from mir.tools.errors import MirCode, MirRuntimeError -MIR_ASSOCIATED_FILES = ['.git', '.mir', '.gitignore', '.mir_lock'] +MIR_ASSOCIATED_FILES = ['.git', '.gitattributes', '.gitignore', '.mir', '.mir_lock'] def mir_type(mir_storage: 'mirpb.MirStorage.V') -> Any: @@ -28,17 +31,6 @@ def mir_path(mir_storage: 'mirpb.MirStorage.V') -> str: return MIR_PATH[mir_storage] -def mir_attr_name(mir_storage: 'mirpb.MirStorage.V') -> str: - MIR_STORAGE_TO_ATTR_NAME = { - mirpb.MirStorage.MIR_METADATAS: 'mir_metadatas', - mirpb.MirStorage.MIR_ANNOTATIONS: 'mir_annotations', - mirpb.MirStorage.MIR_KEYWORDS: 'mir_keywords', - mirpb.MirStorage.MIR_TASKS: 'mir_tasks', - mirpb.MirStorage.MIR_CONTEXT: 'mir_context', - } - return MIR_STORAGE_TO_ATTR_NAME[mir_storage] - - def get_all_mir_paths() -> List[str]: return [mir_path(ms) for ms in get_all_mir_storage()] @@ -51,3 +43,39 @@ def get_all_mir_storage() -> List['mirpb.MirStorage.V']: mirpb.MirStorage.MIR_TASKS, mirpb.MirStorage.MIR_CONTEXT, ] + + +# assets +def locate_asset_path(location: str, hash: str) -> str: + asset_path = get_asset_storage_path(location=location, hash=hash, make_dirs=False, need_sub_folder=True) + if os.path.isfile(asset_path): + return asset_path + + asset_path = get_asset_storage_path(location=location, hash=hash, make_dirs=False, need_sub_folder=False) + if os.path.isfile(asset_path): + return asset_path + + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message=f"cannot locate asset: {hash}") + + +def get_asset_storage_path(location: str, hash: str, make_dirs: bool = True, need_sub_folder: bool = True) -> str: + if not need_sub_folder: + return os.path.join(location, hash) + + sub_dir = os.path.join(location, hash[-2:]) + if make_dirs: + os.makedirs(sub_dir, exist_ok=True) + return os.path.join(sub_dir, hash) + + +def sha1sum_for_file(file_path: str) -> str: + """ + get sha1sum for file, raises FileNotFoundError if file not found + """ + h = hashlib.sha1() + with open(file_path, "rb") as f: + chunk = b'0' + while chunk != b'': + chunk = f.read(h.block_size) + h.update(chunk) + return h.hexdigest() diff --git a/ymir/command/mir/tools/mir_storage_ops.py b/ymir/command/mir/tools/mir_storage_ops.py index 343012aa40..c311ec4f4f 100644 --- a/ymir/command/mir/tools/mir_storage_ops.py +++ b/ymir/command/mir/tools/mir_storage_ops.py @@ -1,126 +1,150 @@ +from functools import reduce +from math import ceil import os import time -from typing import Any, List, Dict, Optional, Set +from typing import Any, List, Dict, Optional import fasteners # type: ignore from google.protobuf import json_format -import yaml from mir import scm from mir.commands.checkout import CmdCheckout from mir.commands.commit import 
CmdCommit from mir.protos import mir_command_pb2 as mirpb -from mir.tools import class_ids, context, exodus, mir_storage, mir_repo_utils, revs_parser, settings as mir_settings -from mir.tools.code import MirCode -from mir.tools.errors import MirError, MirRuntimeError +from mir.tools import det_eval_ops, exodus +from mir.tools import mir_storage, mir_repo_utils, revs_parser +from mir.tools import settings as mir_settings +from mir.tools.code import MirCode, time_it +from mir.tools.errors import MirRuntimeError + + +def create_evaluate_config(conf_thr: float = mir_settings.DEFAULT_EVALUATE_CONF_THR, + iou_thrs: str = mir_settings.DEFAULT_EVALUATE_IOU_THR, + need_pr_curve: bool = False, + class_ids: List[int] = []) -> mirpb.EvaluateConfig: + evaluate_config = mirpb.EvaluateConfig() + evaluate_config.conf_thr = conf_thr + evaluate_config.iou_thrs_interval = iou_thrs + evaluate_config.need_pr_curve = need_pr_curve + evaluate_config.class_ids[:] = class_ids + return evaluate_config class MirStorageOps(): # private: save and load @classmethod - def __save(cls, mir_root: str, mir_datas: Dict['mirpb.MirStorage.V', Any]) -> None: + def __build_task_keyword_context(cls, mir_datas: Dict['mirpb.MirStorage.V', Any], task: mirpb.Task, + evaluate_config: mirpb.EvaluateConfig) -> None: # add default members - mir_tasks: mirpb.MirTasks = mir_datas[mirpb.MirStorage.MIR_TASKS] - if mirpb.MirStorage.MIR_METADATAS not in mir_datas: - mir_datas[mirpb.MirStorage.MIR_METADATAS] = mirpb.MirMetadatas() - if mirpb.MirStorage.MIR_ANNOTATIONS not in mir_datas: - mir_datas[mirpb.MirStorage.MIR_ANNOTATIONS] = mirpb.MirAnnotations() - + mir_metadatas: mirpb.MirMetadatas = mir_datas[mirpb.MirStorage.MIR_METADATAS] mir_annotations: mirpb.MirAnnotations = mir_datas[mirpb.MirStorage.MIR_ANNOTATIONS] - cls.__build_annotations_head_task_id(mir_annotations=mir_annotations, head_task_id=mir_tasks.head_task_id) + mir_annotations.prediction.task_id = task.task_id + mir_annotations.ground_truth.task_id = task.task_id + + # build mir_tasks + mir_tasks: mirpb.MirTasks = mirpb.MirTasks() + mir_tasks.head_task_id = task.task_id + mir_tasks.tasks[mir_tasks.head_task_id].CopyFrom(task) + evaluation = det_eval_ops.det_evaluate_with_pb( + prediction=mir_annotations.prediction, + ground_truth=mir_annotations.ground_truth, + config=evaluate_config, + ) + mir_tasks.tasks[mir_tasks.head_task_id].evaluation.CopyFrom(evaluation) + mir_datas[mirpb.MirStorage.MIR_TASKS] = mir_tasks # gen mir_keywords mir_keywords: mirpb.MirKeywords = mirpb.MirKeywords() - cls.__build_mir_keywords(single_task_annotations=mir_annotations.task_annotations[mir_annotations.head_task_id], - mir_keywords=mir_keywords) + cls.__build_mir_keywords_ci_tag(task_annotations=mir_annotations.prediction, + keyword_to_index=mir_keywords.pred_idx) + cls.__build_mir_keywords_ci_tag(task_annotations=mir_annotations.ground_truth, + keyword_to_index=mir_keywords.gt_idx) + # ck to assets + for asset_id, image_cks in mir_annotations.image_cks.items(): + for k, v in image_cks.cks.items(): + mir_keywords.ck_idx[k].asset_annos[asset_id] # empty record to asset id + mir_keywords.ck_idx[k].sub_indexes[v].key_ids[asset_id] # empty record to asset id mir_datas[mirpb.MirStorage.MIR_KEYWORDS] = mir_keywords # gen mir_context - project_class_ids = context.load(mir_root=mir_root) mir_context = mirpb.MirContext() - cls.__build_mir_context(mir_metadatas=mir_datas[mirpb.MirStorage.MIR_METADATAS], + cls.__build_mir_context(mir_metadatas=mir_metadatas, mir_annotations=mir_annotations, 
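+                                # Shape sketch of the index consumed here (values are
+                                # illustrative): pred_idx/gt_idx map class id -> asset id
+                                # -> annotation indexes, e.g.
+                                #   pred_idx.cis[2].key_ids['asset0'].ids == [0, 3]
+                                # means asset0 holds predictions #0 and #3 of class 2.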
mir_keywords=mir_keywords, - project_class_ids=project_class_ids, mir_context=mir_context) mir_datas[mirpb.MirStorage.MIR_CONTEXT] = mir_context - # save to file - for ms, mir_data in mir_datas.items(): - mir_file_path = os.path.join(mir_root, mir_storage.mir_path(ms)) - with open(mir_file_path, "wb") as m_f: - m_f.write(mir_data.SerializeToString()) - @classmethod - # public: presave actions - def __build_annotations_head_task_id(cls, mir_annotations: mirpb.MirAnnotations, head_task_id: str) -> None: - task_annotations_count = len(mir_annotations.task_annotations) - if task_annotations_count == 0: - mir_annotations.task_annotations[head_task_id].CopyFrom(mirpb.SingleTaskAnnotations()) - elif task_annotations_count == 1: - task_id = list(mir_annotations.task_annotations.keys())[0] - if task_id != head_task_id: - raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, - error_message=f"annotation head task id mismatch: {head_task_id} != {task_id}") - elif task_annotations_count > 1: - # * now we allows only one task id in each mir_annotations - raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_MIR_REPO, - error_message='more then one task ids found in mir_annotations') - - mir_annotations.head_task_id = head_task_id + @time_it + def __build_mir_keywords_ci_tag(cls, task_annotations: mirpb.SingleTaskAnnotations, + keyword_to_index: mirpb.CiTagToIndex) -> None: + task_cis = set() + for asset_id, single_image_annotations in task_annotations.image_annotations.items(): + image_cis = set() + for annotation in single_image_annotations.boxes: + image_cis.add(annotation.class_id) + # ci to annos + keyword_to_index.cis[annotation.class_id].key_ids[asset_id].ids.append(annotation.index) + + # tags to annos + for k, v in annotation.tags.items(): + keyword_to_index.tags[k].asset_annos[asset_id].ids.append(annotation.index) + keyword_to_index.tags[k].sub_indexes[v].key_ids[asset_id].ids.append(annotation.index) + + single_image_annotations.img_class_ids[:] = image_cis + task_cis.update(image_cis) + + task_annotations.task_class_ids[:] = task_cis @classmethod - def __build_mir_keywords(cls, single_task_annotations: mirpb.SingleTaskAnnotations, - mir_keywords: mirpb.MirKeywords) -> None: - """ - build mir_keywords from single_task_annotations + def __build_mir_context_stats(cls, anno_stats: mirpb.AnnoStats, mir_metadatas: mirpb.MirMetadatas, + task_annotations: mirpb.SingleTaskAnnotations, + keyword_to_index: mirpb.CiTagToIndex) -> None: + image_annotations = task_annotations.image_annotations - Args: - single_task_annotations (mirpb.SingleTaskAnnotations) - mir_keywords (mirpb.MirKeywords) - """ - # build mir_keywords.keywords - for asset_id, single_image_annotations in single_task_annotations.image_annotations.items(): - mir_keywords.keywords[asset_id].predifined_keyids[:] = set( - [annotation.class_id for annotation in single_image_annotations.annotations]) + anno_stats.eval_class_ids[:] = task_annotations.eval_class_ids + + # anno_stats.asset_cnt + anno_stats.positive_asset_cnt = len(image_annotations) + anno_stats.negative_asset_cnt = len(mir_metadatas.attributes) - len(image_annotations) + + anno_stats.total_cnt = sum([len(image_annotation.boxes) for image_annotation in image_annotations.values()]) - # build mir_keywords.index_predifined_keyids - mir_keywords.index_predifined_keyids.clear() + # anno_stats.cis_cnt + for ci, ci_assets in keyword_to_index.cis.items(): + anno_stats.class_ids_cnt[ci] = len(ci_assets.key_ids) - for asset_id, keywords in mir_keywords.keywords.items(): - for 
key_id in keywords.predifined_keyids: - mir_keywords.index_predifined_keyids[key_id].asset_ids.append(asset_id) + # anno_stats.tags_cnt + for tag, tag_to_annos in keyword_to_index.tags.items(): + for anno_idxes in tag_to_annos.asset_annos.values(): + anno_stats.tags_cnt[tag].cnt += len(anno_idxes.ids) - # Remove redundant index values and sort - for key_id, assets in mir_keywords.index_predifined_keyids.items(): - mir_keywords.index_predifined_keyids[key_id].asset_ids[:] = sorted( - set(mir_keywords.index_predifined_keyids[key_id].asset_ids)) + for sub_tag, sub_tag_to_annos in tag_to_annos.sub_indexes.items(): + for anno_idxes in sub_tag_to_annos.key_ids.values(): + anno_stats.tags_cnt[tag].sub_cnt[sub_tag] += len(anno_idxes.ids) @classmethod + @time_it def __build_mir_context(cls, mir_metadatas: mirpb.MirMetadatas, mir_annotations: mirpb.MirAnnotations, - mir_keywords: mirpb.MirKeywords, project_class_ids: List[int], - mir_context: mirpb.MirContext) -> None: - for key_id, assets in mir_keywords.index_predifined_keyids.items(): - mir_context.predefined_keyids_cnt[key_id] = len(assets.asset_ids) - - # project_predefined_keyids_cnt: assets count for project class ids - # suppose we have: 13 images for key 5, 15 images for key 6, and proejct_class_ids = [3, 5] - # project_predefined_keyids_cnt should be: {3: 0, 5: 13} - project_positive_asset_ids: Set[str] = set() - for key_id in project_class_ids: - if key_id in mir_context.predefined_keyids_cnt: - mir_context.project_predefined_keyids_cnt[key_id] = mir_context.predefined_keyids_cnt[key_id] - project_positive_asset_ids.update(mir_keywords.index_predifined_keyids[key_id].asset_ids) - else: - mir_context.project_predefined_keyids_cnt[key_id] = 0 - - # image_cnt, negative_images_cnt, project_negative_images_cnt + mir_keywords: mirpb.MirKeywords, mir_context: mirpb.MirContext) -> None: mir_context.images_cnt = len(mir_metadatas.attributes) - mir_context.negative_images_cnt = mir_context.images_cnt - len( - mir_annotations.task_annotations[mir_annotations.head_task_id].image_annotations) - if project_class_ids: - mir_context.project_negative_images_cnt = mir_context.images_cnt - len(project_positive_asset_ids) - # if no project_class_ids, project_negative_images_cnt set to 0 + total_asset_bytes = reduce(lambda s, v: s + v.byte_size, mir_metadatas.attributes.values(), 0) + mir_context.total_asset_mbytes = ceil(total_asset_bytes / mir_settings.BYTES_PER_MB) + + # cks cnt + for ck, ck_assets in mir_keywords.ck_idx.items(): + mir_context.cks_cnt[ck].cnt = len(ck_assets.asset_annos) + for sub_ck, sub_ck_to_assets in ck_assets.sub_indexes.items(): + mir_context.cks_cnt[ck].sub_cnt[sub_ck] = len(sub_ck_to_assets.key_ids) + + cls.__build_mir_context_stats(anno_stats=mir_context.pred_stats, + mir_metadatas=mir_metadatas, + task_annotations=mir_annotations.prediction, + keyword_to_index=mir_keywords.pred_idx) + cls.__build_mir_context_stats(anno_stats=mir_context.gt_stats, + mir_metadatas=mir_metadatas, + task_annotations=mir_annotations.ground_truth, + keyword_to_index=mir_keywords.gt_idx) @classmethod def __add_git_tag(cls, mir_root: str, tag: str) -> None: @@ -129,8 +153,13 @@ def __add_git_tag(cls, mir_root: str, tag: str) -> None: # public: save and load @classmethod - def save_and_commit(cls, mir_root: str, mir_branch: str, his_branch: Optional[str], mir_datas: dict, - task: mirpb.Task) -> int: + def save_and_commit(cls, + mir_root: str, + mir_branch: str, + his_branch: Optional[str], + mir_datas: Dict, + task: mirpb.Task, + evaluate_config: 
Optional[mirpb.EvaluateConfig] = None) -> int: """ saves and commit all contents in mir_datas to branch: `mir_branch`; branch will be created if not exists, and it's history will be after `his_branch` @@ -143,6 +172,7 @@ def save_and_commit(cls, mir_root: str, mir_branch: str, his_branch: Optional[st mir_tasks is needed, if mir_metadatas and mir_annotations not provided, they will be created as empty datasets task (mirpb.Task): task for this commit + evaluate_config (mirpb.EvaluateConfig): evaluate config Raises: MirRuntimeError @@ -154,6 +184,9 @@ def save_and_commit(cls, mir_root: str, mir_branch: str, his_branch: Optional[st mir_root = '.' if not mir_branch: raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message="empty mir branch") + if mirpb.MirStorage.MIR_METADATAS not in mir_datas or mirpb.MirStorage.MIR_ANNOTATIONS not in mir_datas: + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, + error_message='need mir_metadatas and mir_annotations') if mirpb.MirStorage.MIR_KEYWORDS in mir_datas: raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message='need no mir_keywords') if mirpb.MirStorage.MIR_CONTEXT in mir_datas: @@ -165,10 +198,13 @@ def save_and_commit(cls, mir_root: str, mir_branch: str, his_branch: Optional[st if not task.task_id: raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message='empty task id') - mir_tasks: mirpb.MirTasks = mirpb.MirTasks() - mir_tasks.head_task_id = task.task_id - mir_tasks.tasks[mir_tasks.head_task_id].CopyFrom(task) - mir_datas[mirpb.MirStorage.MIR_TASKS] = mir_tasks + if not evaluate_config: + evaluate_config = create_evaluate_config() + + # Build all mir_datas. + cls.__build_task_keyword_context(mir_datas=mir_datas, + task=task, + evaluate_config=evaluate_config) branch_exists = mir_repo_utils.mir_check_branch_exists(mir_root=mir_root, branch=mir_branch) if not branch_exists and not his_branch: @@ -195,7 +231,11 @@ def save_and_commit(cls, mir_root: str, mir_branch: str, his_branch: Optional[st if return_code != MirCode.RC_OK: return return_code - cls.__save(mir_root=mir_root, mir_datas=mir_datas) + # save to file + for ms, mir_data in mir_datas.items(): + mir_file_path = os.path.join(mir_root, mir_storage.mir_path(ms)) + with open(mir_file_path, "wb") as m_f: + m_f.write(mir_data.SerializeToString()) ret_code = CmdCommit.run_with_args(mir_root=mir_root, msg=task.name) if ret_code != MirCode.RC_OK: @@ -206,6 +246,7 @@ def save_and_commit(cls, mir_root: str, mir_branch: str, his_branch: Optional[st return ret_code + # public: load @classmethod def load_single_storage(cls, mir_root: str, @@ -249,136 +290,18 @@ def __message_to_dict(cls, message: Any) -> Dict: use_integers_for_enums=True, including_default_value_fields=True) - @classmethod - def load_single_model(cls, mir_root: str, mir_branch: str, mir_task_id: str = '') -> dict: - mir_storage_data: mirpb.MirTasks = cls.load_single_storage(mir_root=mir_root, - mir_branch=mir_branch, - ms=mirpb.MirStorage.MIR_TASKS, - mir_task_id=mir_task_id, - as_dict=False) - task = mir_storage_data.tasks[mir_storage_data.head_task_id] - if not task.model.model_hash: - raise MirError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message="no model") - - single_model_dict = cls.__message_to_dict(task.model) - single_model_dict[mir_settings.TASK_CONTEXT_PARAMETERS_KEY] = task.serialized_task_parameters - single_model_dict[mir_settings.EXECUTOR_CONFIG_KEY] = yaml.safe_load(task.serialized_executor_config) or {} - return single_model_dict - - 
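+    # Minimal usage sketch of the new save flow (repo path, branch and task names
+    # are made up for illustration):
+    #   task = create_task(task_type=mirpb.TaskType.TaskTypeInit,
+    #                      task_id='t-fake-id', message='init repo')
+    #   MirStorageOps.save_and_commit(
+    #       mir_root='/tmp/repo', mir_branch='a', his_branch='master',
+    #       mir_datas={mirpb.MirStorage.MIR_METADATAS: mirpb.MirMetadatas(),
+    #                  mirpb.MirStorage.MIR_ANNOTATIONS: mirpb.MirAnnotations()},
+    #       task=task)
+    # evaluate_config falls back to create_evaluate_config() when omitted.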
@classmethod - def load_single_dataset(cls, mir_root: str, mir_branch: str, mir_task_id: str = '') -> dict: - """ - exampled return data: - { - "class_ids_count": {3: 34}, - "class_names_count": {'cat': 34}, - "ignored_labels": {'cat':5, }, - "negative_info": { - "negative_images_cnt": 0, - "project_negative_images_cnt": 0, - }, - "total_images_cnt": 1, - } - """ - mir_storage_tasks, mir_storage_context = cls.load_multiple_storages( - mir_root=mir_root, - mir_branch=mir_branch, - ms_list=[mirpb.MirStorage.MIR_TASKS, mirpb.MirStorage.MIR_CONTEXT], - mir_task_id=mir_task_id, - as_dict=False, - ) - task_storage = mir_storage_tasks.tasks[mir_storage_tasks.head_task_id] - - class_id_mgr = class_ids.ClassIdManager(mir_root=mir_root) - return dict( - class_ids_count={k: v - for k, v in mir_storage_context.predefined_keyids_cnt.items()}, - class_names_count={ - class_id_mgr.main_name_for_id(id): count - for id, count in mir_storage_context.predefined_keyids_cnt.items() - }, - ignored_labels={k: v - for k, v in task_storage.unknown_types.items()}, - negative_info=dict( - negative_images_cnt=mir_storage_context.negative_images_cnt, - project_negative_images_cnt=mir_storage_context.project_negative_images_cnt, - ), - total_images_cnt=mir_storage_context.images_cnt, - ) - - @classmethod - def load_assets_content(cls, mir_root: str, mir_branch: str, mir_task_id: str = '') -> dict: - """ - exampled return data: - { - "all_asset_ids": ["asset_id"], - "asset_ids_detail": { - "asset_id": { - "metadata": {"asset_type": 2, "width": 1080, "height": 1620}, - "annotations": [{"box": {"x": 26, "y": 189, "w": 19, "h": 50}, "class_id": 2}], - "class_ids": [2], - } - }, - "class_ids_index": {2: ["asset_id"]}, - } - """ - # Require asset details, build snapshot. - mir_storage_metadatas, mir_storage_annotations, mir_storage_keywords = cls.load_multiple_storages( - mir_root=mir_root, - mir_branch=mir_branch, - ms_list=[mirpb.MirStorage.MIR_METADATAS, mirpb.MirStorage.MIR_ANNOTATIONS, mirpb.MirStorage.MIR_KEYWORDS], - mir_task_id=mir_task_id, - as_dict=True, - ) - - asset_ids_detail: Dict[str, Dict] = dict() - hid = mir_storage_annotations["head_task_id"] - annotations = mir_storage_annotations["task_annotations"][hid]["image_annotations"] - keyword_keyids_list = mir_storage_keywords["keywords"] - for asset_id, asset_metadata in mir_storage_metadatas["attributes"].items(): - asset_annotations = annotations[asset_id]["annotations"] if asset_id in annotations else {} - asset_class_ids = ( - keyword_keyids_list[asset_id]["predifined_keyids"] - if asset_id in keyword_keyids_list - else [] - ) - asset_ids_detail[asset_id] = dict( - metadata=asset_metadata, - annotations=asset_annotations, - class_ids=asset_class_ids, - ) - return dict( - all_asset_ids=sorted([*mir_storage_metadatas["attributes"].keys()]), # ordered list. 
- asset_ids_detail=asset_ids_detail, - class_ids_index={k: v["asset_ids"] for k, v in mir_storage_keywords["index_predifined_keyids"].items()}, - ) - - @classmethod - def load_dataset_evaluations(cls, mir_root: str, mir_branch: str, mir_task_id: str = '') -> dict: - mir_storage_data: mirpb.MirTasks = cls.load_single_storage(mir_root=mir_root, - mir_branch=mir_branch, - ms=mirpb.MirStorage.MIR_TASKS, - mir_task_id=mir_task_id, - as_dict=False) - task = mir_storage_data.tasks[mir_storage_data.head_task_id] - if not task.evaluation.dataset_evaluations: - raise MirError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message="no dataset evaluation") - - dataset_evaluations = cls.__message_to_dict(task.evaluation) - return dataset_evaluations["dataset_evaluations"] - def create_task(task_type: 'mirpb.TaskType.V', task_id: str, message: str, - unknown_types: Dict[str, int] = {}, - model_hash: str = '', - model_mAP: float = 0, + new_types: Dict[str, int] = {}, + new_types_added: bool = False, return_code: int = 0, return_msg: str = '', serialized_task_parameters: str = '', serialized_executor_config: str = '', executor: str = '', + model_meta: mirpb.ModelMeta = None, evaluation: mirpb.Evaluation = None, src_revs: str = '', dst_rev: str = '') -> mirpb.Task: @@ -391,11 +314,8 @@ def create_task(task_type: 'mirpb.TaskType.V', 'return_msg': return_msg, 'serialized_task_parameters': serialized_task_parameters, 'serialized_executor_config': serialized_executor_config, - 'unknown_types': unknown_types, - 'model': { - 'model_hash': model_hash, - 'mean_average_precision': model_mAP, - }, + 'new_types': new_types, + 'new_types_added': new_types_added, 'executor': executor, 'src_revs': src_revs, 'dst_rev': dst_rev, @@ -403,6 +323,9 @@ def create_task(task_type: 'mirpb.TaskType.V', task: mirpb.Task = mirpb.Task() json_format.ParseDict(task_dict, task) + if model_meta: + task.model.CopyFrom(model_meta) + if evaluation: task.evaluation.CopyFrom(evaluation) diff --git a/ymir/command/mir/tools/models.py b/ymir/command/mir/tools/models.py new file mode 100644 index 0000000000..076c2ed272 --- /dev/null +++ b/ymir/command/mir/tools/models.py @@ -0,0 +1,165 @@ +import logging +import os +from pydantic import BaseModel, Field +import shutil +import tarfile +from typing import Any, Dict, List, Tuple + +from google.protobuf import json_format +import yaml + +from mir.tools.code import MirCode +from mir.tools.errors import MirRuntimeError +from mir.tools.mir_storage import sha1sum_for_file +from mir.protos import mir_command_pb2 as mirpb + + +class ModelStageStorage(BaseModel): + stage_name: str + files: List[str] + mAP: float = Field(..., ge=0, le=1) + timestamp: int + + +class ModelStorage(BaseModel): + executor_config: Dict[str, Any] + task_context: Dict[str, Any] + stages: Dict[str, ModelStageStorage] + best_stage_name: str + model_hash: str = '' + stage_name: str = '' + attachments: Dict[str, List[str]] = {} + package_version: str = Field(..., min_length=1) + + @property + def class_names(self) -> List[str]: + return self.executor_config['class_names'] + + def get_model_meta(self) -> mirpb.ModelMeta: + model_meta = mirpb.ModelMeta() + json_format.ParseDict( + { + 'mean_average_precision': self.stages[self.best_stage_name].mAP, + 'model_hash': self.model_hash, + 'stages': {k: v.dict() + for k, v in self.stages.items()}, + 'best_stage_name': self.best_stage_name, + 'class_names': self.class_names, + }, model_meta) + return model_meta + + +def parse_model_hash_stage(model_hash_stage: str) -> Tuple[str, str]: + """ + 
parse model hash and stage name from string: `model_hash@stage_name` + """ + components = model_hash_stage.split('@') + model_hash = '' + stage_name = '' + if len(components) == 2: + model_hash, stage_name = components + if not model_hash or not stage_name: + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, + error_message=f"invalid model hash stage: {model_hash_stage}") + return (model_hash, stage_name) + + +def prepare_model(model_location: str, model_hash: str, stage_name: str, dst_model_path: str) -> ModelStorage: + """ + unpack model to `dst_model_path` and returns ModelStorage instance + + Args: + model_location (str): model storage dir + model_hash (str): hash of model package + stage_name (str): model stage name, empty string to unpack all model files + dst_model_path (str): path to destination model directory + + Raises: + MirRuntimeError: if dst_model_path is not a directory + MirRuntimeError: if model not found + MirRuntimeError: if model package is invalid (lacks params, json or config file) + + Returns: + ModelStorage + """ + if not model_location or not model_hash: + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, + error_message='empty model_location or model_hash') + tar_file_path = os.path.join(model_location, model_hash) + if not os.path.isfile(tar_file_path): + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, + error_message=f"tar_file is not a file: {tar_file_path}") + + os.makedirs(dst_model_path, exist_ok=True) + + logging.info(f"extracting models: {tar_file_path}, stage: {stage_name}") + with tarfile.open(tar_file_path, 'r') as tar_file: + # get model_stage of this package + tar_file.extract('ymir-info.yaml', dst_model_path) + with open(os.path.join(dst_model_path, 'ymir-info.yaml'), 'r') as f: + ymir_info_dict = yaml.safe_load(f.read()) + + model_storage = ModelStorage.parse_obj(ymir_info_dict) + model_storage.model_hash = model_hash + model_storage.stage_name = stage_name + + stage_and_file_names = [f"{stage_name}/{file_name}" for file_name in model_storage.stages[stage_name].files] + os.makedirs(os.path.join(dst_model_path, stage_name), exist_ok=True) + for name in stage_and_file_names: + tar_file.extract(name, dst_model_path) + + return model_storage + + +def pack_and_copy_models(model_storage: ModelStorage, model_dir_path: str, model_location: str) -> str: + """ + pack model, returns model hash of the new model package + """ + logging.info(f"packing models: {model_dir_path} -> {model_location}, stages: {model_storage.stages.keys()}") + + ymir_info_file_name = 'ymir-info.yaml' + ymir_info_file_path = os.path.join(model_dir_path, ymir_info_file_name) + with open(ymir_info_file_path, 'w') as f: + yaml.safe_dump(model_storage.dict(), f) + + tar_file_path = os.path.join(model_dir_path, 'model.tar.gz') + with tarfile.open(tar_file_path, 'w:gz') as tar_gz_f: + # packing models + for stage_name, stage in model_storage.stages.items(): + stage_dir = os.path.join(model_dir_path, stage_name) + for file_name in stage.files: + # find model file in `stage_dir`, and then `model_dir` + # compatible with old docker images + file_path = _find_model_file(model_dirs=[stage_dir, model_dir_path], file_name=file_name) + tar_file_key = f"{stage_name}/{file_name}" + tar_gz_f.add(file_path, tar_file_key) + + # packing attachments + for section, file_names in model_storage.attachments.items(): + section_dir = os.path.join(model_dir_path, 'attachments', section) + for file_name in file_names: + file_path = os.path.join(section_dir, file_name) + 
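+            # Resulting package layout (file and stage names are illustrative):
+            #   ymir-info.yaml
+            #   <stage_name>/<weights files>, e.g. best/model.params
+            #   attachments/<section>/<file>, e.g. attachments/config/infer.yaml
+            # A stage is later addressed as '<model_hash>@<stage_name>', as parsed
+            # by parse_model_hash_stage() above.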
tar_file_key = f"attachments/{section}/{file_name}" + tar_gz_f.add(file_path, tar_file_key) + + # packing ymir-info.yaml + tar_gz_f.add(ymir_info_file_path, ymir_info_file_name) + + os.makedirs(model_location, exist_ok=True) + model_hash = sha1sum_for_file(tar_file_path) + shutil.copyfile(tar_file_path, os.path.join(model_location, model_hash)) + os.remove(tar_file_path) + + logging.info(f"pack success, model hash: {model_hash}, best_stage_name: {model_storage.best_stage_name}, " + f"mAP: {model_storage.stages[model_storage.best_stage_name].mAP}") + + model_storage.model_hash = model_hash + return model_hash + + +def _find_model_file(model_dirs: List[str], file_name: str) -> str: + for model_dir in model_dirs: + file_path = os.path.join(model_dir, file_name) + if os.path.isfile(file_path): + return file_path + raise FileNotFoundError(f"File not found: {file_name} in following dirs: {model_dirs}") diff --git a/ymir/command/mir/tools/phase_logger_conf.json b/ymir/command/mir/tools/phase_logger_conf.json index 992420bb22..462fe49478 100644 --- a/ymir/command/mir/tools/phase_logger_conf.json +++ b/ymir/command/mir/tools/phase_logger_conf.json @@ -1,8 +1,12 @@ { "import": [ + { + "name": "init", + "delta": 0.05 + }, { "name": "hash", - "delta": 0.4 + "delta": 0.35 }, { "name": "metadatas", @@ -14,9 +18,13 @@ } ], "copy": [ + { + "name": "init", + "delta": 0.05 + }, { "name": "read", - "delta": 0.4 + "delta": 0.35 }, { "name": "change", @@ -34,9 +42,13 @@ } ], "filter": [ + { + "name": "init", + "delta": 0.05 + }, { "name": "read", - "delta": 0.2 + "delta": 0.15 }, { "name": "change", diff --git a/ymir/command/mir/tools/revs_parser.py b/ymir/command/mir/tools/revs_parser.py index 7fc860f383..40f6e16166 100644 --- a/ymir/command/mir/tools/revs_parser.py +++ b/ymir/command/mir/tools/revs_parser.py @@ -7,10 +7,10 @@ class TypRevTid: __slots__ = ("typ", "rev", "tid") - def __init__(self) -> None: - self.typ = '' - self.rev = '' - self.tid = '' + def __init__(self, typ: str = '', rev: str = '', tid: str = '') -> None: + self.typ = typ + self.rev = rev + self.tid = tid def __repr__(self) -> str: return f"(t: {self.typ}, r: {self.rev}, t: {self.tid})" @@ -27,6 +27,10 @@ def __eq__(self, o: object) -> bool: def rev_tid(self) -> str: return join_rev_tid(self.rev, self.tid) + @property + def typ_rev_tid(self) -> str: + return f"{self.typ}:{self.rev_tid}" if self.typ else self.rev_tid + # public: parse methods def parse_arg_revs(src_revs: str) -> List[TypRevTid]: diff --git a/ymir/command/mir/tools/settings.py b/ymir/command/mir/tools/settings.py index 4a243ce0e5..8fc5d43d8d 100644 --- a/ymir/command/mir/tools/settings.py +++ b/ymir/command/mir/tools/settings.py @@ -3,4 +3,17 @@ EXECUTOR_CONFIG_KEY = 'executor_config' TASK_CONTEXT_KEY = 'task_context' TASK_CONTEXT_PARAMETERS_KEY = 'task_parameters' +TASK_CONTEXT_PREPROCESS_KEY = 'preprocess' EXECUTOR_OUTLOG_NAME = 'ymir-executor-out.log' + +BYTES_PER_MB = 1048576 +ASSET_LIMIT_PER_DATASET = 1000000 + +# evaluate default args +DEFAULT_EVALUATE_CONF_THR = 0.005 +DEFAULT_EVALUATE_IOU_THR = '0.5' +DEFAULT_EVALUATE_SUB_CKS = 10 + +# evaluation limitations +MAX_EVALUATION_ASSETS_COUNT = 50000 +MAX_EVALUATION_CLASS_IDS_COUNT = 20 diff --git a/ymir/command/mir/tools/utils.py b/ymir/command/mir/tools/utils.py deleted file mode 100644 index 59ad37c36a..0000000000 --- a/ymir/command/mir/tools/utils.py +++ /dev/null @@ -1,339 +0,0 @@ -from dataclasses import asdict, dataclass, field -from functools import wraps -import linecache -import logging -import os -import 
pathlib -import time -import requests -import shutil -import tarfile -from typing import Any, Callable, Dict, List, Optional, Union - -from PIL import Image, UnidentifiedImageError -from pydantic import BaseModel -import yaml - -from mir import scm -from mir.tools import hash_utils, settings as mir_settings -from mir.tools.code import MirCode -from mir.tools.errors import MirRuntimeError - - -def time_it(f: Callable) -> Callable: - @wraps(f) - def wrapper(*args: tuple, **kwargs: Dict) -> Callable: - _start = time.time() - _ret = f(*args, **kwargs) - _cost = time.time() - _start - logging.info(f"|-{f.__name__} costs {_cost:.2f}s({_cost / 60:.2f}m).") - return _ret - - return wrapper - - -# project -def project_root() -> str: - root = str(pathlib.Path(__file__).parent.parent.parent.absolute()) - return root - - -# mir repo infos -def mir_repo_head_name(git: Union[str, scm.CmdScm]) -> Optional[str]: - """ get current mir repo head name (may be branch, or commit id) """ - git_scm = None - if isinstance(git, str): - git_scm = scm.Scm(git, scm_executable="git") - elif isinstance(git, scm.CmdScm): - git_scm = git - else: - raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message="invalid git: needs str or CmdScm") - - git_result = git_scm.rev_parse(["--abbrev-ref", "HEAD"]) - if isinstance(git_result, str): - return git_result - elif isinstance(git_result, bytes): - return git_result.decode("utf-8") - return str(git_result) - - -def mir_repo_commit_id(git: Union[str, scm.CmdScm], branch: str = "HEAD") -> str: - """ get mir repo branch's commit id """ - git_scm = None - if isinstance(git, str): - git_scm = scm.Scm(git, scm_executable="git") - elif isinstance(git, scm.CmdScm): - git_scm = git - else: - raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message="invalid git: needs str or CmdScm") - - git_result = git_scm.rev_parse(branch) - if isinstance(git_result, str): - return git_result - elif isinstance(git_result, bytes): - return git_result.decode("utf-8") - return str(git_result) - - -# Store assets in asset_ids to out_root/sub_folder, -# return relative path to the out_root, staring with sub_folder. -# Set overwrite to False to avoid overwriting. -def store_assets_to_dir(asset_ids: List[str], - out_root: str, - sub_folder: str, - asset_location: str, - overwrite: bool = False, - create_prefix: bool = True, - need_suffix: bool = True) -> Dict[str, str]: - """ - load assets in location and save them to destination local folder - Args: - asset_ids: a list of asset ids (asset hashes) - out_root: the root of output path - sub_folder: sub folder to the output path, if no sub, set to '.' 
- asset_location: server location prefix of assets, if set to none, try to read it from mir repo config - overwrite (bool): if True, still copy assets even if assets already exists in export dir - create_prefix (bool): use last 2 chars of asset id as a sub dir - """ - # if out_root exists, but not a folder, raise error - if os.path.exists(out_root) and not os.path.isdir(out_root): - raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message="invalid out_root") - os.makedirs(out_root, exist_ok=True) - sub_dir_abs = os.path.join(out_root, sub_folder) - os.makedirs(sub_dir_abs, exist_ok=True) - - assets_location = _get_assets_location(asset_ids, asset_location) - - unknown_format_count = 0 - total_count = len(asset_ids) - asset_id_to_rel_paths: Dict[str, str] = {} - for idx, asset_id in enumerate(asset_ids): - if create_prefix: - suffix = asset_id[-2:] - sub_sub_folder_abs = os.path.join(sub_dir_abs, suffix) - os.makedirs(sub_sub_folder_abs, exist_ok=True) - sub_sub_folder_rel = os.path.join(sub_folder, suffix) - else: - sub_sub_folder_abs = sub_dir_abs - sub_sub_folder_rel = sub_folder.strip("./") - - if need_suffix: - try: - asset_image = Image.open(assets_location[asset_id]) - file_format = asset_image.format.lower() # type: ignore - except UnidentifiedImageError: - file_format = 'unknown' - unknown_format_count += 1 - - file_name = (f"{asset_id}.{file_format.lower()}" if need_suffix else asset_id) - asset_path_abs = os.path.join(sub_sub_folder_abs, file_name) # path started from out_root - asset_path_rel = os.path.join(sub_sub_folder_rel, file_name) # path started from sub_folder - _store_asset_to_location(assets_location[asset_id], asset_path_abs, overwrite=overwrite) - asset_id_to_rel_paths[asset_id] = asset_path_rel - - if idx > 0 and idx % 5000 == 0: - logging.info(f"exporting {idx} / {total_count} assets") - - if unknown_format_count > 0: - logging.warning(f"unknown format asset count: {unknown_format_count}") - - return asset_id_to_rel_paths - - -def _store_asset_to_location(src: str, dst: str, overwrite: bool = False) -> None: - if not src or not dst: - return - os.makedirs(os.path.dirname(dst), exist_ok=True) - if not overwrite and os.path.isfile(dst): - return - if src.startswith('http'): # from http request - response = requests.get(src) - if len(response.content) > 0: - with open(dst, "wb") as f: - f.write(response.content) - elif src.startswith('/'): # from filesystem, require abs path. - shutil.copyfile(src, dst) - else: - raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, - error_message=f"Invalid src, not a abs path: {src}") - - -def _get_assets_location(asset_ids: List[str], asset_location: str) -> Dict[str, str]: - """ - get asset locations - Args: - asset_ids: a list of asset ids (asset hashes) - asset_location: the server location of assets. - Returns: - a dict, key: asset id, value: asset location url - Raises: - Attribute exception if asset_location is not set, and can not be found in config file - """ - - # asset_location is a required field. - # CMD layer should NOT aware where the asset is stored. 
- if not asset_location: - raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message="asset_location is not set.") - - return {id: os.path.join(asset_location, id) for id in asset_ids} - - -@dataclass -class ModelStorage: - models: List[str] = field(default_factory=list) - executor_config: Dict[str, Any] = field(default_factory=dict) - task_context: Dict[str, Any] = field(default_factory=dict) - class_names: List[str] = field(init=False) - - def __post_init__(self) -> None: - self.class_names = self.executor_config.get('class_names', []) - - # check valid - if not self.models or not self.executor_config or not self.task_context or not self.class_names: - raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, - error_message='ModelStorage invalid: not enough infomations') - - def as_dict(self) -> Dict[str, Any]: - return asdict(self) - - -def prepare_model(model_location: str, model_hash: str, dst_model_path: str) -> ModelStorage: - """ - unpack model to `dst_model_path` - - Args: - model_location (str): model storage dir - model_hash (str): hash or name of model package - dst_model_path (str): path to destination model directory - - Raises: - MirRuntimeError: if dst_model_path is not a directory - MirRuntimeError: if model not found - MirRuntimeError: if model package is invalid (lacks params, json or config file) - - Returns: - ModelStorage: rel path to params, json, weights file and config file (start from dest_root) - """ - tar_file = os.path.join(model_location, model_hash) - if not os.path.isfile(tar_file): - raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, - error_message=f"tar_file is not a file: {tar_file}") - - os.makedirs(dst_model_path, exist_ok=True) - logging.info(f"extracting models from {tar_file}") - with tarfile.open(tar_file, 'r') as tar_gz: - for item in tar_gz: - logging.info(f"extracting {item} -> {dst_model_path}") - tar_gz.extract(item, dst_model_path) - - with open(os.path.join(dst_model_path, 'ymir-info.yaml'), 'r') as f: - ymir_info_dict = yaml.safe_load(f.read()) - model_storage = ModelStorage(models=ymir_info_dict['models'], - executor_config=ymir_info_dict[mir_settings.EXECUTOR_CONFIG_KEY], - task_context=ymir_info_dict[mir_settings.TASK_CONTEXT_KEY]) - - return model_storage - - -def pack_and_copy_models(model_storage: ModelStorage, model_dir_path: str, model_location: str) -> str: - """ - pack model, returns model hash of the new model package - """ - logging.info(f"packing models {model_dir_path} -> {model_location}") - - ymir_info_file_name = 'ymir-info.yaml' - ymir_info_file_path = os.path.join(model_dir_path, ymir_info_file_name) - with open(ymir_info_file_path, 'w') as f: - yaml.safe_dump(model_storage.as_dict(), f) - - tar_file_path = os.path.join(model_dir_path, 'model.tar.gz') - with tarfile.open(tar_file_path, 'w:gz') as tar_gz_f: - for model_name in model_storage.models: - model_path = os.path.join(model_dir_path, model_name) - logging.info(f" packing {model_path} -> {model_name}") - tar_gz_f.add(model_path, model_name) - logging.info(f" packing {ymir_info_file_path} -> {ymir_info_file_name}") - tar_gz_f.add(ymir_info_file_path, ymir_info_file_name) - - model_hash = hash_utils.sha1sum_for_file(tar_file_path) - shutil.copyfile(tar_file_path, os.path.join(model_location, model_hash)) - os.remove(tar_file_path) - - logging.info(f"pack success, model hash: {model_hash}") - - return model_hash - - -def repo_dot_mir_path(mir_root: str) -> str: - dir = os.path.join(mir_root, '.mir') - os.makedirs(dir, exist_ok=True) - 
return dir - - -# see also: sample_executor/ef/env.py -class _EnvInputConfig(BaseModel): - root_dir: str = '/in' - assets_dir: str = '/in/assets' - annotations_dir: str = '/in/annotations' - models_dir: str = '/in/models' - training_index_file: str = '' - val_index_file: str = '' - candidate_index_file: str = '' - config_file: str = '/in/config.yaml' - - -class _EnvOutputConfig(BaseModel): - root_dir: str = '/out' - models_dir: str = '/out/models' - tensorboard_dir: str = '/out/tensorboard' - training_result_file: str = '/out/models/result.yaml' - mining_result_file: str = '/out/result.tsv' - infer_result_file: str = '/out/infer-result.json' - monitor_file: str = '/out/monitor.txt' - - -class _EnvConfig(BaseModel): - task_id: str = 'default-task' - run_training: bool = False - run_mining: bool = False - run_infer: bool = False - - input: _EnvInputConfig = _EnvInputConfig() - output: _EnvOutputConfig = _EnvOutputConfig() - - -def generate_training_env_config_file(task_id: str, env_config_file_path: str) -> None: - env_config = _EnvConfig() - env_config.task_id = task_id - env_config.run_training = True - env_config.input.training_index_file = '/in/train-index.tsv' - env_config.input.val_index_file = '/in/val-index.tsv' - - with open(env_config_file_path, 'w') as f: - yaml.safe_dump(env_config.dict(), f) - - -def generate_mining_infer_env_config_file(task_id: str, run_mining: bool, run_infer: bool, - env_config_file_path: str) -> None: - # TODO: seperate command mining and infer - env_config = _EnvConfig() - env_config.task_id = task_id - env_config.run_mining = run_mining - env_config.run_infer = run_infer - env_config.input.candidate_index_file = '/in/candidate-index.tsv' - - with open(env_config_file_path, 'w') as f: - yaml.safe_dump(env_config.dict(), f) - - -def collect_executor_outlog_tail(work_dir: str, tail_line_count: int = 5) -> str: - out_log_path = os.path.join(work_dir, 'out', mir_settings.EXECUTOR_OUTLOG_NAME) - if not os.path.isfile(out_log_path): - return '' - - tail_lines = linecache.getlines(out_log_path)[-1 * tail_line_count:] - if not tail_lines: - return '' - - joint_tail_lines = ''.join(tail_lines) - return f"EXECUTOR OUTLOG TAIL FROM: {out_log_path}\n{joint_tail_lines}" diff --git a/ymir/command/mir/version.py b/ymir/command/mir/version.py index e2d37f7f10..516041719c 100644 --- a/ymir/command/mir/version.py +++ b/ymir/command/mir/version.py @@ -1,2 +1,29 @@ -# Package version -__version__ = '1.1.0' +# Current ymir system version +YMIR_VERSION = '2.0.0' + +# Default sandbox version +DEFAULT_YMIR_SRC_VERSION = '1.1.0' + +# Protocol version for training, mining and infer executors +TMI_PROTOCOL_VERSION = '1.1.0' + + +def ymir_salient_version(ver: str) -> str: + _SALIENT_VERSIONS = { + DEFAULT_YMIR_SRC_VERSION: DEFAULT_YMIR_SRC_VERSION, + '1.3.0': '2.0.0', + '2.0.0': '2.0.0', + } + return _SALIENT_VERSIONS[ver] + + +def ymir_model_salient_version(ver: str) -> str: + """ + get model package version from ymir version + """ + _PACKAGE_VERSIONS = { + DEFAULT_YMIR_SRC_VERSION: DEFAULT_YMIR_SRC_VERSION, + '1.3.0': '2.0.0', + '2.0.0': '2.0.0', + } + return _PACKAGE_VERSIONS[ver] diff --git a/ymir/command/proto/mir_command.proto b/ymir/command/proto/mir_command.proto index 570513d396..ddab17fb85 100644 --- a/ymir/command/proto/mir_command.proto +++ b/ymir/command/proto/mir_command.proto @@ -2,6 +2,8 @@ syntax = "proto3"; package mir.command; +option go_package = "/protos"; + /// assertion type: training, validation or test enum TvtType { TvtTypeUnknown = 0; @@ -40,9 +42,9 @@ enum 
TaskType {
     TaskTypeFusion = 11;
     TaskTypeInit = 12;
     TaskTypeImportModel = 13;
+    TaskTypeCopyModel = 14;
+    TaskTypeDatasetInfer = 15;
     TaskTypeEvaluate = 16;
-
-    reserved 14, 15;
 };
 
 enum TaskState {
@@ -68,10 +70,19 @@ enum MirStorage {
     MIR_CONTEXT = 4;
 }
 
-enum LabelFormat {
-    NO_ANNOTATION = 0;
-    PASCAL_VOC = 1;
-    IF_ARK = 2;
+enum AnnoFormat {
+    AF_NO_ANNOTATION = 0;
+    AF_DET_PASCAL_VOC = 1;
+    AF_DET_ARK_JSON = 2;
+    AF_DET_LS_JSON = 3;
+    AF_SEG_POLYGON = 4;
+    AF_SEG_MASK = 5;
+};
+
+enum AssetFormat {
+    AF_UNKNOWN = 0;
+    AF_RAW = 1;
+    AF_LMDB = 2;
 };
 
 /// ========== metadatas.mir ==========
@@ -81,44 +92,109 @@ message MirMetadatas {
 };
 
 message MetadataAttributes {
-    string dataset_name = 1;
     Timestamp timestamp = 2;
     TvtType tvt_type = 3;
     AssetType asset_type = 4;
     int32 width = 5;  /// column number
     int32 height = 6;  /// row number
     int32 image_channels = 7;  /// (for images) channel count
+    int32 byte_size = 8;
+    string origin_filename = 9;
+
+    reserved 1;
};
 
 message Timestamp {
-    /// start time stamp
-    int64 start = 1;
+    /// start time stamp, use int32 as int64 is not correctly parsed.
+    int32 start = 1;
     /// duration (in seconds), for images, it's always 0
     float duration = 2;
 };
 
 /// ========== annotations.mir ==========
 message MirAnnotations {
-    /// key: task id, value: annotations of that single task
-    map<string, SingleTaskAnnotations> task_annotations = 1;
-    string head_task_id = 2;
+    SingleTaskAnnotations ground_truth = 3;
+    SingleTaskAnnotations prediction = 4;
+    // key: asset id, value: cks and image quality, from pred and gt
+    map<string, SingleImageCks> image_cks = 5;
+
+    reserved 1, 2;
+};
+
+enum AnnoType {
+    AT_UNKNOWN = 0;
+    AT_CLASS = 1;        // Classification with class id, not implemented.
+    AT_DET_BOX = 2;      // Detection w. bounding box.
+    AT_SEG_POLYGON = 3;  // Semantic Segmentation w. polygons.
+    AT_SEG_MASK = 4;     // Instance Segmentation w. mask.
+};
 
 message SingleTaskAnnotations {
     /// key: image id, value: annotations of that single image
     map<string, SingleImageAnnotations> image_annotations = 1;
+    string task_id = 2;
+    AnnoType type = 3;
+    // Set of all shown class ids.
+    repeated int32 task_class_ids = 4;
+    map<int32, IntPoint> map_id_color = 5;
+
+    // meta infos of this SingleTaskAnnotations
+    repeated int32 eval_class_ids = 10;
+    // model meta info associated with this single_task_annotations
+    ModelMeta model = 11;
+    // executor config used to generate this single task annotations
+    string executor_config = 12;
 };
 
 message SingleImageAnnotations {
-    repeated Annotation annotations = 2;
+    repeated ObjectAnnotation boxes = 2;
+
+    repeated ObjectAnnotation polygons = 3;
+
+    MaskAnnotation mask = 4;
+    // Set of class ids shown in this image.
+    repeated int32 img_class_ids = 5;
+
+    reserved 1;
 };
 
-message Annotation {
+message SingleImageCks {
+    map<string, string> cks = 1;
+    float image_quality = 2;
+}
+
+message MaskAnnotation {
+    // PNG image with 3 channels where each pixel corresponds to a class_id.
+    bytes semantic_mask = 1;
+    // PNG image with 3 channels where each pixel corresponds to an object_id.
+    bytes instance_mask = 2;
+    repeated int32 object_ids = 3;
+}
+
+message ObjectAnnotation {
     // Index of this annotation in current single image, may be different from the index in repeated field.
     int32 index = 1;
     Rect box = 2;
     int32 class_id = 3;
     double score = 4;
+    float anno_quality = 5;
+    map<string, string> tags = 6;
+    ConfusionMatrixType cm = 7;
+    int32 det_link_id = 8;
+    string class_name = 9;  // for data parsed from outside, e.g. inference.
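+    // Illustrative example (all values are made up): a detection of class_id 2
+    // with box {x: 26, y: 189, w: 19, h: 50}, score 0.83, cm: TP and
+    // tags {"color": "white"} round-trips through the VOC XML exporter above.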
+    repeated IntPoint polygon = 10;
 };
 
+enum ConfusionMatrixType {
+    NotSet = 0;
+    TP = 1;
+    FP = 2;
+    FN = 3;
+    TN = 4;
+    Unknown = 5;
+    // Matched True Positive, only for gt.
+    MTP = 11;
+    IGNORED = 12;
+};
 
 message Rect {
@@ -126,28 +202,42 @@ message Rect {
     int32 y = 2;
     int32 w = 3;
     int32 h = 4;
+    float rotate_angle = 5;  // unit in pi.
 };
 
 /// ========== keywords.mir ==========
 message MirKeywords {
-    // key: asset hash, value: keywords list
-    // cnt: count of keywords
-    map<string, Keywords> keywords = 1;
-    // key: class id, value: assert ids
-    map<int32, Assets> index_predifined_keyids = 6;
+    CiTagToIndex pred_idx = 7;  // ci to assets, generated from preds
+    CiTagToIndex gt_idx = 8;  // ci to assets, generated from gt
+
+    // key: ck main key, value: assets and assets with sub keys, from (mir_annotations.image_cks) pred and gt
+    map<string, AssetAnnoIndex> ck_idx = 9;
 
-    reserved 2, 3, 4, 5;
+    reserved 1, 2, 3, 4, 5, 6;
 };
 
-message Assets {
+message CiTagToIndex {
+    // key: ci, value: annos
+    map<int32, MapStringToInt32List> cis = 1;
+    // key: ck main key, value: annos and annos with sub keys
+    map<string, AssetAnnoIndex> tags = 2;
+};
+
+message StringList {
     repeated string asset_ids = 1;
 };
 
-message Keywords {
-    // predefined: managed id-keyword map
-    repeated int32 predifined_keyids = 1;
-    // customized: arbitrary user defined keywords
-    repeated string customized_keywords = 2;
+message MapStringToInt32List {
+    map<string, Int32List> key_ids = 1;
+};
+
+message Int32List {
+    repeated int32 ids = 1;
+}
+
+message AssetAnnoIndex {
+    map<string, Int32List> asset_annos = 1;  // key: asset id, value: annotation indexes
+    map<string, MapStringToInt32List> sub_indexes = 2;  // key: ck value, value: asset and its annotation indexes
 };
 
 /// ========== tasks.mir ==========
@@ -163,14 +253,16 @@ message Task {
     /// auto generated unique id
     string task_id = 3;
     /// execution time of this task
-    int64 timestamp = 5;  // RFC 3339 date strings
-    /// (special for training task): result model for cmd train
+    int32 timestamp = 5;  // RFC 3339 date strings
+    /// (for training task): result model for cmd train
     ModelMeta model = 6;
-    /// (special for import task): unknown types for cmd import
-    map<string, int32> unknown_types = 7;
     int32 return_code = 8;
     string return_msg = 9;
     Evaluation evaluation = 10;
+    /// (for import task): new types for cmd import, key: class name, value: asset count
+    map<string, int32> new_types = 11;
+    /// (for import task): reason for new types, True: added, False: ignored
+    bool new_types_added = 12;
 
     string serialized_task_parameters = 102;
     string serialized_executor_config = 103;
@@ -178,7 +270,7 @@ message Task {
     string dst_rev = 105;
     string executor = 106;
 
-    reserved 4, 100, 101;
+    reserved 4, 7, 100, 101;
 };
 
 message ModelMeta {
@@ -188,37 +280,54 @@ message ModelMeta {
     float mean_average_precision = 2;
     /// context generated by train command
     string context = 3;
+    map<string, ModelStage> stages = 4;
+    string best_stage_name = 5;
+    repeated string class_names = 6;
+};
+
+message ModelStage {
+    string stage_name = 1;
+    repeated string files = 2;
+    int32 timestamp = 3;
+    float mAP = 4;
 };
 
 message Evaluation {
     EvaluateConfig config = 1;
-    // key: prediction dataset id, value: evaluation result for ground truth and prediction dataset
-    map<string, SingleDatasetEvaluation> dataset_evaluations = 2;
+    SingleDatasetEvaluation dataset_evaluation = 3;
+    SingleDatasetEvaluation main_ck = 4;
+    map<string, SingleDatasetEvaluation> sub_cks = 5;
+    EvaluationState state = 6;
+
+    reserved 2;
 }
 
 message EvaluateConfig {
-    string gt_dataset_id = 1;
-    repeated string pred_dataset_ids = 2;
     float conf_thr = 3;
     string iou_thrs_interval = 4;
     bool need_pr_curve = 5;
+    repeated int32 class_ids = 7;
+    string main_ck = 8;
+
+    reserved 1, 2, 6;
 }
 
 message SingleDatasetEvaluation {
     float conf_thr = 1;
-    string gt_dataset_id = 2;
-    string pred_dataset_id = 3;
     map<string, SingleIouEvaluation> iou_evaluations = 4;  // key: string of iou threshold
     SingleIouEvaluation iou_averaged_evaluation = 5;  // average for all ious
+
+    reserved 2, 3;
 }
 
 message SingleIouEvaluation {
-    map<int32, SingleTopicEvaluation> ci_evaluations = 1;  // key: class ids
-    SingleTopicEvaluation ci_averaged_evaluation = 2;  // evaluations averaged by class ids
-    map<string, SingleTopicEvaluation> topic_evaluations = 3;  // key: topic names
+    map<int32, SingleEvaluationElement> ci_evaluations = 1;  // key: class ids
+    SingleEvaluationElement ci_averaged_evaluation = 2;  // evaluations averaged by class ids
+
+    reserved 3;
}
 
-message SingleTopicEvaluation {
+message SingleEvaluationElement {
     float ap = 1;
     float ar = 2;
     int32 tp = 3;
@@ -227,23 +336,82 @@ message SingleTopicEvaluation {
     repeated FloatPoint pr_curve = 6;
 }
 
+message IntPoint {
+    int32 x = 1;
+    int32 y = 2;
+    int32 z = 3;
+}
+
 message FloatPoint {
     float x = 1;
     float y = 2;
+    float z = 3;
+}
+
+enum EvaluationState {
+    // evaluate not started
+    ES_NOT_SET = 0;
+    // evaluation result ready to use
+    ES_READY = 1;
+    // evaluation not finished because there's no gt or pred
+    ES_NO_GT_OR_PRED = 2;
+    // evaluation not finished because there're too many images or too many class ids
+    ES_EXCEEDS_LIMIT = 3;
+    // evaluation not finished because there's no evaluate class ids
+    ES_NO_CLASS_IDS = 4;
}
 
 /// ========== context.mir ==========
 message MirContext {
     /// total images count
     int32 images_cnt = 1;
-    /// total negative images count (images without any annotations)
-    int32 negative_images_cnt = 2;
-    /// total negative images count (images without any project class names)
-    int32 project_negative_images_cnt = 3;
-    /// key: class id, value: images count
-    map<int32, int32> predefined_keyids_cnt = 4;
-    /// key: class id (only in this project), value: images count
-    map<int32, int32> project_predefined_keyids_cnt = 5;
-    /// key: customized keywords, value: images count
-    map<string, int32> customized_keywords_cnt = 6;
+
+    /// from pred and gt
+    map<string, SingleMapCount> cks_cnt = 6;
+
+    int32 total_asset_mbytes = 11;
+
+    AnnoStats pred_stats = 100;
+    AnnoStats gt_stats = 101;
+
+    reserved 2, 3, 4, 5, 7, 8, 9, 10, 12;
+};
+
+message SingleMapCount {
+    int32 cnt = 1;
+    map<string, int32> sub_cnt = 2;
+};
+
+message AnnoStats {
+    int32 total_cnt = 1;
+    int32 positive_asset_cnt = 2;
+    int32 negative_asset_cnt = 3;
+    map<string, SingleMapCount> tags_cnt = 7;  // key: main tag name, value: main tag count and sub tag names and counts
+    map<int32, int32> class_ids_cnt = 8;  // key: class ids, value: asset count for this class id
+
+    // Shortcut of class_ids for evaluation (dup. field as in SingleTaskAnnotations).
+    repeated int32 eval_class_ids = 9;
+
+    reserved 4, 5, 6;
+};
+
+message ExportConfig {
+    // Asset config.
+    AssetFormat asset_format = 1;
+    string asset_dir = 2;
+    string asset_index_file = 3;
+    // Index file writes abs path. In TMI case, path should be converted, e.g. /in/assets.
+    string asset_index_prefix = 4;
+    string media_location = 5;
+    bool need_sub_folder = 6;
+
+    // Annotation config.
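+    // Mapping assumed from the exporters earlier in this patch: AF_DET_PASCAL_VOC
+    // writes per-image VOC XML, AF_DET_LS_JSON writes Label Studio task JSON, and
+    // AF_SEG_MASK writes the raw semantic_mask PNG bytes.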
+ AnnoFormat anno_format = 50; + string gt_dir = 51; + string gt_index_file = 52; + string gt_index_prefix = 53; + string pred_dir = 54; + string pred_index_file = 55; + string pred_index_prefix = 56; + string tvt_index_dir = 57; } diff --git a/ymir/command/requirements-ci.txt b/ymir/command/requirements-ci.txt index 63467ba455..c9823f78a9 100644 --- a/ymir/command/requirements-ci.txt +++ b/ymir/command/requirements-ci.txt @@ -1,6 +1,6 @@ flake8==3.8.3 mypy==0.910 -pre-commit +pre-commit==2.15.0 pytest==6.2.4 pytest-cov==2.12.1 pytest-mock==3.6.1 @@ -8,3 +8,4 @@ pytest-xdist==2.5.0 types-PyYAML==6.0.1 types-protobuf==3.18.2 types-requests==2.25.2 +types-retry==0.9.7 diff --git a/ymir/command/requirements.txt b/ymir/command/requirements.txt index d943a919f9..398720a7c5 100644 --- a/ymir/command/requirements.txt +++ b/ymir/command/requirements.txt @@ -1,8 +1,11 @@ -Pillow>=8.2.0 -fasteners>=0.16.3 -numpy==1.21.2 +Pillow==8.2.0 +fasteners==0.16.3 +lmdb==1.3.0 +numpy==1.22.0 protobuf==3.18.1 -pydantic>=1.8.2 -pyyaml>=5.4.1 -requests>=2.25.1 -tensorboardX>=2.4.1 +pydantic==1.9.0 +pyyaml==5.4.1 +requests==2.25.1 +retry==0.9.2 +tensorboardX==2.4.1 +xmltodict==0.12.0 diff --git a/ymir/command/setup.cfg b/ymir/command/setup.cfg index 83fd78fca8..8be0cb7366 100644 --- a/ymir/command/setup.cfg +++ b/ymir/command/setup.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 1.1.0 +current_version = 1.2.1 [bumpversion:file:mir/version.py] search = '{current_version}' diff --git a/ymir/command/setup.py b/ymir/command/setup.py index 627a689966..dcd31f254e 100644 --- a/ymir/command/setup.py +++ b/ymir/command/setup.py @@ -2,7 +2,7 @@ from mir import version -print(version.__version__) +print(version.YMIR_VERSION) # Module dependencies requirements = [] @@ -12,7 +12,7 @@ setup( name='ymir-cmd', - version=version.__version__, + version=version.YMIR_VERSION, python_requires=">=3.8.10", author_email="contact.viesc@gmail.com", description="mir: A data version control tool for YMIR", diff --git a/ymir/command/tests/assets/2007_000032.xml b/ymir/command/tests/assets/2007_000032.xml index 1a4e3fde70..24dc4fae1e 100755 --- a/ymir/command/tests/assets/2007_000032.xml +++ b/ymir/command/tests/assets/2007_000032.xml @@ -12,6 +12,12 @@ 3 1 + 0.95 + + sunny + camera 0 + blue sky + aeroplane Frontal @@ -22,8 +28,15 @@ 78 375 183 + 0.22 - 0.5 + 0.5 + 0.62 + + 0 + white + Frontal + aeroplane @@ -35,7 +48,14 @@ 88 197 123 + 0.02 + 0.75 + + 0 + blue + Left + person @@ -47,7 +67,13 @@ 180 213 229 + 0.0 + 0.23 + + 1 + Rear + PERSON @@ -59,6 +85,12 @@ 189 44 238 + 0.12 + 0.35 + + 1 + Rear + diff --git a/ymir/command/tests/assets/2007_000243.xml b/ymir/command/tests/assets/2007_000243.xml index 7378c8bccb..a6be2d96dd 100755 --- a/ymir/command/tests/assets/2007_000243.xml +++ b/ymir/command/tests/assets/2007_000243.xml @@ -12,6 +12,12 @@ 3 1 + 0.83 + + rainy + camera 1 + gray sky + aeroplane Unspecified @@ -22,6 +28,13 @@ 127 274 193 + -0.02 + 0.75 + + 0 + pink + Unspecified + diff --git a/ymir/command/tests/assets/pred_meta.yaml b/ymir/command/tests/assets/pred_meta.yaml new file mode 100644 index 0000000000..63fb30d85d --- /dev/null +++ b/ymir/command/tests/assets/pred_meta.yaml @@ -0,0 +1,13 @@ +eval_class_names: + - person + - cat + - airplane +model: + mean_average_precision: 0.6 + class_names: + - person + - cat + - airplane + - unknown-cat +executor_config: + stage_name: default diff --git a/ymir/command/tests/unit/test_cmd_branch_log.py b/ymir/command/tests/unit/test_cmd_branch_log.py deleted file mode 100644 index 
0fc5a6d761..0000000000 --- a/ymir/command/tests/unit/test_cmd_branch_log.py +++ /dev/null @@ -1,61 +0,0 @@ -import os -import shutil -import unittest - -from mir.commands.branch import CmdBranch -from mir.commands.log import CmdLog -from mir.tools.code import MirCode - -from tests import utils as test_utils - - -class TestCmdBranchAndLog(unittest.TestCase): - # life cycle - def __init__(self, methodName: str) -> None: - super().__init__(methodName=methodName) - self._mir_root = test_utils.dir_test_root(self.id().split(".")[-3:]) - - def setUp(self) -> None: - self.__prepare_dir(self._mir_root) - self.__prepare_mir_repo(self._mir_root) - return super().setUp() - - def tearDown(self) -> None: - self.__deprepare_dir(self._mir_root) - return super().tearDown() - - # private: prepare - def __prepare_dir(self, mir_root: str): - if os.path.isdir(mir_root): - shutil.rmtree(mir_root) - os.makedirs(mir_root, exist_ok=True) - - def __deprepare_dir(self, mir_root: str): - if os.path.isdir(mir_root): - shutil.rmtree(mir_root) - - def __prepare_mir_repo(self, mir_root: str): - test_utils.mir_repo_init(self._mir_root) - - # public: test cases - def test_00(self): - # test branch - fake_args = type('', (), {})() - fake_args.mir_root = self._mir_root - fake_args.force_delete = None - cmd_instance = CmdBranch(fake_args) - self.assertEqual(MirCode.RC_OK, cmd_instance.run()) - - fake_args.force_delete = 'master' - cmd_instance = CmdBranch(fake_args) - self.assertNotEqual(MirCode.RC_OK, cmd_instance.run()) - - # test log - fake_args = type('', (), {})() - fake_args.mir_root = self._mir_root - fake_args.decorate = False - fake_args.oneline = False - fake_args.graph = False - fake_args.dog = False - cmd_instance = CmdLog(fake_args) - self.assertEqual(MirCode.RC_OK, cmd_instance.run()) diff --git a/ymir/command/tests/unit/test_cmd_copy.py b/ymir/command/tests/unit/test_cmd_copy.py index 387f695f29..96fb7021bd 100644 --- a/ymir/command/tests/unit/test_cmd_copy.py +++ b/ymir/command/tests/unit/test_cmd_copy.py @@ -1,6 +1,6 @@ import os import shutil -from typing import List +from typing import List, Set import unittest from mir.commands import copy @@ -50,31 +50,34 @@ def __prepare_src_mir(self): test_utils.mir_repo_init(self._src_mir_root) test_utils.mir_repo_create_branch(self._src_mir_root, 'a') + self.__prepare_src_mir_branch_a(task_id='t0') + self.__prepare_src_mir_branch_a(task_id='t1', eval_class_ids=[0, 1]) + def __prepare_src_mir_branch_a(self, task_id: str, eval_class_ids: List[int] = []) -> None: mir_metadatas = mirpb.MirMetadatas() mir_metadatas.attributes['asset0'] + mir_metadatas.attributes['asset0'].width = 30 + mir_metadatas.attributes['asset0'].height = 30 mir_metadatas.attributes['asset1'] + mir_metadatas.attributes['asset1'].width = 30 + mir_metadatas.attributes['asset1'].height = 30 mir_annotations = mirpb.MirAnnotations() - mir_annotations.head_task_id = 't0' - mir_annotations.task_annotations['t0'] - mir_annotations.task_annotations['t0'].image_annotations['asset0'].CopyFrom( + mir_annotations.prediction.image_annotations['asset0'].CopyFrom( self.__create_image_annotations(type_ids=[1, 2, 3])) - mir_annotations.task_annotations['t0'].image_annotations['asset1'].CopyFrom( + mir_annotations.prediction.image_annotations['asset1'].CopyFrom( self.__create_image_annotations(type_ids=[3])) + mir_annotations.prediction.eval_class_ids[:] = eval_class_ids - mir_keywords = mirpb.MirKeywords() - mir_keywords.keywords['asset0'].predifined_keyids.extend([1, 2, 3]) - 
mir_keywords.keywords['asset1'].predifined_keyids.extend([3]) - + model_meta = mirpb.ModelMeta(mean_average_precision=0.3) task = mir_storage_ops.create_task(task_type=mirpb.TaskType.TaskTypeTraining, - task_id='t0', + task_id=task_id, message='training', - model_mAP=0.3) + model_meta=model_meta) mir_storage_ops.MirStorageOps.save_and_commit(mir_root=self._src_mir_root, mir_branch='a', - his_branch='master', + his_branch='a', mir_datas={ mirpb.MirStorage.MIR_METADATAS: mir_metadatas, mirpb.MirStorage.MIR_ANNOTATIONS: mir_annotations, @@ -84,14 +87,15 @@ def __prepare_src_mir(self): def __create_image_annotations(self, type_ids: List[int]) -> mirpb.SingleImageAnnotations: single_image_annotations = mirpb.SingleImageAnnotations() for idx, type_id in enumerate(type_ids): - annotation = mirpb.Annotation() + annotation = mirpb.ObjectAnnotation() annotation.index = idx annotation.class_id = type_id - single_image_annotations.annotations.append(annotation) + single_image_annotations.boxes.append(annotation) return single_image_annotations # private: check results - def __check_results(self, dst_branch: str, dst_tid: str, ignore_unknown_types: bool, drop_annotations: bool): + def __check_results(self, dst_branch: str, dst_tid: str, ignore_unknown_types: bool, drop_annotations: bool, + eval_class_ids_set: Set[int] = set()): [mir_metadatas, mir_annotations, mir_keywords, mir_tasks, _] = mir_storage_ops.MirStorageOps.load_multiple_storages( mir_root=self._mir_root, @@ -103,25 +107,21 @@ def __check_results(self, dst_branch: str, dst_tid: str, ignore_unknown_types: b metadatas_keys = set(mir_metadatas.attributes.keys()) self.assertEqual({'asset0', 'asset1'}, metadatas_keys) - self.assertEqual(dst_tid, mir_annotations.head_task_id) if drop_annotations: - self.assertEqual(0, len(mir_annotations.task_annotations[dst_tid].image_annotations)) + self.assertEqual(0, len(mir_annotations.prediction.image_annotations)) else: asset0_idx_ids = { annotation.index: annotation.class_id - for annotation in mir_annotations.task_annotations[dst_tid].image_annotations['asset0'].annotations + for annotation in mir_annotations.prediction.image_annotations['asset0'].boxes } asset1_idx_ids = { annotation.index: annotation.class_id - for annotation in mir_annotations.task_annotations[dst_tid].image_annotations['asset1'].annotations + for annotation in mir_annotations.prediction.image_annotations['asset1'].boxes } self.assertEqual({0: 2, 1: 1}, asset0_idx_ids) self.assertEqual({}, asset1_idx_ids) + self.assertEqual(eval_class_ids_set, set(mir_annotations.prediction.eval_class_ids)) - self.assertEqual({1, 2}, set(mir_keywords.keywords['asset0'].predifined_keyids)) - self.assertEqual(set(), set(mir_keywords.keywords['asset1'].predifined_keyids)) - - self.assertEqual(dst_tid, mir_tasks.head_task_id) mAP = mir_tasks.tasks[dst_tid].model.mean_average_precision self.assertTrue(mAP > 0.29999 and mAP < 0.30001) # it's actually 0.3 @@ -183,3 +183,19 @@ def test_normal_00(self): cmd_copy = copy.CmdCopy(fake_args) return_code = cmd_copy.run() self.assertNotEqual(MirCode.RC_OK, return_code) + + def test_normal_01(self) -> None: + # test cases for pred meta + fake_args = type('', (), {})() + fake_args.mir_root = self._mir_root + fake_args.data_mir_root = self._src_mir_root + fake_args.data_src_revs = 'a@t1' + fake_args.dst_rev = 'b@t1' + fake_args.work_dir = self._work_dir + fake_args.ignore_unknown_types = True + fake_args.drop_annotations = False + cmd_copy = copy.CmdCopy(fake_args) + return_code = cmd_copy.run() + 
self.assertEqual(MirCode.RC_OK, return_code) + self.__check_results(dst_branch='b', dst_tid='t1', ignore_unknown_types=True, drop_annotations=False, + eval_class_ids_set={0, 2}) diff --git a/ymir/command/tests/unit/test_cmd_evaluate.py b/ymir/command/tests/unit/test_cmd_evaluate.py deleted file mode 100644 index 58cab98077..0000000000 --- a/ymir/command/tests/unit/test_cmd_evaluate.py +++ /dev/null @@ -1,283 +0,0 @@ -import os -import shutil -import unittest - -from google.protobuf import json_format - -from mir.commands import evaluate -from mir.protos import mir_command_pb2 as mirpb -from mir.tools import mir_storage_ops -from mir.tools.code import MirCode -from tests import utils as test_utils - - -class TestCmdEvaluate(unittest.TestCase): - # life cycle - def __init__(self, methodName: str = ...) -> None: - super().__init__(methodName) - self._test_root = test_utils.dir_test_root(self.id().split('.')[-3:]) - self._working_root = os.path.join(self._test_root, 'work') - self._mir_root = os.path.join(self._test_root, 'mir-root') - - def setUp(self) -> None: - self._prepare_dirs() - test_utils.prepare_labels(mir_root=self._mir_root, names=['person', 'cat', 'tv']) - self._prepare_mir_repo() - return super().setUp() - - def tearDown(self) -> None: - self._deprepare_dirs() - return super().tearDown() - - # protected: setup and teardown - def _prepare_dirs(self) -> None: - test_utils.remake_dirs(self._test_root) - test_utils.remake_dirs(self._working_root) - test_utils.remake_dirs(self._mir_root) - - def _prepare_mir_repo(self) -> None: - test_utils.mir_repo_init(self._mir_root) - self._prepare_mir_repo_branch_a() - self._prepare_mir_repo_branch_b() - - def _prepare_mir_repo_branch_a(self) -> None: - metadatas_dict = { - 'attributes': { - 'a0': { - 'assetType': 'AssetTypeImageJpeg', - 'tvtType': 'TvtTypeUnknown', - 'width': 500, - 'height': 500, - 'imageChannels': 3 - }, - 'a1': { - 'assetType': 'AssetTypeImageJpeg', - 'tvtType': 'TvtTypeUnknown', - 'width': 500, - 'height': 500, - 'imageChannels': 3 - }, - 'a2': { - 'assetType': 'AssetTypeImageJpeg', - 'tvtType': 'TvtTypeUnknown', - 'width': 500, - 'height': 500, - 'imageChannels': 3 - } - } - } - mir_metadatas = mirpb.MirMetadatas() - json_format.ParseDict(metadatas_dict, mir_metadatas) - - annotations_dict = { - 'task_annotations': { - 'a': { - 'image_annotations': { - 'a0': { - 'annotations': [{ - 'index': 0, - 'box': { - 'x': 50, - 'y': 50, - 'w': 50, - 'h': 50, - }, - 'class_id': 0, - 'score': 1, - }, { - 'index': 1, - 'box': { - 'x': 150, - 'y': 50, - 'w': 75, - 'h': 75, - }, - 'class_id': 0, - 'score': 1, - }, { - 'index': 2, - 'box': { - 'x': 150, - 'y': 150, - 'w': 75, - 'h': 75, - }, - 'class_id': 1, - 'score': 1, - }, { - 'index': 3, - 'box': { - 'x': 350, - 'y': 50, - 'w': 100, - 'h': 100, - }, - 'class_id': 2, - 'score': 1, - }] - }, - 'a1': { - 'annotations': [{ - 'index': 0, - 'box': { - 'x': 300, - 'y': 300, - 'w': 100, - 'h': 100, - }, - 'class_id': 2, - 'score': 1, - }] - }, - } - } - }, - 'head_task_id': 'a' - } - mir_annotations = mirpb.MirAnnotations() - json_format.ParseDict(annotations_dict, mir_annotations) - - task = mir_storage_ops.create_task(task_type=mirpb.TaskType.TaskTypeImportData, task_id='a', message='import') - mir_storage_ops.MirStorageOps.save_and_commit(mir_root=self._mir_root, - mir_branch='a', - his_branch='master', - mir_datas={ - mirpb.MirStorage.MIR_METADATAS: mir_metadatas, - mirpb.MirStorage.MIR_ANNOTATIONS: mir_annotations, - }, - task=task) - - def _prepare_mir_repo_branch_b(self) -> None: - 
metadatas_dict = { - 'attributes': { - 'a0': { - 'assetType': 'AssetTypeImageJpeg', - 'tvtType': 'TvtTypeUnknown', - 'width': 500, - 'height': 500, - 'imageChannels': 3 - }, - 'a1': { - 'assetType': 'AssetTypeImageJpeg', - 'tvtType': 'TvtTypeUnknown', - 'width': 500, - 'height': 500, - 'imageChannels': 3 - }, - 'a2': { - 'assetType': 'AssetTypeImageJpeg', - 'tvtType': 'TvtTypeUnknown', - 'width': 500, - 'height': 500, - 'imageChannels': 3 - } - } - } - mir_metadatas = mirpb.MirMetadatas() - json_format.ParseDict(metadatas_dict, mir_metadatas) - - annotations_dict = { - 'task_annotations': { - 'b': { - 'image_annotations': { - 'a0': { - 'annotations': [{ - 'index': 0, - 'box': { - 'x': 50, - 'y': 50, - 'w': 50, - 'h': 50, - }, - 'class_id': 0, - 'score': 0.7, - }, { - 'index': 1, - 'box': { - 'x': 150, - 'y': 50, - 'w': 75, - 'h': 75, - }, - 'class_id': 0, - 'score': 0.8, - }, { - 'index': 2, - 'box': { - 'x': 150, - 'y': 150, - 'w': 75, - 'h': 75, - }, - 'class_id': 1, - 'score': 0.9, - }, { - 'index': 3, - 'box': { - 'x': 350, - 'y': 50, - 'w': 100, - 'h': 100, - }, - 'class_id': 2, - 'score': 0.9, - }] - }, - 'a1': { - 'annotations': [{ - 'index': 0, - 'box': { - 'x': 300, - 'y': 300, - 'w': 100, - 'h': 100, - }, - 'class_id': 2, - 'score': 0.9, - }] - }, - } - } - }, - 'head_task_id': 'b' - } - mir_annotations = mirpb.MirAnnotations() - json_format.ParseDict(annotations_dict, mir_annotations) - - task = mir_storage_ops.create_task(task_type=mirpb.TaskType.TaskTypeImportData, task_id='b', message='import') - mir_storage_ops.MirStorageOps.save_and_commit(mir_root=self._mir_root, - mir_branch='b', - his_branch='master', - mir_datas={ - mirpb.MirStorage.MIR_METADATAS: mir_metadatas, - mirpb.MirStorage.MIR_ANNOTATIONS: mir_annotations, - }, - task=task) - - def _deprepare_dirs(self) -> None: - if os.path.isdir(self._test_root): - shutil.rmtree(self._test_root) - - # public: test cases - def test_00(self) -> None: - fake_args = type('', (), {})() - fake_args.mir_root = self._mir_root - fake_args.work_dir = self._working_root - fake_args.src_revs = 'a;b' - fake_args.gt_rev = 'b' - fake_args.dst_rev = 'c@c' - fake_args.conf_thr = 0.3 - fake_args.iou_thrs = '0.5:0.95:0.05' - fake_args.need_pr_curve = False - evaluate_instance = evaluate.CmdEvaluate(fake_args) - return_code = evaluate_instance.run() - - self.assertEqual(return_code, MirCode.RC_OK) - - # check evaluation result - mir_tasks: mirpb.MirTasks = mir_storage_ops.MirStorageOps.load_single_storage(mir_root=self._mir_root, - mir_branch='c', - mir_task_id='c', - ms=mirpb.MirStorage.MIR_TASKS) - evaluation_result = mir_tasks.tasks[mir_tasks.head_task_id].evaluation - self.assertEqual({'a', 'b'}, set(evaluation_result.dataset_evaluations.keys())) diff --git a/ymir/command/tests/unit/test_cmd_export.py b/ymir/command/tests/unit/test_cmd_export.py index 8ac280b4e9..1ca2e791c7 100644 --- a/ymir/command/tests/unit/test_cmd_export.py +++ b/ymir/command/tests/unit/test_cmd_export.py @@ -1,15 +1,15 @@ import os import shutil -from typing import Dict, List, Tuple +from typing import List, Tuple import unittest -from unittest import mock from google.protobuf import json_format -from mir.commands import exporting +from mir.commands import export from mir.protos import mir_command_pb2 as mirpb -from mir.tools import data_exporter, hash_utils, mir_storage_ops +from mir.tools import mir_storage_ops, mir_storage from mir.tools.code import MirCode +from mir.tools.mir_storage import sha1sum_for_file from tests import utils as test_utils @@ -20,6 +20,7 @@ def 
__init__(self, methodName: str) -> None: self._test_root = test_utils.dir_test_root(self.id().split('.')[-3:]) self._assets_location = os.path.join(self._test_root, 'assets_location') self._dest_root = os.path.join(self._test_root, 'export_dest') + self._gt_root = os.path.join(self._dest_root, 'gt_dir') self._mir_root = os.path.join(self._test_root, 'mir-repo') def setUp(self) -> None: @@ -49,10 +50,11 @@ def __prepare_assets(self): copy all assets from project to assets_location, assumes that `self._assets_location` already created ''' image_paths = ['tests/assets/2007_000032.jpg', 'tests/assets/2007_000243.jpg'] - sha1sum_path_pairs = [(hash_utils.sha1sum_for_file(image_path), image_path) + sha1sum_path_pairs = [(sha1sum_for_file(image_path), image_path) for image_path in image_paths] # type: List[Tuple[str, str]] for sha1sum, image_path in sha1sum_path_pairs: - shutil.copyfile(image_path, os.path.join(self._assets_location, sha1sum)) + shutil.copyfile(image_path, + mir_storage.get_asset_storage_path(self._assets_location, sha1sum)) def __prepare_mir_repo(self): ''' @@ -83,87 +85,104 @@ def __prepare_mir_repo(self): # annotations annotations_dict = { - 'task_annotations': { - 'a': { - 'image_annotations': { - '430df22960b0f369318705800139fcc8ec38a3e4': { - 'annotations': [{ - 'index': 0, - 'box': { - 'x': 104, - 'y': 78, - 'w': 272, - 'h': 105 - }, - 'class_id': 3, - 'score': 1, - }, { - 'index': 1, - 'box': { - 'x': 133, - 'y': 88, - 'w': 65, - 'h': 36 - }, - 'class_id': 3, - 'score': 1, - }, { - 'index': 2, - 'box': { - 'x': 195, - 'y': 180, - 'w': 19, - 'h': 50 - }, - 'class_id': 2, - 'score': 1, - }, { - 'index': 3, - 'box': { - 'x': 26, - 'y': 189, - 'w': 19, - 'h': 95 - }, - 'class_id': 2, - 'score': 1, - }] - }, - 'a3008c032eb11c8d9ffcb58208a36682ee40900f': { - 'annotations': [{ - 'index': 0, - 'box': { - 'x': 181, - 'y': 127, - 'w': 94, - 'h': 67 - }, - 'class_id': 3, - 'score': 1, - }] - }, - } + 'prediction': { + 'task_id': 'a', + 'image_annotations': { + '430df22960b0f369318705800139fcc8ec38a3e4': { + 'boxes': [{ + 'index': 0, + 'box': { + 'x': 104, + 'y': 78, + 'w': 272, + 'h': 105 + }, + 'class_id': 3, + 'score': 1, + 'anno_quality': 0.95, + 'tags': { + 'fake tag name': 'fake tag data' + }, + }, { + 'index': 1, + 'box': { + 'x': 133, + 'y': 88, + 'w': 65, + 'h': 36 + }, + 'class_id': 3, + 'score': 1, + 'anno_quality': 0.95, + 'tags': { + 'fake tag name': 'fake tag data' + }, + }, { + 'index': 2, + 'box': { + 'x': 195, + 'y': 180, + 'w': 19, + 'h': 50 + }, + 'class_id': 2, + 'score': 1, + 'anno_quality': 0.95, + 'tags': { + 'fake tag name': 'fake tag data' + }, + }, { + 'index': 3, + 'box': { + 'x': 26, + 'y': 189, + 'w': 19, + 'h': 95 + }, + 'class_id': 2, + 'score': 1, + 'anno_quality': 0.95, + 'tags': { + 'fake tag name': 'fake tag data' + }, + }], + }, + 'a3008c032eb11c8d9ffcb58208a36682ee40900f': { + 'boxes': [{ + 'index': 0, + 'box': { + 'x': 181, + 'y': 127, + 'w': 94, + 'h': 67 + }, + 'class_id': 3, + 'score': 1, + 'anno_quality': 0.95, + 'tags': { + 'fake tag name': 'fake tag data' + }, + }], + }, } - } - } - mir_annotations = mirpb.MirAnnotations() - json_format.ParseDict(annotations_dict, mir_annotations) - - # keywords - keywords_dict = { - 'keywords': { - '430df22960b0f369318705800139fcc8ec38a3e4': { - 'predifined_keyids': [2, 3], - 'customized_keywords': ['pascal'] - }, + }, + 'image_cks': { 'a3008c032eb11c8d9ffcb58208a36682ee40900f': { - 'predifined_keyids': [3], - 'customized_keywords': ['pascal'] + 'cks': { + 'weather': 'sunny', + }, + 'image_quality': 0.5 }, 
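+                # image cks (user-defined keys such as 'weather') and
+                # image_quality are stored per asset in mir_annotations,
+                # rather than in a separate keywords store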
+ '430df22960b0f369318705800139fcc8ec38a3e4': { + 'cks': { + 'weather': 'sunny', + }, + 'image_quality': 0.3 + } } } - mir_keywords = mirpb.MirKeywords() - json_format.ParseDict(keywords_dict, mir_keywords) + mir_annotations = mirpb.MirAnnotations() + json_format.ParseDict(annotations_dict, mir_annotations) # tasks task = mir_storage_ops.create_task(task_type=mirpb.TaskType.TaskTypeImportData, @@ -181,52 +200,35 @@ def __prepare_mir_repo(self): mir_datas=mir_datas, task=task) - # private: mocked - def __mock_export(*args, **kwargs) -> Dict[str, Tuple[str, str]]: - return {} - - # private: test cases - @mock.patch('mir.tools.data_exporter.export', side_effect='__mock_export') - def test_normal_00(self, mock_export): - # normal case + def test_normal_00(self): + # normal case: voc:raw fake_args = type('', (), {})() fake_args.mir_root = self._mir_root fake_args.asset_dir = self._dest_root - fake_args.annotation_dir = self._dest_root + fake_args.pred_dir = self._dest_root + fake_args.gt_dir = self._gt_root fake_args.media_location = self._assets_location fake_args.src_revs = 'a@a' - fake_args.dst_rev = '' - fake_args.format = 'voc' - fake_args.in_cis = 'person' + fake_args.anno_format = 'voc' + fake_args.asset_format = 'raw' + fake_args.class_names = 'person' fake_args.work_dir = '' - runner = exporting.CmdExport(fake_args) + runner = export.CmdExport(fake_args) result = runner.run() self.assertEqual(MirCode.RC_OK, result) - mock_export.assert_called_once_with(mir_root=self._mir_root, - assets_location=self._assets_location, - class_type_ids={2: 2}, - asset_ids={'430df22960b0f369318705800139fcc8ec38a3e4', - 'a3008c032eb11c8d9ffcb58208a36682ee40900f'}, - asset_dir=self._dest_root, - annotation_dir=self._dest_root, - need_ext=True, - need_id_sub_folder=False, - base_branch='a', - base_task_id='a', # see: fake_args.src_revs = 'a@a' - format_type=data_exporter.ExportFormat.EXPORT_FORMAT_VOC) - - # abnormal case + # abnormal case: no asset_dir, pred_dir, media_location fake_args = type('', (), {})() fake_args.mir_root = self._mir_root fake_args.asset_dir = '' - fake_args.annotation_dir = '' + fake_args.pred_dir = '' + fake_args.gt_dir = '' fake_args.media_location = '' fake_args.src_revs = 'a@a' - fake_args.dst_rev = '' # too fast, default task_id will be the same as previous one - fake_args.format = 'voc' - fake_args.in_cis = 'person' + fake_args.anno_format = 'voc' + fake_args.asset_format = 'raw' + fake_args.class_names = 'person' fake_args.work_dir = '' - runner = exporting.CmdExport(fake_args) + runner = export.CmdExport(fake_args) result = runner.run() self.assertNotEqual(MirCode.RC_OK, result) diff --git a/ymir/command/tests/unit/test_cmd_filter.py b/ymir/command/tests/unit/test_cmd_filter.py index da3e69a95d..ddea48a0cd 100644 --- a/ymir/command/tests/unit/test_cmd_filter.py +++ b/ymir/command/tests/unit/test_cmd_filter.py @@ -8,7 +8,6 @@ from mir.commands import filter as cmd_filter from mir.protos import mir_command_pb2 as mirpb -from mir.tools import utils as mir_utils from mir.tools import mir_storage_ops from mir.tools.code import MirCode from mir.tools.mir_storage_ops import MirStorageOps @@ -87,30 +86,66 @@ def __prepare_mir_repo(self, mir_root: str): json_format.ParseDict(metadatas_dict, mir_metadatas) annotations_dict = { - "task_annotations": { - "t0": { - "image_annotations": { - "a0000000000000000000000000000000000000000000000000": - TestCmdFilter.__annotations_for_single_image([0, 1, 2, 3, 4, 5]), - "a0000000000000000000000000000000000000000000000001": - 
TestCmdFilter.__annotations_for_single_image([4]), - "a0000000000000000000000000000000000000000000000002": - TestCmdFilter.__annotations_for_single_image([3]), - "a0000000000000000000000000000000000000000000000003": - TestCmdFilter.__annotations_for_single_image([2]), - "a0000000000000000000000000000000000000000000000004": - TestCmdFilter.__annotations_for_single_image([0, 1]), - } + "prediction": { + "image_annotations": { + "a0000000000000000000000000000000000000000000000000": + TestCmdFilter.__annotations_for_single_image([0, 1, 2, 3, 4, 5]), + "a0000000000000000000000000000000000000000000000001": + TestCmdFilter.__annotations_for_single_image([4]), + "a0000000000000000000000000000000000000000000000002": + TestCmdFilter.__annotations_for_single_image([3]), + "a0000000000000000000000000000000000000000000000003": + TestCmdFilter.__annotations_for_single_image([2]), + "a0000000000000000000000000000000000000000000000004": + TestCmdFilter.__annotations_for_single_image([0, 1]), + } + }, + "ground_truth": { + "image_annotations": { + "a0000000000000000000000000000000000000000000000000": + TestCmdFilter.__annotations_for_single_image([0, 1, 2, 3, 4, 5]), + "a0000000000000000000000000000000000000000000000001": + TestCmdFilter.__annotations_for_single_image([4, 5]), + "a0000000000000000000000000000000000000000000000002": + TestCmdFilter.__annotations_for_single_image([0, 3]), + "a0000000000000000000000000000000000000000000000003": + TestCmdFilter.__annotations_for_single_image([2]), + "a0000000000000000000000000000000000000000000000004": + TestCmdFilter.__annotations_for_single_image([0, 4]), } }, - 'head_task_id': 't0', + 'image_cks': { + 'a0000000000000000000000000000000000000000000000000': { + 'cks': { + 'c0': 'c1' + } + }, + 'a0000000000000000000000000000000000000000000000001': { + 'cks': { + 'c0': 'c1' + } + }, + 'a0000000000000000000000000000000000000000000000002': { + 'cks': { + 'c0': 'c1' + } + }, + 'a0000000000000000000000000000000000000000000000003': { + 'cks': { + 'c0': 'c1' + } + }, + 'a0000000000000000000000000000000000000000000000004': { + 'cks': { + 'c0': 'c1' + } + }, + } } mir_annotations = mirpb.MirAnnotations() json_format.ParseDict(annotations_dict, mir_annotations) - task = mir_storage_ops.create_task(task_type=mirpb.TaskType.TaskTypeImportData, - task_id='t0', - message='import') + task = mir_storage_ops.create_task(task_type=mirpb.TaskType.TaskTypeImportData, task_id='t0', message='import') MirStorageOps.save_and_commit(mir_root=self._mir_root, mir_branch='a', @@ -136,7 +171,7 @@ def __annotations_for_single_image(type_ids: List[int]) -> Dict[str, list]: "score": 0.5, "class_id": type_id, }) - return {"annotations": annotations} + return {"boxes": annotations} # public: test cases def test_all(self): @@ -155,10 +190,11 @@ def test_all(self): self.assertEqual(MirCode.RC_OK, pipe1[0].recv()) def __test_cmd_filter_normal_01(self): - preds = "frisbee; person; ChAiR" # 0; 2; 15 + preds = "frisbee; person; ChAiR" # 0; 2; 5 excludes = "Cat" # 4 expected_asset_ids = { - "a0000000000000000000000000000000000000000000000003", "a0000000000000000000000000000000000000000000000004" + "a0000000000000000000000000000000000000000000000002", + "a0000000000000000000000000000000000000000000000003", } self.__test_cmd_filter_normal_cases(in_cis=preds, ex_cis=excludes, @@ -187,18 +223,12 @@ def __test_cmd_filter_normal_cases(self, in_cis: str, ex_cis: str, in_cks: str, # check mir repo mir_metadatas = test_utils.read_mir_pb(os.path.join(self._mir_root, 'metadatas.mir'), mirpb.MirMetadatas) 
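+        # keywords.mir is rebuilt from annotations on commit, so it is not
+        # read back and checked here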
mir_annotations = test_utils.read_mir_pb(os.path.join(self._mir_root, 'annotations.mir'), mirpb.MirAnnotations) - mir_keywords = test_utils.read_mir_pb(os.path.join(self._mir_root, 'keywords.mir'), mirpb.MirKeywords) mir_tasks = test_utils.read_mir_pb(os.path.join(self._mir_root, 'tasks.mir'), mirpb.MirTasks) self.assertEqual(expected_asset_ids, set(mir_metadatas.attributes.keys())) - self.assertEqual(expected_asset_ids, set(mir_keywords.keywords.keys())) - self.assertEqual(1, len(mir_annotations.task_annotations)) - self.assertEqual(expected_asset_ids, set(mir_annotations.task_annotations['t1'].image_annotations.keys())) + self.assertEqual(expected_asset_ids, set(mir_annotations.prediction.image_annotations.keys())) + self.assertEqual(expected_asset_ids, set(mir_annotations.image_cks.keys())) self.assertEqual(1, len(mir_tasks.tasks)) self.assertEqual('t1', mir_tasks.head_task_id) - self.assertEqual('t1', mir_annotations.head_task_id) - - current_branch_name = mir_utils.mir_repo_head_name(self._mir_root) - self.assertEqual(dst_branch, current_branch_name) def __test_multiprocess(self, dst_branch: str, child_conn): fake_args = type('', (), {})() diff --git a/ymir/command/tests/unit/test_cmd_import.py b/ymir/command/tests/unit/test_cmd_import.py index fc84e7c7e1..a356947e80 100644 --- a/ymir/command/tests/unit/test_cmd_import.py +++ b/ymir/command/tests/unit/test_cmd_import.py @@ -1,11 +1,12 @@ import logging import os import shutil +from typing import Set import unittest -from google.protobuf.json_format import MessageToDict +from google.protobuf.json_format import MessageToDict, ParseDict -from mir.commands.importing import CmdImport +from mir.commands.import_dataset import CmdImport from mir.protos import mir_command_pb2 as mirpb from mir.tools.code import MirCode from tests import utils as test_utils @@ -18,6 +19,7 @@ class TestCmdImport(unittest.TestCase): def __init__(self, methodName: str) -> None: super().__init__(methodName=methodName) + self.maxDiff = None self._sandbox_root = test_utils.dir_test_root(self.id().split('.')[-3:]) self._user_root = os.path.join(self._sandbox_root, self._USER_NAME) self._mir_repo_root = os.path.join(self._user_root, self._MIR_REPO_NAME) @@ -49,163 +51,309 @@ def test_import_cmd_00(self): args.dst_rev = 'a@import-task-0' args.index_file = self._idx_file args.ck_file = self._ck_file - args.anno = self._data_xml_path - args.gen = gen_folder - args.dataset_name = '' + args.pred_dir = self._data_xml_path + args.gt_dir = self._data_xml_path + args.gen_abs = gen_folder args.work_dir = self._work_dir - args.ignore_unknown_types = False + args.unknown_types_strategy = 'stop' + args.anno_type = 'det-box' importing_instance = CmdImport(args) ret = importing_instance.run() - assert ret == MirCode.RC_OK + self.assertEqual(ret, MirCode.RC_OK) self._check_repo(self._mir_repo_root, with_person_ignored=False, with_annotations=True) # not write person label test_utils.prepare_labels(mir_root=self._mir_repo_root, names=['cat', 'airplane,aeroplane']) # ignore unknown types - args.ignore_unknown_types = True - args.dataset_name = 'import-task-0' + args.unknown_types_strategy = 'ignore' args.dst_rev = 'a@import-task-1' importing_instance = CmdImport(args) ret = importing_instance.run() - assert ret == MirCode.RC_OK - self._check_repo(self._mir_repo_root, with_person_ignored=True, with_annotations=True) + self.assertEqual(ret, MirCode.RC_OK) + self._check_repo(self._mir_repo_root, + with_person_ignored=True, + with_annotations=True, + task_new_types={'person': 3}, + 
task_new_types_added=False) - # have no annotations - args.anno = None - args.ignore_unknown_types = False - args.dataset_name = 'import-task-0' + # add unknown types + args.unknown_types_strategy = 'add' args.dst_rev = 'a@import-task-2' importing_instance = CmdImport(args) ret = importing_instance.run() - assert ret == MirCode.RC_OK + self.assertEqual(ret, MirCode.RC_OK) + self._check_repo(self._mir_repo_root, + with_person_ignored=False, + with_annotations=True, + task_new_types={'person': 3}, + task_new_types_added=True) + + # have no annotations + args.pred_dir = None + args.gt_dir = None + args.unknown_types_strategy = 'stop' + args.dst_rev = 'a@import-task-3' + importing_instance = CmdImport(args) + ret = importing_instance.run() + self.assertEqual(ret, MirCode.RC_OK) self._check_repo(self._mir_repo_root, with_person_ignored=False, with_annotations=False) # check for relative path, currently should return an error code args.mir_root = 'abc' importing_instance = CmdImport(args) ret = importing_instance.run() - assert ret != MirCode.RC_OK + self.assertNotEqual(ret, MirCode.RC_OK) + args.mir_root = self._mir_repo_root args.index_file = '' - assert CmdImport(args).run() != MirCode.RC_OK + self.assertNotEqual(CmdImport(args).run(), MirCode.RC_OK) args.index_file = self._idx_file - args.anno = '' - assert CmdImport(args).run() != MirCode.RC_OK - args.anno = self._data_xml_path + '/fake-one' - assert CmdImport(args).run() != MirCode.RC_OK - args.anno = self._data_xml_path + args.pred_dir = '' + self.assertEqual(CmdImport(args).run(), MirCode.RC_OK) + args.pred_dir = self._data_xml_path + '/fake-one' + self.assertNotEqual(CmdImport(args).run(), MirCode.RC_OK) + args.pred_dir = self._data_xml_path + + def test_import_cmd_01(self): + shutil.move(os.path.join(self._data_xml_path, 'pred_meta.yaml'), os.path.join(self._data_xml_path, 'meta.yaml')) + # test cases for import prediction meta + mir_root = self._mir_repo_root + gen_folder = os.path.join(self._storage_root, 'gen') + args = type('', (), {})() + args.mir_root = mir_root + args.src_revs = '' + args.dst_rev = 'a@import-task-0' + args.index_file = self._idx_file + args.ck_file = self._ck_file + args.pred_dir = self._data_xml_path + args.gt_dir = self._data_xml_path + args.gen_abs = gen_folder + args.work_dir = self._work_dir + args.unknown_types_strategy = 'stop' + args.anno_type = 'det-box' + importing_instance = CmdImport(args) + ret = importing_instance.run() + self.assertEqual(ret, MirCode.RC_OK) + self._check_repo(self._mir_repo_root, + with_person_ignored=False, + with_annotations=True, + eval_class_ids_set={0, 1, 2}) + shutil.move(os.path.join(self._data_xml_path, 'meta.yaml'), os.path.join(self._data_xml_path, 'pred_meta.yaml')) - def _check_repo(self, repo_root: str, with_person_ignored: bool, with_annotations: bool): + def _check_repo(self, + repo_root: str, + with_person_ignored: bool, + with_annotations: bool, + task_new_types: dict = {}, + task_new_types_added: bool = False, + eval_class_ids_set: Set[int] = set()): # check annotations.mir mir_annotations = mirpb.MirAnnotations() with open(os.path.join(repo_root, 'annotations.mir'), 'rb') as f: mir_annotations.ParseFromString(f.read()) - dict_annotations = MessageToDict(mir_annotations, preserving_proto_field_name=True) - task_id = list(dict_annotations['task_annotations'].keys())[0] - dict_annotations = dict_annotations['task_annotations'][task_id] + + dict_asset_cks_expected = { + 'a3008c032eb11c8d9ffcb58208a36682ee40900f': { + 'cks': { + 'weather': 'rainy', + 'camera': 
'camera 1', + 'theme': 'gray sky' + }, + 'image_quality': 0.83 + }, + '430df22960b0f369318705800139fcc8ec38a3e4': { + 'cks': { + 'camera': 'camera 0', + 'theme': 'blue sky', + 'weather': 'sunny' + }, + 'image_quality': 0.95 + } + } if with_person_ignored: - dict_annotations_expect = { - 'image_annotations': { - 'a3008c032eb11c8d9ffcb58208a36682ee40900f': { - 'annotations': [{ - 'box': { - 'x': 181, - 'y': 127, - 'w': 94, - 'h': 67 - }, - 'class_id': 1, - 'score': 2.0, - }] - }, - '430df22960b0f369318705800139fcc8ec38a3e4': { - 'annotations': [{ - 'box': { - 'x': 104, - 'y': 78, - 'w': 272, - 'h': 106 - }, - 'class_id': 1, - 'score': 0.5, - }, { - 'index': 1, - 'box': { - 'x': 133, - 'y': 88, - 'w': 65, - 'h': 36 - }, - 'class_id': 1, - 'score': 2.0, - }] - } + dict_image_annotations_expect = { + 'a3008c032eb11c8d9ffcb58208a36682ee40900f': { + 'boxes': [{ + 'box': { + 'x': 181, + 'y': 127, + 'w': 94, + 'h': 67, + 'rotate_angle': -0.02 + }, + 'class_id': 1, + 'cm': 'FP' if eval_class_ids_set else 'NotSet', + 'det_link_id': -1 if eval_class_ids_set else 0, + 'score': -1.0, + 'anno_quality': 0.75, + 'tags': { + 'difficult': '0', + 'color': 'pink', + 'pose': 'Unspecified' + } + }], + 'img_class_ids': [1], + }, + '430df22960b0f369318705800139fcc8ec38a3e4': { + 'boxes': [{ + 'box': { + 'x': 104, + 'y': 78, + 'w': 272, + 'h': 106, + 'rotate_angle': 0.22 + }, + 'class_id': 1, + 'cm': 'TP' if eval_class_ids_set else 'NotSet', + 'score': 0.5, + 'anno_quality': 0.62, + 'tags': { + 'difficult': '0', + 'color': 'white', + 'pose': 'Frontal' + } + }, { + 'index': 1, + 'box': { + 'x': 133, + 'y': 88, + 'w': 65, + 'h': 36, + 'rotate_angle': 0.02 + }, + 'class_id': 1, + 'cm': 'FP' if eval_class_ids_set else 'NotSet', + 'det_link_id': -1 if eval_class_ids_set else 0, + 'score': -1.0, + 'anno_quality': 0.75, + 'tags': { + 'difficult': '0', + 'color': 'blue', + 'pose': 'Left' + } + }], + 'img_class_ids': [1], } } else: - dict_annotations_expect = { - 'image_annotations': { - 'a3008c032eb11c8d9ffcb58208a36682ee40900f': { - 'annotations': [{ - 'box': { - 'x': 181, - 'y': 127, - 'w': 94, - 'h': 67 - }, - 'class_id': 1, - 'score': 2.0, - }] - }, - '430df22960b0f369318705800139fcc8ec38a3e4': { - 'annotations': [{ - 'box': { - 'x': 104, - 'y': 78, - 'w': 272, - 'h': 106 - }, - 'class_id': 1, - 'score': 0.5, - }, { - 'index': 1, - 'box': { - 'x': 133, - 'y': 88, - 'w': 65, - 'h': 36 - }, - 'class_id': 1, - 'score': 2.0, - }, { - 'index': 2, - 'box': { - 'x': 195, - 'y': 180, - 'w': 19, - 'h': 50 - }, - 'class_id': 2, - 'score': 2.0, - }, { - 'index': 3, - 'box': { - 'x': 26, - 'y': 189, - 'w': 19, - 'h': 50 - }, - 'class_id': 2, - 'score': 2.0, - }] - } + dict_image_annotations_expect = { + 'a3008c032eb11c8d9ffcb58208a36682ee40900f': { + 'boxes': [{ + 'box': { + 'x': 181, + 'y': 127, + 'w': 94, + 'h': 67, + 'rotate_angle': -0.02 + }, + 'class_id': 1, + 'cm': 'IGNORED' if eval_class_ids_set else 'NotSet', + 'det_link_id': -1 if eval_class_ids_set else 0, + 'score': -1.0, + 'anno_quality': 0.75, + 'tags': { + 'difficult': '0', + 'color': 'pink', + 'pose': 'Unspecified' + } + }], + 'img_class_ids': [1], + }, + '430df22960b0f369318705800139fcc8ec38a3e4': { + 'boxes': [{ + 'box': { + 'x': 104, + 'y': 78, + 'w': 272, + 'h': 106, + 'rotate_angle': 0.22 + }, + 'class_id': 1, + 'cm': 'TP' if eval_class_ids_set else 'NotSet', + 'score': 0.5, + 'anno_quality': 0.62, + 'tags': { + 'difficult': '0', + 'color': 'white', + 'pose': 'Frontal' + } + }, { + 'index': 1, + 'box': { + 'x': 133, + 'y': 88, + 'w': 65, + 'h': 36, + 
'rotate_angle': 0.02 + }, + 'class_id': 1, + 'cm': 'IGNORED' if eval_class_ids_set else 'NotSet', + 'det_link_id': -1 if eval_class_ids_set else 0, + 'score': -1.0, + 'anno_quality': 0.75, + 'tags': { + 'difficult': '0', + 'color': 'blue', + 'pose': 'Left' + } + }, { + 'index': 2, + 'box': { + 'x': 195, + 'y': 180, + 'w': 19, + 'h': 50 + }, + 'class_id': 2, + 'cm': 'IGNORED' if eval_class_ids_set else 'NotSet', + 'det_link_id': -1 if eval_class_ids_set else 0, + 'score': -1.0, + 'anno_quality': 0.23, + 'tags': { + 'difficult': '1', + 'pose': 'Rear' + } + }, { + 'index': 3, + 'box': { + 'x': 26, + 'y': 189, + 'w': 19, + 'h': 50, + 'rotate_angle': 0.12 + }, + 'class_id': 2, + 'cm': 'IGNORED' if eval_class_ids_set else 'NotSet', + 'det_link_id': -1 if eval_class_ids_set else 0, + 'score': -1.0, + 'anno_quality': 0.35, + 'tags': { + 'difficult': '1', + 'pose': 'Rear' + } + }], + 'img_class_ids': [1, 2], } } - if not with_annotations: - dict_annotations_expect = {} - self.assertDictEqual(dict_annotations_expect, dict_annotations) + mir_annotations_expected = mirpb.MirAnnotations() + if with_annotations: + ParseDict( + { + 'prediction': { + 'image_annotations': dict_image_annotations_expect + }, + 'image_cks': dict_asset_cks_expected, + }, mir_annotations_expected) + + try: + self.assertEqual(mir_annotations_expected.prediction.image_annotations, + mir_annotations.prediction.image_annotations) + self.assertEqual(mir_annotations_expected.image_cks, mir_annotations.image_cks) + self.assertEqual(eval_class_ids_set, set(mir_annotations.prediction.eval_class_ids)) + except AssertionError as e: + raise e # check keywords.mir and contexts.mir mir_keywords = mirpb.MirKeywords() @@ -214,88 +362,496 @@ def _check_repo(self, repo_root: str, with_person_ignored: bool, with_annotation mir_keywords.ParseFromString(f.read()) with open(os.path.join(repo_root, 'context.mir'), 'rb') as f: mir_context.ParseFromString(f.read()) - dict_keywords = MessageToDict(mir_keywords, preserving_proto_field_name=True) - dict_context = MessageToDict(mir_context, preserving_proto_field_name=True, including_default_value_fields=True) if with_annotations: - dup_asset_id = '430df22960b0f369318705800139fcc8ec38a3e4' - dict_keywords['keywords'][dup_asset_id]['predifined_keyids'] = sorted( - dict_keywords['keywords'][dup_asset_id]['predifined_keyids']) # list is unsorted - dup_keywords_id = 1 - dict_keywords['index_predifined_keyids'][dup_keywords_id]['asset_ids'] = sorted( - dict_keywords['index_predifined_keyids'][dup_keywords_id]['asset_ids']) if with_person_ignored: + pred_gt_idx = { + 'cis': { + 1: { + 'key_ids': { + '430df22960b0f369318705800139fcc8ec38a3e4': { + 'ids': [0, 1] + }, + 'a3008c032eb11c8d9ffcb58208a36682ee40900f': { + 'ids': [0] + } + } + } + }, + 'tags': { + 'pose': { + 'asset_annos': { + 'a3008c032eb11c8d9ffcb58208a36682ee40900f': { + 'ids': [0] + }, + '430df22960b0f369318705800139fcc8ec38a3e4': { + 'ids': [0, 1] + } + }, + 'sub_indexes': { + 'Left': { + 'key_ids': { + '430df22960b0f369318705800139fcc8ec38a3e4': { + 'ids': [1] + } + } + }, + 'Unspecified': { + 'key_ids': { + 'a3008c032eb11c8d9ffcb58208a36682ee40900f': { + 'ids': [0] + } + } + }, + 'Frontal': { + 'key_ids': { + '430df22960b0f369318705800139fcc8ec38a3e4': { + 'ids': [0] + } + } + } + } + }, + 'difficult': { + 'asset_annos': { + 'a3008c032eb11c8d9ffcb58208a36682ee40900f': { + 'ids': [0] + }, + '430df22960b0f369318705800139fcc8ec38a3e4': { + 'ids': [0, 1] + } + }, + 'sub_indexes': { + '0': { + 'key_ids': { + 'a3008c032eb11c8d9ffcb58208a36682ee40900f': 
{ + 'ids': [0] + }, + '430df22960b0f369318705800139fcc8ec38a3e4': { + 'ids': [0, 1] + } + } + } + } + }, + 'color': { + 'asset_annos': { + 'a3008c032eb11c8d9ffcb58208a36682ee40900f': { + 'ids': [0] + }, + '430df22960b0f369318705800139fcc8ec38a3e4': { + 'ids': [0, 1] + } + }, + 'sub_indexes': { + 'blue': { + 'key_ids': { + '430df22960b0f369318705800139fcc8ec38a3e4': { + 'ids': [1] + } + } + }, + 'pink': { + 'key_ids': { + 'a3008c032eb11c8d9ffcb58208a36682ee40900f': { + 'ids': [0] + } + } + }, + 'white': { + 'key_ids': { + '430df22960b0f369318705800139fcc8ec38a3e4': { + 'ids': [0] + } + } + } + } + } + } + } dict_keywords_expect = { - 'keywords': { - 'a3008c032eb11c8d9ffcb58208a36682ee40900f': { - 'predifined_keyids': [1], + 'pred_idx': pred_gt_idx, + 'gt_idx': pred_gt_idx, + 'ck_idx': { + 'theme': { + 'asset_annos': { + 'a3008c032eb11c8d9ffcb58208a36682ee40900f': {}, + '430df22960b0f369318705800139fcc8ec38a3e4': {} + }, + 'sub_indexes': { + 'blue sky': { + 'key_ids': { + '430df22960b0f369318705800139fcc8ec38a3e4': {} + } + }, + 'gray sky': { + 'key_ids': { + 'a3008c032eb11c8d9ffcb58208a36682ee40900f': {} + } + } + } + }, + 'weather': { + 'asset_annos': { + '430df22960b0f369318705800139fcc8ec38a3e4': {}, + 'a3008c032eb11c8d9ffcb58208a36682ee40900f': {} + }, + 'sub_indexes': { + 'sunny': { + 'key_ids': { + '430df22960b0f369318705800139fcc8ec38a3e4': {} + } + }, + 'rainy': { + 'key_ids': { + 'a3008c032eb11c8d9ffcb58208a36682ee40900f': {} + } + } + } }, - '430df22960b0f369318705800139fcc8ec38a3e4': { - 'predifined_keyids': [1], + 'camera': { + 'asset_annos': { + 'a3008c032eb11c8d9ffcb58208a36682ee40900f': {}, + '430df22960b0f369318705800139fcc8ec38a3e4': {} + }, + 'sub_indexes': { + 'camera 1': { + 'key_ids': { + 'a3008c032eb11c8d9ffcb58208a36682ee40900f': {} + } + }, + 'camera 0': { + 'key_ids': { + '430df22960b0f369318705800139fcc8ec38a3e4': {} + } + } + } } + } + } + pred_gt_stats = { + 'total_cnt': 3, + 'positive_asset_cnt': 2, + 'negative_asset_cnt': 0, + 'class_ids_cnt': { + 1: 2, }, - 'index_predifined_keyids': { - 1: { - 'asset_ids': - ['430df22960b0f369318705800139fcc8ec38a3e4', 'a3008c032eb11c8d9ffcb58208a36682ee40900f'] - } + 'tags_cnt': { + 'difficult': { + 'cnt': 3, + 'sub_cnt': { + '0': 3, + }, + }, + 'color': { + 'cnt': 3, + 'sub_cnt': { + 'white': 1, + 'blue': 1, + 'pink': 1, + }, + }, + 'pose': { + 'cnt': 3, + 'sub_cnt': { + 'Left': 1, + 'Frontal': 1, + 'Unspecified': 1, + }, + }, }, } dict_context_expected = { 'images_cnt': 2, - 'negative_images_cnt': 0, - 'project_negative_images_cnt': 0, - 'predefined_keyids_cnt': { - 1: 2, + 'total_asset_mbytes': 1, + 'cks_cnt': { + 'weather': { + 'cnt': 2, + 'sub_cnt': { + 'sunny': 1, + 'rainy': 1, + }, + }, + 'camera': { + 'cnt': 2, + 'sub_cnt': { + 'camera 0': 1, + 'camera 1': 1, + }, + }, + 'theme': { + 'cnt': 2, + 'sub_cnt': { + 'blue sky': 1, + 'gray sky': 1, + }, + } }, - 'project_predefined_keyids_cnt': {}, - 'customized_keywords_cnt': {}, + 'pred_stats': pred_gt_stats, + 'gt_stats': pred_gt_stats, } else: - dict_keywords_expect = { - 'keywords': { - 'a3008c032eb11c8d9ffcb58208a36682ee40900f': { - 'predifined_keyids': [1], + pred_gt_idx = { + 'cis': { + 2: { + 'key_ids': { + '430df22960b0f369318705800139fcc8ec38a3e4': { + 'ids': [2, 3] + } + } }, - '430df22960b0f369318705800139fcc8ec38a3e4': { - 'predifined_keyids': [1, 2], + 1: { + 'key_ids': { + 'a3008c032eb11c8d9ffcb58208a36682ee40900f': { + 'ids': [0] + }, + '430df22960b0f369318705800139fcc8ec38a3e4': { + 'ids': [0, 1] + } + } } }, - 'index_predifined_keyids': { - 2: { - 
'asset_ids': ['430df22960b0f369318705800139fcc8ec38a3e4'] + 'tags': { + 'color': { + 'asset_annos': { + '430df22960b0f369318705800139fcc8ec38a3e4': { + 'ids': [0, 1] + }, + 'a3008c032eb11c8d9ffcb58208a36682ee40900f': { + 'ids': [0] + } + }, + 'sub_indexes': { + 'pink': { + 'key_ids': { + 'a3008c032eb11c8d9ffcb58208a36682ee40900f': { + 'ids': [0] + } + } + }, + 'white': { + 'key_ids': { + '430df22960b0f369318705800139fcc8ec38a3e4': { + 'ids': [0] + } + } + }, + 'blue': { + 'key_ids': { + '430df22960b0f369318705800139fcc8ec38a3e4': { + 'ids': [1] + } + } + } + } }, - 1: { - 'asset_ids': - ['430df22960b0f369318705800139fcc8ec38a3e4', 'a3008c032eb11c8d9ffcb58208a36682ee40900f'] + 'pose': { + 'asset_annos': { + '430df22960b0f369318705800139fcc8ec38a3e4': { + 'ids': [0, 1, 2, 3] + }, + 'a3008c032eb11c8d9ffcb58208a36682ee40900f': { + 'ids': [0] + } + }, + 'sub_indexes': { + 'Frontal': { + 'key_ids': { + '430df22960b0f369318705800139fcc8ec38a3e4': { + 'ids': [0] + } + } + }, + 'Unspecified': { + 'key_ids': { + 'a3008c032eb11c8d9ffcb58208a36682ee40900f': { + 'ids': [0] + } + } + }, + 'Left': { + 'key_ids': { + '430df22960b0f369318705800139fcc8ec38a3e4': { + 'ids': [1] + } + } + }, + 'Rear': { + 'key_ids': { + '430df22960b0f369318705800139fcc8ec38a3e4': { + 'ids': [2, 3] + } + } + } + } + }, + 'difficult': { + 'asset_annos': { + 'a3008c032eb11c8d9ffcb58208a36682ee40900f': { + 'ids': [0] + }, + '430df22960b0f369318705800139fcc8ec38a3e4': { + 'ids': [0, 1, 2, 3] + } + }, + 'sub_indexes': { + '1': { + 'key_ids': { + '430df22960b0f369318705800139fcc8ec38a3e4': { + 'ids': [2, 3] + } + } + }, + '0': { + 'key_ids': { + 'a3008c032eb11c8d9ffcb58208a36682ee40900f': { + 'ids': [0] + }, + '430df22960b0f369318705800139fcc8ec38a3e4': { + 'ids': [0, 1] + } + } + } + } + } + } + } + dict_keywords_expect = { + 'pred_idx': pred_gt_idx, + 'gt_idx': pred_gt_idx, + 'ck_idx': { + 'camera': { + 'asset_annos': { + 'a3008c032eb11c8d9ffcb58208a36682ee40900f': {}, + '430df22960b0f369318705800139fcc8ec38a3e4': {} + }, + 'sub_indexes': { + 'camera 1': { + 'key_ids': { + 'a3008c032eb11c8d9ffcb58208a36682ee40900f': {} + } + }, + 'camera 0': { + 'key_ids': { + '430df22960b0f369318705800139fcc8ec38a3e4': {} + } + } + } + }, + 'weather': { + 'asset_annos': { + 'a3008c032eb11c8d9ffcb58208a36682ee40900f': {}, + '430df22960b0f369318705800139fcc8ec38a3e4': {} + }, + 'sub_indexes': { + 'rainy': { + 'key_ids': { + 'a3008c032eb11c8d9ffcb58208a36682ee40900f': {} + } + }, + 'sunny': { + 'key_ids': { + '430df22960b0f369318705800139fcc8ec38a3e4': {} + } + } + } + }, + 'theme': { + 'asset_annos': { + '430df22960b0f369318705800139fcc8ec38a3e4': {}, + 'a3008c032eb11c8d9ffcb58208a36682ee40900f': {} + }, + 'sub_indexes': { + 'gray sky': { + 'key_ids': { + 'a3008c032eb11c8d9ffcb58208a36682ee40900f': {} + } + }, + 'blue sky': { + 'key_ids': { + '430df22960b0f369318705800139fcc8ec38a3e4': {} + } + } + } } + } + } + pred_gt_stats = { + 'total_cnt': 5, + 'positive_asset_cnt': 2, + 'negative_asset_cnt': 0, + 'class_ids_cnt': { + 1: 2, + 2: 1, + }, + 'tags_cnt': { + 'difficult': { + 'cnt': 5, + 'sub_cnt': { + '0': 3, + '1': 2, + }, + }, + 'color': { + 'cnt': 3, + 'sub_cnt': { + 'white': 1, + 'blue': 1, + 'pink': 1, + }, + }, + 'pose': { + 'cnt': 5, + 'sub_cnt': { + 'Left': 1, + 'Frontal': 1, + 'Unspecified': 1, + 'Rear': 2, + }, + }, }, } dict_context_expected = { 'images_cnt': 2, - 'negative_images_cnt': 0, - 'project_negative_images_cnt': 0, - 'predefined_keyids_cnt': { - 1: 2, - 2: 1, + 'total_asset_mbytes': 1, + 'cks_cnt': { + 'weather': { + 'cnt': 2, 
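+                    # cnt counts the assets carrying this ck at all; each of
+                    # the two assets contributes one value to sub_cnt below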
+ 'sub_cnt': { + 'sunny': 1, + 'rainy': 1, + }, + }, + 'camera': { + 'cnt': 2, + 'sub_cnt': { + 'camera 0': 1, + 'camera 1': 1, + }, + }, + 'theme': { + 'cnt': 2, + 'sub_cnt': { + 'blue sky': 1, + 'gray sky': 1, + }, + } }, - 'project_predefined_keyids_cnt': {}, - 'customized_keywords_cnt': {}, + 'pred_stats': pred_gt_stats, + 'gt_stats': pred_gt_stats, } + mir_keywords_expected = mirpb.MirKeywords() + ParseDict(dict_keywords_expect, mir_keywords_expected) + mir_context_expected = mirpb.MirContext() + mir_context_expected.pred_stats.eval_class_ids[:] = eval_class_ids_set + ParseDict(dict_context_expected, mir_context_expected) try: - self.assertDictEqual(dict_keywords, dict_keywords_expect) - except AssertionError as e: - logging.info(f"expected: {dict_keywords_expect}") - logging.info(f"actual: {dict_keywords}") - raise e - try: - self.assertDictEqual(dict_context, dict_context_expected) + self.assertEqual(mir_keywords, mir_keywords_expected) + self.assertEqual(mir_context, mir_context_expected) except AssertionError as e: - logging.info(f"expected: {dict_context_expected}") - logging.info(f"actual: {dict_context}") raise e else: - self.assertEqual(0, len(dict_keywords)) - self.assertEqual(0, len(dict_context['predefined_keyids_cnt'])) + self.assertEqual(0, len(mir_keywords.pred_idx.cis)) + self.assertEqual(0, len(mir_context.pred_stats.class_ids_cnt)) # check metadatas.mir mir_metadatas = mirpb.MirMetadatas() @@ -305,14 +861,12 @@ def _check_repo(self, repo_root: str, with_person_ignored: bool, with_annotation dict_metadatas_expect = { 'attributes': { '430df22960b0f369318705800139fcc8ec38a3e4': { - 'dataset_name': 'import-task-0', 'asset_type': 'AssetTypeImageJpeg', 'width': 500, 'height': 281, 'image_channels': 3 }, 'a3008c032eb11c8d9ffcb58208a36682ee40900f': { - 'dataset_name': 'import-task-0', 'asset_type': 'AssetTypeImageJpeg', 'width': 500, 'height': 333, @@ -330,10 +884,12 @@ def _check_repo(self, repo_root: str, with_person_ignored: bool, with_annotation mir_tasks = mirpb.MirTasks() with open(os.path.join(repo_root, 'tasks.mir'), 'rb') as f: mir_tasks.ParseFromString(f.read()) - dict_tasks = MessageToDict(mir_tasks, preserving_proto_field_name=True) - assert ('import-task-0' in dict_tasks['tasks'] - or 'import-task-1' in dict_tasks['tasks'] - or 'import-task-2' in dict_tasks['tasks']) + self.assertTrue({'import-task-0', 'import-task-1', 'import-task-2', 'import-task-3'} & mir_tasks.tasks.keys()) + + task = mir_tasks.tasks[mir_tasks.head_task_id] + task_dict = MessageToDict(task, preserving_proto_field_name=True) + self.assertEqual(task_dict.get('new_types', {}), task_new_types) + self.assertEqual(task_dict.get('new_types_added', False), task_new_types_added) # custom: env prepare def _prepare_dirs(self): @@ -349,6 +905,7 @@ def _prepare_dirs(self): os.makedirs(self._data_root) self._idx_file = os.path.join(self._data_root, 'idx.txt') + self._gt_idx_file = os.path.join(self._data_root, 'gt_idx.txt') self._ck_file = os.path.join(self._data_root, 'ck.tsv') self._data_img_path = os.path.join(self._data_root, 'img') os.makedirs(self._data_img_path) @@ -357,22 +914,24 @@ def _prepare_dirs(self): self._prepare_data(data_root=self._data_root, idx_file=self._idx_file, + gt_idx_file=self._gt_idx_file, ck_file=self._ck_file, data_img_path=self._data_img_path, data_xml_path=self._data_xml_path) - def _prepare_data(self, data_root, idx_file, ck_file, data_img_path, data_xml_path): + def _prepare_data(self, data_root, idx_file, gt_idx_file, ck_file, data_img_path, data_xml_path): 
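+        # the same file list is written to idx_file and gt_idx_file, so these
+        # tests import identical pred and gt annotations for every asset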
local_data_root = 'tests/assets' # Copy img files. img_files = ['2007_000032.jpg', '2007_000243.jpg'] - with open(idx_file, 'w') as idx_f, open(ck_file, 'w') as ck_f: + with open(idx_file, 'w') as idx_f, open(gt_idx_file, 'w') as gt_idx_f, open(ck_file, 'w') as ck_f: for file in img_files: src = os.path.join(local_data_root, file) dst = os.path.join(data_img_path, file) shutil.copyfile(src, dst) idx_f.writelines(dst + '\n') + gt_idx_f.writelines(dst + '\n') ck_f.write(f"{dst}\tck0\n") # Copy xml files. @@ -382,12 +941,11 @@ def _prepare_data(self, data_root, idx_file, ck_file, data_img_path, data_xml_pa dst = os.path.join(data_xml_path, file) shutil.copyfile(src, dst) + # Copy meta file + shutil.copyfile(os.path.join(local_data_root, 'pred_meta.yaml'), os.path.join(data_xml_path, 'pred_meta.yaml')) + def _prepare_mir_repo(self): # init repo test_utils.mir_repo_init(self._mir_repo_root) # prepare branch a test_utils.mir_repo_create_branch(self._mir_repo_root, 'a') - - -if __name__ == '__main__': - unittest.main() diff --git a/ymir/command/tests/unit/test_cmd_import_model.py b/ymir/command/tests/unit/test_cmd_import_model.py index 6af7e64d07..bf4a8374ba 100644 --- a/ymir/command/tests/unit/test_cmd_import_model.py +++ b/ymir/command/tests/unit/test_cmd_import_model.py @@ -1,13 +1,16 @@ import os import shutil import tarfile +import time import unittest import yaml -from mir.commands.model_importing import CmdModelImport -from mir.tools import mir_storage_ops, settings as mir_settings, utils as mir_utils +from mir.commands.import_model import CmdModelImport +from mir.protos import mir_command_pb2 as mirpb +from mir.tools import mir_storage_ops, models, settings as mir_settings from mir.tools.code import MirCode +from mir.version import YMIR_VERSION from tests import utils as test_utils @@ -52,16 +55,22 @@ def _prepare_model(self): with open(os.path.join(self._src_model_root, 'best.weights'), 'w') as f: f.write('fake darknet weights model') # note: unknown-car is not in user labels, we still expect it success - model_storage = mir_utils.ModelStorage(models=['best.weights'], - executor_config={'class_names': ['cat', 'person', 'unknown-car']}, - task_context={ - mir_settings.PRODUCER_KEY: mir_settings.PRODUCER_NAME, - 'mAP': 0.5 - }) + mss = models.ModelStageStorage(stage_name='default_best_stage', + files=['best.weights'], + mAP=0.5, + timestamp=int(time.time())) + model_storage = models.ModelStorage(executor_config={'class_names': ['cat', 'person', 'unknown-car']}, + task_context={ + mir_settings.PRODUCER_KEY: mir_settings.PRODUCER_NAME, + 'mAP': 0.5 + }, + stages={mss.stage_name: mss}, + best_stage_name=mss.stage_name, + package_version=YMIR_VERSION) with open(os.path.join(self._src_model_root, 'ymir-info.yaml'), 'w') as f: - yaml.safe_dump(model_storage.as_dict(), f) + yaml.safe_dump(model_storage.dict(), f) with tarfile.open(self._src_model_package_path, 'w:gz') as tar_gz_f: - tar_gz_f.add(os.path.join(self._src_model_root, 'best.weights'), 'best.weights') + tar_gz_f.add(os.path.join(self._src_model_root, 'best.weights'), f"{mss.stage_name}/best.weights") tar_gz_f.add(os.path.join(self._src_model_root, 'ymir-info.yaml'), 'ymir-info.yaml') def _prepare_mir_repo(self): @@ -70,10 +79,10 @@ def _prepare_mir_repo(self): # protected: check result def _check_result(self): """ check destination model package file """ - model = mir_storage_ops.MirStorageOps.load_single_model(mir_root=self._mir_root, - mir_branch='a', - mir_task_id='a') - 
self.assertTrue(os.path.isfile(os.path.join(self._models_location, model['model_hash']))) + mir_storage_data: mirpb.MirTasks = mir_storage_ops.MirStorageOps.load_single_storage( + mir_root=self._mir_root, mir_branch='a', ms=mirpb.MirStorage.MIR_TASKS, mir_task_id='a') + task = mir_storage_data.tasks[mir_storage_data.head_task_id] + self.assertTrue(os.path.isfile(os.path.join(self._models_location, task.model.model_hash))) # public: test cases def test_00(self): diff --git a/ymir/command/tests/unit/test_cmd_infer.py b/ymir/command/tests/unit/test_cmd_infer.py index 520641ecbd..fcff6757c1 100644 --- a/ymir/command/tests/unit/test_cmd_infer.py +++ b/ymir/command/tests/unit/test_cmd_infer.py @@ -2,13 +2,15 @@ import os import shutil import tarfile +import time import unittest from unittest import mock +from mir.version import YMIR_VERSION import yaml from mir.commands.infer import CmdInfer -from mir.tools import settings as mir_settings, utils as mir_utils +from mir.tools import models, settings as mir_settings from mir.tools.code import MirCode from tests import utils as test_utils @@ -19,11 +21,11 @@ def __init__(self, methodName: str = ...) -> None: super().__init__(methodName=methodName) self._test_root = test_utils.dir_test_root(self.id().split('.')[-3:]) self._mir_repo_root = os.path.join(self._test_root, 'mir-demo-repo') - self._models_location = os.path.join(self._test_root, 'models') - self._src_assets_root = os.path.join(self._test_root, 'assets') # source assets, index and infer config file self._working_root = os.path.join(self._test_root, 'work') # work directory for cmd infer - self._config_file = os.path.join(self._test_root, 'config.yaml') - self._assets_index_file = os.path.join(self._src_assets_root, 'index.tsv') + self._models_location = os.path.join(self._working_root, 'models') + self._src_assets_root = os.path.join(self._working_root, 'assets') # source assets, index and infer config file + self._config_file = os.path.join(self._working_root, 'config.yaml') + self._assets_index_file = os.path.join(self._working_root, 'index.tsv') def setUp(self) -> None: self._prepare_dir() @@ -40,6 +42,8 @@ def tearDown(self) -> None: # protected: setup and teardown def _prepare_dir(self): + if os.path.isdir(self._test_root): + shutil.rmtree(self._test_root) os.makedirs(self._test_root, exist_ok=True) os.makedirs(self._models_location, exist_ok=True) os.makedirs(self._working_root, exist_ok=True) @@ -58,7 +62,7 @@ def _prepare_assets(self): shutil.copyfile(src=os.path.join(test_assets_root, '2007_000032.jpg'), dst=os.path.join(self._working_root, '2007_000032.jpg')) with open(self._assets_index_file, 'w') as f: - f.write(f'{self._working_root}/2007_000032.jpg\n') + f.write(f'{self._src_assets_root}/2007_000032.jpg\n') def _prepare_model(self): # model params @@ -77,20 +81,27 @@ def _prepare_model(self): training_config['anchors'] = '12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401' training_config['class_names'] = ['person', 'cat', 'unknown-car'] - model_storage = mir_utils.ModelStorage(models=['model.params', 'model.json'], - executor_config=training_config, - task_context={ - 'src_revs': 'master', - 'dst_rev': 'a' - }) + model_stage = models.ModelStageStorage(stage_name='default_best_stage', + files=['model.params', 'model.json'], + mAP=0.5, + timestamp=int(time.time())) + model_storage = models.ModelStorage(executor_config=training_config, + task_context={ + 'src_revs': 'master', + 'dst_rev': 'a' + }, + stages={model_stage.stage_name: model_stage}, + 
best_stage_name=model_stage.stage_name, + package_version=YMIR_VERSION) with open(os.path.join(self._models_location, 'ymir-info.yaml'), 'w') as f: - yaml.dump(model_storage.as_dict(), f) + yaml.dump(model_storage.dict(), f) # pack model with tarfile.open(os.path.join(self._models_location, 'fake_model_hash'), "w:gz") as dest_tar_gz: - dest_tar_gz.add(os.path.join(self._models_location, 'model.params'), 'model.params') - dest_tar_gz.add(os.path.join(self._models_location, 'model.json'), 'model.json') + dest_tar_gz.add(os.path.join(self._models_location, 'model.params'), + f"{model_stage.stage_name}/model.params") + dest_tar_gz.add(os.path.join(self._models_location, 'model.json'), f"{model_stage.stage_name}/model.json") dest_tar_gz.add(os.path.join(self._models_location, 'ymir-info.yaml'), 'ymir-info.yaml') def _prepare_config_file(self): @@ -100,7 +111,7 @@ def _prepare_config_file(self): executor_config = yaml.safe_load(f) with open(self._config_file, 'w') as f: yaml.safe_dump({mir_settings.EXECUTOR_CONFIG_KEY: executor_config}, f) - + def _prepare_infer_result_file(self): fake_infer_output_dict = { 'detection': { @@ -139,26 +150,24 @@ def test_00(self, mock_run): fake_args.work_dir = self._working_root fake_args.mir_root = self._mir_repo_root fake_args.model_location = self._models_location - fake_args.model_hash = 'fake_model_hash' + fake_args.model_hash_stage = 'fake_model_hash@default_best_stage' fake_args.index_file = self._assets_index_file fake_args.config_file = self._config_file fake_args.executor = 'infer-executor:fake' fake_args.executant_name = 'executor-instance' + fake_args.run_as_root = False cmd_instance = CmdInfer(fake_args) cmd_result = cmd_instance.run() # check running result self.assertEqual(MirCode.RC_OK, cmd_result) - expected_cmd = ['nvidia-docker', 'run', '--rm'] - expected_cmd.append(f"-v{fake_args.work_dir}:/in/assets:ro") - expected_cmd.append(f"-v{os.path.join(fake_args.work_dir, 'in', 'models')}:/in/models:ro") - expected_cmd.append( - f"-v{os.path.join(fake_args.work_dir, 'in', 'candidate-index.tsv')}:/in/candidate-index.tsv") - expected_cmd.append(f"-v{os.path.join(fake_args.work_dir, 'in', 'config.yaml')}:/in/config.yaml") - expected_cmd.append(f"-v{os.path.join(fake_args.work_dir, 'in', 'env.yaml')}:/in/env.yaml") + expected_cmd = ['docker', 'run', '--rm'] + expected_cmd.append(f"-v{os.path.join(fake_args.work_dir, 'in')}:/in:ro") expected_cmd.append(f"-v{os.path.join(fake_args.work_dir, 'out')}:/out") + expected_cmd.append(f"-v{self._src_assets_root}:{self._src_assets_root}") expected_cmd.extend(['--user', f"{os.getuid()}:{os.getgid()}"]) + expected_cmd.append("--shm-size=16G") expected_cmd.extend(['--name', fake_args.executant_name]) expected_cmd.append(fake_args.executor) mock_run.assert_called_once_with(expected_cmd, check=True, stdout=mock.ANY, stderr=mock.ANY, text=True) @@ -176,4 +185,4 @@ def test_00(self, mock_run): self.assertTrue('model_params_path' in infer_config) # check model params - self.assertTrue(os.path.isfile(os.path.join(fake_args.work_dir, 'in', 'models', 'model.params'))) + self.assertTrue(os.path.isfile(os.path.join(fake_args.work_dir, 'in', 'models', 'default_best_stage', 'model.params'))) diff --git a/ymir/command/tests/unit/test_cmd_merge.py b/ymir/command/tests/unit/test_cmd_merge.py index 264ebce50a..c7924c71fd 100644 --- a/ymir/command/tests/unit/test_cmd_merge.py +++ b/ymir/command/tests/unit/test_cmd_merge.py @@ -1,7 +1,7 @@ import logging import os import shutil -from typing import Dict, List, Tuple +from typing import 
Any, Dict, List, Tuple import unittest from google.protobuf.json_format import MessageToDict, ParseDict @@ -54,12 +54,8 @@ def _prepare_mir_repo(self): self._prepare_mir_branch_a() test_utils.mir_repo_create_branch(self._mir_root, "b") self._prepare_mir_branch_b() - # test_utils.mir_repo_create_branch(self._mir_root, "c") - # self._prepare_mir_branch_c() test_utils.mir_repo_create_branch(self._mir_root, "d") self._prepare_mir_branch_d() - # test_utils.mir_repo_create_branch(self._mir_root, "e") - # self._prepare_mir_branch_e() test_utils.mir_repo_checkout(self._mir_root, "master") @staticmethod @@ -76,11 +72,16 @@ def _generate_attribute_for_asset(width: int, height: int, tvt_type: int = mirpb } @staticmethod - def _generate_annotations_for_asset(type_ids: List[int], x: int, y: int): + def _generate_annotations_for_asset(type_ids: List[int], + x: int, + y: int, + cm: int = mirpb.ConfusionMatrixType.NotSet): annotations_list = [] for idx, type_id in enumerate(type_ids): annotations_list.append({ 'class_id': type_id, + 'cm': cm, + 'det_link_id': -1, 'box': { 'x': idx * 100 + x, 'y': y, @@ -88,11 +89,11 @@ def _generate_annotations_for_asset(type_ids: List[int], x: int, y: int): 'h': 50 }, }) - return {'annotations': annotations_list} + return {'boxes': annotations_list, 'img_class_ids': type_ids} @staticmethod def _generate_keywords_for_asset(predefined: List[int], customized: List[str]): - return {'predifined_keyids': predefined, 'customized_keywords': customized} + return {'predefined_keyids': predefined} @staticmethod def _generate_task(task_id: str, name: str, type: int, timestamp: int): @@ -109,35 +110,40 @@ def _generate_task(task_id: str, name: str, type: int, timestamp: int): def _prepare_mir_branch(self, assets_and_keywords: Dict[str, Tuple[List[int], List[str]]], size: int, branch_name_and_task_id: str, commit_msg: str): mir_annotations = mirpb.MirAnnotations() - mir_keywords = mirpb.MirKeywords() mir_metadatas = mirpb.MirMetadatas() - dict_metadatas = {'attributes': {}} + dict_metadatas: Dict[str, Any] = {'attributes': {}} for asset_id in assets_and_keywords: dict_metadatas["attributes"][asset_id] = TestMergeCmd._generate_attribute_for_asset(size, size) ParseDict(dict_metadatas, mir_metadatas) image_annotations = {} + image_cks = {} + class_ids_set = set() for asset_idx, (asset_id, keywords_pair) in enumerate(assets_and_keywords.items()): image_annotations[asset_id] = TestMergeCmd._generate_annotations_for_asset(type_ids=keywords_pair[0], x=100, y=(asset_idx + 1) * 100) + image_cks[asset_id] = {'cks': keywords_pair[1]} + class_ids_set.update(keywords_pair[0]) + pred = { + 'task_id': branch_name_and_task_id, + "image_annotations": image_annotations, + "eval_class_ids": list(class_ids_set), + 'task_class_ids': list(class_ids_set), + } + gt = { + 'task_id': branch_name_and_task_id, + "image_annotations": image_annotations, + 'task_class_ids': list(class_ids_set), + } dict_annotations = { - "task_annotations": { - branch_name_and_task_id: { - "image_annotations": image_annotations - } - }, - 'head_task_id': branch_name_and_task_id + "prediction": pred, + 'ground_truth': gt, + 'image_cks': image_cks, } ParseDict(dict_annotations, mir_annotations) - dict_keywords = {"keywords": {}} - for asset_id, keywords_pair in assets_and_keywords.items(): - dict_keywords["keywords"][asset_id] = TestMergeCmd._generate_keywords_for_asset( - keywords_pair[0], keywords_pair[1]) - ParseDict(dict_keywords, mir_keywords) - task = mir_storage_ops.create_task(task_type=mirpb.TaskTypeMining, 
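# The hunks above replace the per-task 'task_annotations' map with explicit
# 'prediction' and 'ground_truth' messages plus per-image 'image_cks'. What
# follows is a minimal sketch of assembling the new layout, assuming the field
# names shown in these hunks and that the repo's mir package is importable;
# the helper name build_annotations is illustrative only, not part of this patch.
from typing import Dict, List

from google.protobuf.json_format import ParseDict

from mir.protos import mir_command_pb2 as mirpb


def build_annotations(task_id: str, image_annotations: Dict[str, dict],
                      image_cks: Dict[str, dict]) -> mirpb.MirAnnotations:
    # every class id referenced by any box, as produced by _generate_annotations_for_asset
    class_ids: List[int] = sorted({box['class_id']
                                   for anno in image_annotations.values()
                                   for box in anno['boxes']})
    annotations_dict = {
        'prediction': {
            'task_id': task_id,
            'image_annotations': image_annotations,
            'eval_class_ids': class_ids,  # classes taking part in evaluation
            'task_class_ids': class_ids,
        },
        'ground_truth': {
            'task_id': task_id,
            'image_annotations': image_annotations,
            'task_class_ids': class_ids,
        },
        'image_cks': image_cks,  # e.g. {'a0': {'cks': {'c0': 'c1'}}}
    }
    mir_annotations = mirpb.MirAnnotations()
    ParseDict(annotations_dict, mir_annotations)
    return mir_annotations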
task_id=branch_name_and_task_id, message=commit_msg) @@ -157,10 +163,18 @@ def _prepare_mir_branch_a(self): all asset size set to (1000, 1000) """ assets_and_keywords = { - "a0": ([1], ["c0", "c1"]), - "a1": ([1], ["c0", "c1"]), - "a2": ([1], ["c0", "c1"]), - "a3": ([1], ["c0", "c1"]), + "a0": ([1], { + "c0": "c1" + }), + "a1": ([1], { + "c0": "c1" + }), + "a2": ([1], { + "c0": "c1" + }), + "a3": ([1], { + "c0": "c1" + }), } self._prepare_mir_branch(assets_and_keywords=assets_and_keywords, size=1000, @@ -174,9 +188,15 @@ def _prepare_mir_branch_b(self): all asset size set to (1100, 1100) """ assets_and_keywords = { - "b0": ([2], ["c0", "c2"]), - "b1": ([2], ["c0", "c2"]), - "b2": ([2], ["c0", "c2"]), + "b0": ([2], { + "c0": "c2" + }), + "b1": ([2], { + "c0": "c2" + }), + "b2": ([2], { + "c0": "c2" + }), } self._prepare_mir_branch(assets_and_keywords=assets_and_keywords, size=1100, @@ -190,9 +210,15 @@ def _prepare_mir_branch_d(self): all asset size set to (1300, 1300) """ assets_and_keywords = { - "a0": ([1, 2], ["c0", "c1", "c2"]), - "d0": ([1, 4], ["c0", "c1", "c4"]), - "d1": ([1, 4], ["c0", "c1", "c4"]), + "a0": ([1, 2], { + "c0": "c4" + }), + "d0": ([1, 4], { + "c0": "c4" + }), + "d1": ([1, 4], { + "c0": "c4" + }), } self._prepare_mir_branch(assets_and_keywords=assets_and_keywords, size=1300, @@ -200,10 +226,7 @@ def _prepare_mir_branch_d(self): commit_msg="prepare_branch_merge_d") # protected: check - def _check_result(self, - expected_dict_metadatas=None, - expected_dict_annotations=None, - expected_dict_keywords=None): + def _check_result(self, expected_dict_metadatas=None, expected_dict_annotations=None): if expected_dict_metadatas: try: mir_metadatas = test_utils.read_mir_pb(os.path.join(self._mir_root, "metadatas.mir"), @@ -219,26 +242,15 @@ def _check_result(self, try: mir_annotations = test_utils.read_mir_pb(os.path.join(self._mir_root, "annotations.mir"), mirpb.MirAnnotations) - actual_dict_annotations = MessageToDict(mir_annotations, preserving_proto_field_name=True) + actual_dict_annotations = MessageToDict(mir_annotations, + preserving_proto_field_name=True, + use_integers_for_enums=True) self.assertEqual(expected_dict_annotations, actual_dict_annotations) except AssertionError as e: logging.info(f"e: {expected_dict_annotations}") logging.info(f"a: {actual_dict_annotations}") raise e - if expected_dict_keywords: - mir_keywords = test_utils.read_mir_pb(os.path.join(self._mir_root, "keywords.mir"), mirpb.MirKeywords) - actual_dict_keywords = MessageToDict(mir_keywords, preserving_proto_field_name=True) - for asset_id, expected_keywords in expected_dict_keywords["keywords"].items(): - actual_keywords = actual_dict_keywords["keywords"][asset_id] - try: - self.assertEqual(set(expected_keywords["predifined_keyids"]), - set(actual_keywords["predifined_keyids"])) - except AssertionError as e: - logging.info(f"e: {expected_keywords}") - logging.info(f"a: {actual_keywords}") - raise e - # public: test cases def test_all(self): self._test_exclude_no_tvt_host_00() @@ -255,7 +267,7 @@ def _test_no_tvt_stop_00(self): fake_args.mir_root = mir_root fake_args.src_revs = 'b;a' fake_args.ex_src_revs = '' - fake_args.dst_rev = '_test_no_tvt_stop_00@merge-task-id' + fake_args.dst_rev = '_test_no_tvt_stop_00@merge-task-id-s0' fake_args.strategy = 'stop' fake_args.work_dir = '' merge_instance = CmdMerge(fake_args) @@ -276,35 +288,76 @@ def _test_no_tvt_stop_00(self): } } + expected_pred = { + 'task_id': 'merge-task-id-s0', + "image_annotations": { + "a0": 
TestMergeCmd._generate_annotations_for_asset([1], 100, 100, cm=mirpb.ConfusionMatrixType.IGNORED), + "a1": TestMergeCmd._generate_annotations_for_asset([1], 100, 200, cm=mirpb.ConfusionMatrixType.IGNORED), + "a2": TestMergeCmd._generate_annotations_for_asset([1], 100, 300, cm=mirpb.ConfusionMatrixType.IGNORED), + "a3": TestMergeCmd._generate_annotations_for_asset([1], 100, 400, cm=mirpb.ConfusionMatrixType.IGNORED), + "b0": TestMergeCmd._generate_annotations_for_asset([2], 100, 100, cm=mirpb.ConfusionMatrixType.IGNORED), + "b1": TestMergeCmd._generate_annotations_for_asset([2], 100, 200, cm=mirpb.ConfusionMatrixType.IGNORED), + "b2": TestMergeCmd._generate_annotations_for_asset([2], 100, 300, cm=mirpb.ConfusionMatrixType.IGNORED), + }, + 'task_class_ids': [1, 2], + 'model': {}, + 'eval_class_ids': [1, 2], + } + expected_gt = { + 'task_id': 'merge-task-id-s0', + "image_annotations": { + "a0": TestMergeCmd._generate_annotations_for_asset([1], 100, 100, cm=mirpb.ConfusionMatrixType.FN), + "a1": TestMergeCmd._generate_annotations_for_asset([1], 100, 200, cm=mirpb.ConfusionMatrixType.FN), + "a2": TestMergeCmd._generate_annotations_for_asset([1], 100, 300, cm=mirpb.ConfusionMatrixType.FN), + "a3": TestMergeCmd._generate_annotations_for_asset([1], 100, 400, cm=mirpb.ConfusionMatrixType.FN), + "b0": TestMergeCmd._generate_annotations_for_asset([2], 100, 100, cm=mirpb.ConfusionMatrixType.FN), + "b1": TestMergeCmd._generate_annotations_for_asset([2], 100, 200, cm=mirpb.ConfusionMatrixType.FN), + "b2": TestMergeCmd._generate_annotations_for_asset([2], 100, 300, cm=mirpb.ConfusionMatrixType.FN), + }, + 'task_class_ids': [1, 2], + } expected_dict_annotations = { - "task_annotations": { - "merge-task-id": { - "image_annotations": { - "a0": TestMergeCmd._generate_annotations_for_asset([1], 100, 100), - "a1": TestMergeCmd._generate_annotations_for_asset([1], 100, 200), - "a2": TestMergeCmd._generate_annotations_for_asset([1], 100, 300), - "a3": TestMergeCmd._generate_annotations_for_asset([1], 100, 400), - "b0": TestMergeCmd._generate_annotations_for_asset([2], 100, 100), - "b1": TestMergeCmd._generate_annotations_for_asset([2], 100, 200), - "b2": TestMergeCmd._generate_annotations_for_asset([2], 100, 300), + "prediction": expected_pred, + 'ground_truth': expected_gt, + 'image_cks': { + 'a0': { + 'cks': { + 'c0': 'c1', + } + }, + 'a1': { + 'cks': { + 'c0': 'c1', + } + }, + 'a2': { + 'cks': { + 'c0': 'c1', + } + }, + 'a3': { + 'cks': { + 'c0': 'c1', + } + }, + 'b0': { + 'cks': { + 'c0': 'c2', + } + }, + 'b1': { + 'cks': { + 'c0': 'c2', + } + }, + 'b2': { + 'cks': { + 'c0': 'c2', } } - }, - 'head_task_id': 'merge-task-id', - } - - expected_dict_keywords = { - "keywords": { - "a0": TestMergeCmd._generate_keywords_for_asset([1], ["c0", "c1"]), - "a1": TestMergeCmd._generate_keywords_for_asset([1], ["c0", "c1"]), - "a2": TestMergeCmd._generate_keywords_for_asset([1], ["c0", "c1"]), - "a3": TestMergeCmd._generate_keywords_for_asset([1], ["c0", "c1"]), - "b0": TestMergeCmd._generate_keywords_for_asset([2], ["c0", "c2"]), - "b1": TestMergeCmd._generate_keywords_for_asset([2], ["c0", "c2"]), - "b2": TestMergeCmd._generate_keywords_for_asset([2], ["c0", "c2"]), } } - self._check_result(expected_dict_metadatas, expected_dict_annotations, expected_dict_keywords) + self._check_result(expected_dict_metadatas, expected_dict_annotations) def _test_tvt_stop_01(self): """ abnormal case: with tvt flag assigned, strategy stop, a + d, have joint assets """ @@ -313,7 +366,7 @@ def _test_tvt_stop_01(self): fake_args.mir_root = 
mir_root fake_args.src_revs = 'tr:a;va:d' fake_args.ex_src_revs = '' - fake_args.dst_rev = "_test_tvt_stop_01@merge-task-id" + fake_args.dst_rev = "_test_tvt_stop_01@merge-task-id-s1" fake_args.strategy = 'stop' fake_args.work_dir = '' merge_instance = CmdMerge(fake_args) @@ -329,7 +382,7 @@ def _test_tvt_host_00(self): fake_args.mir_root = mir_root fake_args.src_revs = 'tr:a;va:d' fake_args.ex_src_revs = '' - fake_args.dst_rev = '_test_tvt_host_00@merge-task-id' + fake_args.dst_rev = '_test_tvt_host_00@merge-task-id-h0' fake_args.strategy = 'host' fake_args.work_dir = '' merge_instance = CmdMerge(fake_args) @@ -349,33 +402,70 @@ def _test_tvt_host_00(self): } } + expected_pred = { + 'task_id': 'merge-task-id-h0', + "image_annotations": { + "a0": TestMergeCmd._generate_annotations_for_asset([1], 100, 100, cm=mirpb.ConfusionMatrixType.IGNORED), + "a1": TestMergeCmd._generate_annotations_for_asset([1], 100, 200, cm=mirpb.ConfusionMatrixType.IGNORED), + "a2": TestMergeCmd._generate_annotations_for_asset([1], 100, 300, cm=mirpb.ConfusionMatrixType.IGNORED), + "a3": TestMergeCmd._generate_annotations_for_asset([1], 100, 400, cm=mirpb.ConfusionMatrixType.IGNORED), + "d0": TestMergeCmd._generate_annotations_for_asset([1, 4], 100, 200, cm=mirpb.ConfusionMatrixType.IGNORED), + "d1": TestMergeCmd._generate_annotations_for_asset([1, 4], 100, 300, cm=mirpb.ConfusionMatrixType.IGNORED), + }, + 'task_class_ids': [1, 4], + 'model': {}, + 'eval_class_ids': [1, 2, 4], + } + expected_gt = { + 'task_id': 'merge-task-id-h0', + "image_annotations": { + "a0": TestMergeCmd._generate_annotations_for_asset([1], 100, 100, cm=mirpb.ConfusionMatrixType.FN), + "a1": TestMergeCmd._generate_annotations_for_asset([1], 100, 200, cm=mirpb.ConfusionMatrixType.FN), + "a2": TestMergeCmd._generate_annotations_for_asset([1], 100, 300, cm=mirpb.ConfusionMatrixType.FN), + "a3": TestMergeCmd._generate_annotations_for_asset([1], 100, 400, cm=mirpb.ConfusionMatrixType.FN), + "d0": TestMergeCmd._generate_annotations_for_asset([1, 4], 100, 200, cm=mirpb.ConfusionMatrixType.FN), + "d1": TestMergeCmd._generate_annotations_for_asset([1, 4], 100, 300, cm=mirpb.ConfusionMatrixType.FN), + }, + 'task_class_ids': [1, 4], + } expected_dict_annotations = { - "task_annotations": { - "merge-task-id": { - "image_annotations": { - "a0": TestMergeCmd._generate_annotations_for_asset([1], 100, 100), - "a1": TestMergeCmd._generate_annotations_for_asset([1], 100, 200), - "a2": TestMergeCmd._generate_annotations_for_asset([1], 100, 300), - "a3": TestMergeCmd._generate_annotations_for_asset([1], 100, 400), - "d0": TestMergeCmd._generate_annotations_for_asset([1, 4], 100, 200), - "d1": TestMergeCmd._generate_annotations_for_asset([1, 4], 100, 300), + "prediction": expected_pred, + 'ground_truth': expected_gt, + 'image_cks': { + 'a0': { + 'cks': { + 'c0': 'c1' } - } + }, + 'a1': { + 'cks': { + 'c0': 'c1' + } + }, + 'a2': { + 'cks': { + 'c0': 'c1' + } + }, + 'a3': { + 'cks': { + 'c0': 'c1' + } + }, + 'd0': { + 'cks': { + 'c0': 'c4' + } + }, + 'd1': { + 'cks': { + 'c0': 'c4' + } + }, }, - 'head_task_id': 'merge-task-id', } - expected_dict_keywords = { - "keywords": { - "a0": TestMergeCmd._generate_keywords_for_asset([1], ["c0", "c1"]), - "a1": TestMergeCmd._generate_keywords_for_asset([1], ["c0", "c1"]), - "a2": TestMergeCmd._generate_keywords_for_asset([1], ["c0", "c1"]), - "a3": TestMergeCmd._generate_keywords_for_asset([1], ["c0", "c1"]), - "d0": TestMergeCmd._generate_keywords_for_asset([1, 4], ["c0", "c1", "c4"]), - "d1": 
TestMergeCmd._generate_keywords_for_asset([1, 4], ["c0", "c1", "c4"]), - } - } - self._check_result(expected_dict_metadatas, expected_dict_annotations, expected_dict_keywords) + self._check_result(expected_dict_metadatas, expected_dict_annotations) def _test_tvt_guest_00(self): """ normal case: with tvt flag assigned, strategy guest, a + d, have joint assets """ @@ -384,7 +474,7 @@ def _test_tvt_guest_00(self): fake_args.mir_root = mir_root fake_args.src_revs = 'tr:a;va:d' fake_args.ex_src_revs = '' - fake_args.dst_rev = '_test_tvt_guest_00@merge-task-id' + fake_args.dst_rev = '_test_tvt_guest_00@merge-task-id-g0' fake_args.strategy = 'guest' fake_args.work_dir = '' merge_instance = CmdMerge(fake_args) @@ -404,34 +494,70 @@ def _test_tvt_guest_00(self): } } + expected_pred = { + 'task_id': 'merge-task-id-g0', + "image_annotations": { + "a1": TestMergeCmd._generate_annotations_for_asset([1], 100, 200, cm=mirpb.ConfusionMatrixType.IGNORED), + "a2": TestMergeCmd._generate_annotations_for_asset([1], 100, 300, cm=mirpb.ConfusionMatrixType.IGNORED), + "a3": TestMergeCmd._generate_annotations_for_asset([1], 100, 400, cm=mirpb.ConfusionMatrixType.IGNORED), + "a0": TestMergeCmd._generate_annotations_for_asset([1, 2], 100, 100, cm=mirpb.ConfusionMatrixType.IGNORED), + "d0": TestMergeCmd._generate_annotations_for_asset([1, 4], 100, 200, cm=mirpb.ConfusionMatrixType.IGNORED), + "d1": TestMergeCmd._generate_annotations_for_asset([1, 4], 100, 300, cm=mirpb.ConfusionMatrixType.IGNORED), + }, + 'task_class_ids': [1, 2, 4], + 'model': {}, + 'eval_class_ids': [1, 2, 4], + } + expected_gt = { + 'task_id': 'merge-task-id-g0', + "image_annotations": { + "a1": TestMergeCmd._generate_annotations_for_asset([1], 100, 200, cm=mirpb.ConfusionMatrixType.FN), + "a2": TestMergeCmd._generate_annotations_for_asset([1], 100, 300, cm=mirpb.ConfusionMatrixType.FN), + "a3": TestMergeCmd._generate_annotations_for_asset([1], 100, 400, cm=mirpb.ConfusionMatrixType.FN), + "a0": TestMergeCmd._generate_annotations_for_asset([1, 2], 100, 100, cm=mirpb.ConfusionMatrixType.FN), + "d0": TestMergeCmd._generate_annotations_for_asset([1, 4], 100, 200, cm=mirpb.ConfusionMatrixType.FN), + "d1": TestMergeCmd._generate_annotations_for_asset([1, 4], 100, 300, cm=mirpb.ConfusionMatrixType.FN), + }, + 'task_class_ids': [1, 2, 4], + } expected_dict_annotations = { - "task_annotations": { - "merge-task-id": { - "image_annotations": { - "a1": TestMergeCmd._generate_annotations_for_asset([1], 100, 200), - "a2": TestMergeCmd._generate_annotations_for_asset([1], 100, 300), - "a3": TestMergeCmd._generate_annotations_for_asset([1], 100, 400), - "a0": TestMergeCmd._generate_annotations_for_asset([1, 2], 100, 100), - "d0": TestMergeCmd._generate_annotations_for_asset([1, 4], 100, 200), - "d1": TestMergeCmd._generate_annotations_for_asset([1, 4], 100, 300), + "prediction": expected_pred, + 'ground_truth': expected_gt, + 'image_cks': { + 'a0': { + 'cks': { + 'c0': 'c4' } - } + }, + 'a1': { + 'cks': { + 'c0': 'c1' + } + }, + 'a2': { + 'cks': { + 'c0': 'c1' + } + }, + 'a3': { + 'cks': { + 'c0': 'c1' + } + }, + 'd0': { + 'cks': { + 'c0': 'c4' + } + }, + 'd1': { + 'cks': { + 'c0': 'c4' + } + }, }, - 'head_task_id': 'merge-task-id', - } - - expected_dict_keywords = { - "keywords": { - "a0": TestMergeCmd._generate_keywords_for_asset([1, 2], ["c0", "c1", "c2"]), - "a1": TestMergeCmd._generate_keywords_for_asset([1], ["c0", "c1"]), - "a2": TestMergeCmd._generate_keywords_for_asset([1], ["c0", "c1"]), - "a3": TestMergeCmd._generate_keywords_for_asset([1], ["c0", 
"c1"]), - "d0": TestMergeCmd._generate_keywords_for_asset([1, 4], ["c0", "c1", "c4"]), - "d1": TestMergeCmd._generate_keywords_for_asset([1, 4], ["c0", "c1", "c4"]), - } } - self._check_result(expected_dict_metadatas, expected_dict_annotations, expected_dict_keywords) + self._check_result(expected_dict_metadatas, expected_dict_annotations) def _test_exclude_no_tvt_host_00(self): """ a - d with host strategy """ @@ -440,7 +566,7 @@ def _test_exclude_no_tvt_host_00(self): fake_args.mir_root = mir_root fake_args.src_revs = 'tr:a' fake_args.ex_src_revs = 'd' - fake_args.dst_rev = '_test_exclude_no_tvt_host_00@merge-task-id' + fake_args.dst_rev = '_test_exclude_no_tvt_host_00@merge-task-id-nth0' fake_args.strategy = 'host' fake_args.work_dir = '' merge_instance = CmdMerge(fake_args) @@ -457,25 +583,46 @@ def _test_exclude_no_tvt_host_00(self): } } - expected_dict_annotations = { - "task_annotations": { - "merge-task-id": { - "image_annotations": { - "a1": TestMergeCmd._generate_annotations_for_asset([1], 100, 200), - "a2": TestMergeCmd._generate_annotations_for_asset([1], 100, 300), - "a3": TestMergeCmd._generate_annotations_for_asset([1], 100, 400), - } - } + expected_pred = { + 'task_id': 'merge-task-id-nth0', + "image_annotations": { + "a1": TestMergeCmd._generate_annotations_for_asset([1], 100, 200, cm=mirpb.ConfusionMatrixType.IGNORED), + "a2": TestMergeCmd._generate_annotations_for_asset([1], 100, 300, cm=mirpb.ConfusionMatrixType.IGNORED), + "a3": TestMergeCmd._generate_annotations_for_asset([1], 100, 400, cm=mirpb.ConfusionMatrixType.IGNORED), }, - 'head_task_id': 'merge-task-id', + 'task_class_ids': [1], + 'model': {}, + 'eval_class_ids': [1], } - - expected_dict_keywords = { - "keywords": { - "a1": TestMergeCmd._generate_keywords_for_asset([1], ["c0", "c1"]), - "a2": TestMergeCmd._generate_keywords_for_asset([1], ["c0", "c1"]), - "a3": TestMergeCmd._generate_keywords_for_asset([1], ["c0", "c1"]), + expected_gt = { + 'task_id': 'merge-task-id-nth0', + "image_annotations": { + "a1": TestMergeCmd._generate_annotations_for_asset([1], 100, 200, cm=mirpb.ConfusionMatrixType.FN), + "a2": TestMergeCmd._generate_annotations_for_asset([1], 100, 300, cm=mirpb.ConfusionMatrixType.FN), + "a3": TestMergeCmd._generate_annotations_for_asset([1], 100, 400, cm=mirpb.ConfusionMatrixType.FN), + }, + 'task_class_ids': [1], + } + expected_dict_annotations = { + "prediction": expected_pred, + 'ground_truth': expected_gt, + 'image_cks': { + 'a1': { + 'cks': { + 'c0': 'c1' + } + }, + 'a2': { + 'cks': { + 'c0': 'c1' + } + }, + 'a3': { + 'cks': { + 'c0': 'c1' + } + }, } } - self._check_result(expected_dict_metadatas, expected_dict_annotations, expected_dict_keywords) + self._check_result(expected_dict_metadatas, expected_dict_annotations) diff --git a/ymir/command/tests/unit/test_cmd_mining.py b/ymir/command/tests/unit/test_cmd_mining.py index 655dcfc074..cf868c2a6f 100644 --- a/ymir/command/tests/unit/test_cmd_mining.py +++ b/ymir/command/tests/unit/test_cmd_mining.py @@ -3,14 +3,16 @@ import os import shutil import tarfile +import time import unittest from unittest import mock from google.protobuf.json_format import ParseDict +from mir.version import YMIR_VERSION, ymir_model_salient_version import yaml from mir.commands.mining import CmdMining -from mir.tools import mir_storage_ops, settings as mir_settings, utils as mir_utils +from mir.tools import mir_storage_ops, models, settings as mir_settings, mir_storage import mir.protos.mir_command_pb2 as mirpb import tests.utils as test_utils @@ -51,7 +53,7 @@ def 
_mock_run_func(*args, **kwargs): fake_infer_output_dict = { 'detection': { 'd4e4a60147f1e35bc7f5bc89284aa16073b043c9': { - 'annotations': [ + 'boxes': [ { 'box': { 'x': 0, @@ -61,7 +63,8 @@ def _mock_run_func(*args, **kwargs): }, 'score': 0.5, 'class_name': 'cat', - }, { + }, + { 'box': { 'x': 50, 'y': 0, @@ -81,10 +84,15 @@ def _mock_run_func(*args, **kwargs): return 0 def _mock_prepare_model(*args, **kwargs): - model_storage = mir_utils.ModelStorage(models=['0.params'], - executor_config={'class_names': ['person', 'cat', 'unknown-car']}, - task_context={'task_id': '0'}) - return model_storage + mss = models.ModelStageStorage(stage_name='default', files=['0.params'], mAP=0.5, timestamp=int(time.time())) + ms = models.ModelStorage(executor_config={'class_names': ['person', 'cat', 'unknown-car']}, + task_context={'task_id': '0'}, + stages={mss.stage_name: mss}, + best_stage_name=mss.stage_name, + model_hash='xyz', + stage_name=mss.stage_name, + package_version=ymir_model_salient_version(YMIR_VERSION)) + return ms # protected: custom: env prepare def _prepare_dirs(self): @@ -113,11 +121,12 @@ def _prepare_mir_repo(self): def _prepare_mir_repo_branch_mining(self): mir_annotations = mirpb.MirAnnotations() mir_metadatas = mirpb.MirMetadatas() - mir_tasks = mirpb.MirTasks() - mock_image_file = os.path.join(self._storage_root, 'd4e4a60147f1e35bc7f5bc89284aa16073b043c9') + mock_image_file = mir_storage.get_asset_storage_path(self._storage_root, + 'd4e4a60147f1e35bc7f5bc89284aa16073b043c9') shutil.copyfile("tests/assets/2007_000032.jpg", mock_image_file) - mock_image_file = os.path.join(self._storage_root, 'a3008c032eb11c8d9ffcb58208a36682ee40900f') + mock_image_file = mir_storage.get_asset_storage_path(self._storage_root, + 'a3008c032eb11c8d9ffcb58208a36682ee40900f') shutil.copyfile("tests/assets/2007_000243.jpg", mock_image_file) mock_training_config_file = os.path.join(self._storage_root, 'config.yaml') @@ -172,7 +181,7 @@ def _prepare_mir_repo_branch_mining(self): # public: test cases @mock.patch("mir.commands.infer.CmdInfer.run_with_args", side_effect=_mock_run_func) - @mock.patch("mir.tools.utils.prepare_model", side_effect=_mock_prepare_model) + @mock.patch("mir.tools.models.prepare_model", side_effect=_mock_prepare_model) def test_mining_cmd_00(self, mock_prepare, mock_run): self._prepare_dirs() self._prepare_config() @@ -181,32 +190,44 @@ def test_mining_cmd_00(self, mock_prepare, mock_run): args = type('', (), {})() args.src_revs = 'a@5928508c-1bc0-43dc-a094-0352079e39b5' args.dst_rev = 'a@mining-task-id' - args.model_hash = 'xyz' + args.model_hash_stage = 'xyz@default' args.work_dir = os.path.join(self._storage_root, "mining-task-id") args.asset_cache_dir = '' args.model_location = self._storage_root args.media_location = self._storage_root args.topk = 1 - args.add_annotations = True + args.add_prediction = True args.mir_root = self._mir_repo_root args.config_file = self._config_file args.executor = 'al:0.0.1' args.executant_name = 'executor-instance' + args.run_as_root = False mining_instance = CmdMining(args) mining_instance.run() + expected_model_storage = TestMiningCmd._mock_prepare_model() + mir_annotations: mirpb.MirAnnotations = mir_storage_ops.MirStorageOps.load_single_storage( + mir_root=self._mir_repo_root, + mir_branch='a', + mir_task_id='mining-task-id', + ms=mirpb.MirStorage.MIR_ANNOTATIONS, + as_dict=False) + self.assertEqual({0, 1}, set(mir_annotations.prediction.eval_class_ids)) + # dont care about timestamp + expected_model_storage.stages['default'].timestamp = 
mir_annotations.prediction.model.stages[ + 'default'].timestamp + self.assertEqual(expected_model_storage.get_model_meta(), mir_annotations.prediction.model) mock_run.assert_called_once_with(work_dir=args.work_dir, mir_root=args.mir_root, media_path=os.path.join(args.work_dir, 'in', 'assets'), - model_location=args.model_location, - model_hash=args.model_hash, + model_storage=expected_model_storage, index_file=os.path.join(args.work_dir, 'in', 'candidate-src-index.tsv'), config_file=args.config_file, task_id='mining-task-id', - shm_size='16G', executor=args.executor, executant_name=args.executant_name, - run_infer=args.add_annotations, + run_as_root=args.run_as_root, + run_infer=args.add_prediction, run_mining=True) if os.path.isdir(self._sandbox_root): diff --git a/ymir/command/tests/unit/test_cmd_training.py b/ymir/command/tests/unit/test_cmd_training.py index c8ec7ac4a1..3e8676c68c 100644 --- a/ymir/command/tests/unit/test_cmd_training.py +++ b/ymir/command/tests/unit/test_cmd_training.py @@ -1,17 +1,19 @@ import os import shutil +import time from typing import List, Tuple import unittest from unittest import mock from google.protobuf import json_format -from mir.tools.errors import MirRuntimeError import yaml from mir.commands import training from mir.protos import mir_command_pb2 as mirpb -from mir.tools import hash_utils, mir_repo_utils, mir_storage_ops, settings as mir_settings, utils as mir_utils +from mir.tools import mir_storage_ops, models, settings as mir_settings, mir_storage from mir.tools.code import MirCode +from mir.tools.mir_storage import sha1sum_for_file +from mir.version import ymir_model_salient_version, YMIR_VERSION from tests import utils as test_utils @@ -23,6 +25,7 @@ def __init__(self, methodName: str) -> None: self._assets_location = os.path.join(self._test_root, "assets") self._models_location = os.path.join(self._test_root, "models") self._working_root = os.path.join(self._test_root, "work") + self._assets_cache = os.path.join(self._test_root, 'cache') self._mir_root = os.path.join(self._test_root, "mir-root") self._config_file = os.path.join(self._test_root, 'config.yaml') @@ -44,7 +47,7 @@ def __prepare_dirs(self): test_utils.remake_dirs(self._models_location) test_utils.remake_dirs(self._mir_root) test_utils.remake_dirs(self._working_root) - + def __prepare_mir_repo(self): self.__prepare_mir_repo_branch_a() self.__prepare_mir_repo_branch_b() @@ -80,72 +83,71 @@ def __prepare_mir_repo_branch_a(self): # annotations annotations_dict = { - "task_annotations": { - "a": { - "image_annotations": { - "430df22960b0f369318705800139fcc8ec38a3e4": { - "annotations": [{ - "index": 0, - "box": { - "x": 104, - "y": 78, - "w": 272, - "h": 105 - }, - "class_id": 3, - "score": 1, - }, { - "index": 1, - "box": { - "x": 133, - "y": 88, - "w": 65, - "h": 36 - }, - "class_id": 3, - "score": 1, - }, { - "index": 2, - "box": { - "x": 195, - "y": 180, - "w": 19, - "h": 50 - }, - "class_id": 2, - "score": 1, - }, { - "index": 3, - "box": { - "x": 26, - "y": 189, - "w": 19, - "h": 95 - }, - "class_id": 2, - "score": 1, - }] - }, - "a3008c032eb11c8d9ffcb58208a36682ee40900f": { - "annotations": [{ - "index": 0, - "box": { - "x": 181, - "y": 127, - "w": 94, - "h": 67 - }, - "class_id": 3, - "score": 1, - }] - }, - } + "prediction": { + 'task_id': 'a', + "image_annotations": { + "430df22960b0f369318705800139fcc8ec38a3e4": { + "boxes": [{ + "index": 0, + "box": { + "x": 104, + "y": 78, + "w": 272, + "h": 105 + }, + "class_id": 3, + "score": 1, + }, { + "index": 1, + "box": { + "x": 133, + 
"y": 88, + "w": 65, + "h": 36 + }, + "class_id": 3, + "score": 1, + }, { + "index": 2, + "box": { + "x": 195, + "y": 180, + "w": 19, + "h": 50 + }, + "class_id": 2, + "score": 1, + }, { + "index": 3, + "box": { + "x": 26, + "y": 189, + "w": 19, + "h": 95 + }, + "class_id": 2, + "score": 1, + }] + }, + "a3008c032eb11c8d9ffcb58208a36682ee40900f": { + "boxes": [{ + "index": 0, + "box": { + "x": 181, + "y": 127, + "w": 94, + "h": 67 + }, + "class_id": 3, + "score": 1, + }] + }, } }, - 'head_task_id': 'a' } mir_annotations = mirpb.MirAnnotations() json_format.ParseDict(annotations_dict, mir_annotations) + mir_annotations.ground_truth.CopyFrom(mir_annotations.prediction) # save and commit task = mir_storage_ops.create_task(task_type=mirpb.TaskType.TaskTypeImportData, task_id='a', message='import') @@ -198,10 +200,11 @@ def __prepare_assets(self): copy all assets from project to assets_location, assumes that `self._assets_location` already created """ image_paths = ["tests/assets/2007_000032.jpg", "tests/assets/2007_000243.jpg"] - sha1sum_path_pairs = [(hash_utils.sha1sum_for_file(image_path), image_path) + sha1sum_path_pairs = [(sha1sum_for_file(image_path), image_path) for image_path in image_paths] # type: List[Tuple[str, str]] for sha1sum, image_path in sha1sum_path_pairs: - shutil.copyfile(image_path, os.path.join(self._assets_location, sha1sum)) + shutil.copyfile(image_path, + mir_storage.get_asset_storage_path(self._assets_location, sha1sum)) shutil.copyfile('tests/assets/training-template.yaml', self._config_file) with open(self._config_file, 'r') as f: @@ -216,16 +219,29 @@ def __deprepare_dirs(self): if os.path.isdir(self._test_root): shutil.rmtree(self._test_root) - # private: mocks - def __mock_run_train_cmd(*args, **kwargs): - return MirCode.RC_OK + # protected: mocked functions + def _mock_run_docker_cmd(*args, **kwargs): + pass def __mock_process_model_storage(*args, **kwargs): - return ("xyz", 0.9) + mss = models.ModelStageStorage(stage_name='default', + files=['default.weights'], + mAP=0.9, + timestamp=int(time.time())) + ms = models.ModelStorage(executor_config={'class_names': ['cat']}, + task_context={ + 'src_revs': 'a@a', + 'dst_rev': 'a@test_training_cmd' + }, + stages={mss.stage_name: mss}, + best_stage_name=mss.stage_name, + model_hash='xyz', + package_version=ymir_model_salient_version(YMIR_VERSION)) + return ms # public: test cases - @mock.patch("mir.commands.training._run_train_cmd", side_effect=__mock_run_train_cmd) - @mock.patch("mir.commands.training._process_model_storage", side_effect=__mock_process_model_storage) + @mock.patch('subprocess.run', side_effect=_mock_run_docker_cmd) + @mock.patch("mir.commands.training._find_and_save_model", side_effect=__mock_process_model_storage) def test_normal_00(self, *mock_run): """ normal case """ fake_args = type('', (), {})() @@ -234,7 +250,7 @@ def test_normal_00(self, *mock_run): fake_args.mir_root = self._mir_root fake_args.model_path = self._models_location fake_args.media_location = self._assets_location - fake_args.model_hash = '' + fake_args.model_hash_stage = '' fake_args.work_dir = self._working_root fake_args.force = True fake_args.force_rebuild = False @@ -243,32 +259,10 @@ def test_normal_00(self, *mock_run): fake_args.tensorboard_dir = '' fake_args.config_file = self._config_file fake_args.asset_cache_dir = '' + fake_args.run_as_root = False cmd = training.CmdTrain(fake_args) cmd_run_result = cmd.run() # check result self.assertEqual(MirCode.RC_OK, cmd_run_result) - - def test_abnormal_00(self): - """ no training 
set """ - fake_args = type('', (), {})() - fake_args.src_revs = "a@b" - fake_args.dst_rev = "b@test_training_cmd" - fake_args.mir_root = self._mir_root - fake_args.model_path = self._models_location - fake_args.media_location = self._assets_location - fake_args.model_hash = '' - fake_args.work_dir = self._working_root - fake_args.force = True - fake_args.force_rebuild = False - fake_args.executor = "executor" - fake_args.executant_name = 'executor-instance' - fake_args.tensorboard_dir = '' - fake_args.config_file = self._config_file - fake_args.asset_cache_dir = '' - - cmd = training.CmdTrain(fake_args) - with self.assertRaises(MirRuntimeError): - cmd_run_result = cmd.run() - self.assertTrue(mir_repo_utils.mir_check_branch_exists(self._mir_root, 'b@test_training_cmd')) diff --git a/ymir/command/tests/unit/test_tools_ark_data_exporter.py b/ymir/command/tests/unit/test_tools_ark_data_exporter.py index f710dadd12..434bbbe5de 100644 --- a/ymir/command/tests/unit/test_tools_ark_data_exporter.py +++ b/ymir/command/tests/unit/test_tools_ark_data_exporter.py @@ -6,7 +6,8 @@ from google.protobuf import json_format from mir.protos import mir_command_pb2 as mirpb -from mir.tools import data_exporter, hash_utils, mir_storage_ops +from mir.tools import exporter, mir_storage_ops, revs_parser, mir_storage +from mir.tools.mir_storage import sha1sum_for_file from tests import utils as test_utils @@ -26,7 +27,7 @@ def setUp(self) -> None: return super().setUp() def tearDown(self) -> None: - # self.__deprepare_dirs() + self.__deprepare_dirs() return super().tearDown() # private: prepare env @@ -45,10 +46,11 @@ def __prepare_assets(self): copy all assets from project to assets_location, assumes that `self._assets_location` already created ''' image_paths = ['tests/assets/2007_000032.jpg', 'tests/assets/2007_000243.jpg'] - sha1sum_path_pairs = [(hash_utils.sha1sum_for_file(image_path), image_path) + sha1sum_path_pairs = [(sha1sum_for_file(image_path), image_path) for image_path in image_paths] # type: List[Tuple[str, str]] for sha1sum, image_path in sha1sum_path_pairs: - shutil.copyfile(image_path, os.path.join(self._assets_location, sha1sum)) + shutil.copyfile(image_path, + mir_storage.get_asset_storage_path(self._assets_location, sha1sum)) def __prepare_mir_repo(self): ''' @@ -64,13 +66,14 @@ def __prepare_mir_repo(self): 'assetType': 'AssetTypeImageJpeg', 'width': 500, 'height': 281, - 'imageChannels': 3 + 'imageChannels': 3, + 'tvtType': 'TvtTypeTraining', }, 'a3008c032eb11c8d9ffcb58208a36682ee40900f': { 'assetType': 'AssetTypeImageJpeg', 'width': 500, 'height': 333, - 'imageChannels': 3 + 'imageChannels': 3, } } } @@ -79,93 +82,74 @@ def __prepare_mir_repo(self): # annotations annotations_dict = { - 'task_annotations': { - 'a': { - 'image_annotations': { - '430df22960b0f369318705800139fcc8ec38a3e4': { - 'annotations': [{ - 'index': 0, - 'box': { - 'x': 104, - 'y': 78, - 'w': 272, - 'h': 105 - }, - 'class_id': 52, - 'score': 1, - }, { - 'index': 1, - 'box': { - 'x': 133, - 'y': 88, - 'w': 65, - 'h': 36 - }, - 'class_id': 52, - 'score': 1, - }, { - 'index': 2, - 'box': { - 'x': 195, - 'y': 180, - 'w': 19, - 'h': 50 - }, - 'class_id': 2, - 'score': 1, - }, { - 'index': 3, - 'box': { - 'x': 26, - 'y': 189, - 'w': 19, - 'h': 95 - }, - 'class_id': 2, - 'score': 1, - }] - }, - 'a3008c032eb11c8d9ffcb58208a36682ee40900f': { - 'annotations': [{ - 'index': 0, - 'box': { - 'x': 181, - 'y': 127, - 'w': 94, - 'h': 67 - }, - 'class_id': 52, - 'score': 1, - }] - }, - } + 'prediction': { + 'task_id': 'a', + 
'image_annotations': { + '430df22960b0f369318705800139fcc8ec38a3e4': { + 'boxes': [{ + 'index': 0, + 'box': { + 'x': 104, + 'y': 78, + 'w': 272, + 'h': 105 + }, + 'class_id': 52, + 'score': 1, + }, { + 'index': 1, + 'box': { + 'x': 133, + 'y': 88, + 'w': 65, + 'h': 36 + }, + 'class_id': 52, + 'score': 1, + }, { + 'index': 2, + 'box': { + 'x': 195, + 'y': 180, + 'w': 19, + 'h': 50 + }, + 'class_id': 2, + 'score': 1, + }, { + 'index': 3, + 'box': { + 'x': 26, + 'y': 189, + 'w': 19, + 'h': 95 + }, + 'class_id': 2, + 'score': 1, + }] + }, + 'a3008c032eb11c8d9ffcb58208a36682ee40900f': { + 'boxes': [{ + 'index': 0, + 'box': { + 'x': 181, + 'y': 127, + 'w': 94, + 'h': 67 + }, + 'class_id': 52, + 'score': 1, + }] + }, } }, - 'head_task_id': 'a', } mir_annotations = mirpb.MirAnnotations() json_format.ParseDict(annotations_dict, mir_annotations) - - # keywords - keywords_dict = { - 'keywords': { - '430df22960b0f369318705800139fcc8ec38a3e4': { - 'predifined_keyids': [2, 52], - 'customized_keywords': ['pascal'] - }, - 'a3008c032eb11c8d9ffcb58208a36682ee40900f': { - 'predifined_keyids': [52], - 'customized_keywords': ['pascal'] - }, - } - } - mir_keywords = mirpb.MirKeywords() - json_format.ParseDict(keywords_dict, mir_keywords) + mir_annotations.ground_truth.CopyFrom(mir_annotations.prediction) # task - task = mir_storage_ops.create_task(task_type=mirpb.TaskType.TaskTypeImportData, - task_id='a', - message='import') + task = mir_storage_ops.create_task(task_type=mirpb.TaskType.TaskTypeImportData, task_id='a', message='import') # save and commit mir_storage_ops.MirStorageOps.save_and_commit(mir_root=self._mir_root, @@ -178,17 +162,7 @@ def __prepare_mir_repo(self): task=task) # private: check result - def __check_result(self, asset_ids, format_type, export_path, index_file_path): - # check files - for asset_id in asset_ids: - asset_path = os.path.join(export_path, asset_id + '.jpeg') - self.assertTrue(os.path.isfile(asset_path)) - if format_type == data_exporter.ExportFormat.EXPORT_FORMAT_ARK: - annotation_path = os.path.join(export_path, asset_id + '.txt') - elif format_type == data_exporter.ExportFormat.EXPORT_FORMAT_VOC: - annotation_path = os.path.join(export_path, asset_id + '.xml') - self.assertTrue(os.path.isfile(annotation_path)) - + def __check_result(self, asset_ids: List[str], export_path: str, index_file_path: str): # index file exists self.assertTrue(os.path.isfile(index_file_path)) # index file have enough lines @@ -211,62 +185,35 @@ def __check_ark_annotations(self, asset_id: str, export_path: str, expected_firs for col_idx in range(2): self.assertEqual(expected_first_two_cols[line_idx][col_idx], int(line_components[col_idx].strip())) - # public: test cases - def test_normal_00(self): - ''' normal case: ark format ''' - asset_ids = {'430df22960b0f369318705800139fcc8ec38a3e4', 'a3008c032eb11c8d9ffcb58208a36682ee40900f'} - train_path = os.path.join(self._dest_root, 'train') - - data_exporter.export(mir_root=self._mir_root, - assets_location=self._assets_location, - class_type_ids={ - 2: 0, - 52: 1 - }, - asset_ids=asset_ids, - asset_dir=train_path, - annotation_dir=train_path, - need_ext=True, - need_id_sub_folder=False, - base_branch='a', - base_task_id='a', - format_type=data_exporter.ExportFormat.EXPORT_FORMAT_ARK, - index_file_path=os.path.join(train_path, 'index.tsv'), - index_assets_prefix='') - - # check result - self.__check_result(asset_ids=asset_ids, - format_type=data_exporter.ExportFormat.EXPORT_FORMAT_ARK, - export_path=train_path, - index_file_path=os.path.join(train_path, 
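# The ARK/VOC export tests being removed here drove data_exporter.export with
# a dozen loose keyword arguments; the replacement test_data_rw_00 below
# gathers them into a single mirpb.ExportConfig passed to
# exporter.export_mirdatas_to_dir. A minimal sketch under the same assumptions
# as that test; the wrapper name export_raw_with_ark_annotations is
# illustrative only, not part of this patch.
from mir.protos import mir_command_pb2 as mirpb
from mir.tools import exporter


def export_raw_with_ark_annotations(mir_metadatas: mirpb.MirMetadatas,
                                    mir_annotations: mirpb.MirAnnotations,
                                    dest_dir: str, media_location: str) -> None:
    ec = mirpb.ExportConfig(asset_format=mirpb.AssetFormat.AF_RAW,
                            asset_dir=dest_dir,
                            media_location=media_location,
                            need_sub_folder=False,
                            anno_format=mirpb.AnnoFormat.AF_DET_ARK_JSON,
                            gt_dir=dest_dir)
    # class_ids_mapping remaps repo class ids (here 2 and 52) to the
    # contiguous ids (0 and 1) written into the exported annotation files
    exporter.export_mirdatas_to_dir(mir_metadatas=mir_metadatas,
                                    ec=ec,
                                    mir_annotations=mir_annotations,
                                    class_ids_mapping={2: 0, 52: 1},
                                    cls_id_mgr=None)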
'index.tsv')) - self.__check_ark_annotations(asset_id='430df22960b0f369318705800139fcc8ec38a3e4', - export_path=train_path, - expected_first_two_cols=[(1, 104), (1, 133), (0, 195), (0, 26)]) - - def test_normal_01(self): - ''' normal case: voc format ''' - asset_ids = {'430df22960b0f369318705800139fcc8ec38a3e4', 'a3008c032eb11c8d9ffcb58208a36682ee40900f'} + def test_data_rw_00(self): train_path = os.path.join(self._dest_root, 'train') - data_exporter.export(mir_root=self._mir_root, - assets_location=self._assets_location, - class_type_ids={ - 2: 0, - 52: 1 - }, - asset_ids=asset_ids, - asset_dir=train_path, - annotation_dir=train_path, - need_ext=True, - need_id_sub_folder=False, - base_branch='a', - base_task_id='a', - format_type=data_exporter.ExportFormat.EXPORT_FORMAT_VOC, - index_file_path=os.path.join(train_path, 'index.tsv'), - index_assets_prefix='') - - # check result - self.__check_result(asset_ids=asset_ids, - format_type=data_exporter.ExportFormat.EXPORT_FORMAT_VOC, - export_path=train_path, - index_file_path=os.path.join(train_path, 'index.tsv')) + index_file_path = os.path.join(train_path, 'index.tsv') + + mir_metadatas: mirpb.MirMetadatas + mir_annotations: mirpb.MirAnnotations + typ_rev_tid = revs_parser.parse_single_arg_rev('tr:a@a', need_tid=True) + [mir_metadatas, mir_annotations] = mir_storage_ops.MirStorageOps.load_multiple_storages( + mir_root=self._mir_root, + mir_branch=typ_rev_tid.rev, + mir_task_id=typ_rev_tid.tid, + ms_list=[mirpb.MirStorage.MIR_METADATAS, mirpb.MirStorage.MIR_ANNOTATIONS], + ) + ec = mirpb.ExportConfig(asset_format=mirpb.AssetFormat.AF_RAW, + asset_dir=train_path, + media_location=self._assets_location, + need_sub_folder=False, + anno_format=mirpb.AnnoFormat.AF_DET_ARK_JSON, + gt_dir=train_path,) + exporter.export_mirdatas_to_dir( + mir_metadatas=mir_metadatas, + ec=ec, + mir_annotations=mir_annotations, + class_ids_mapping={2: 0, 52: 1}, + cls_id_mgr=None, + ) + + self.__check_result( + asset_ids={'430df22960b0f369318705800139fcc8ec38a3e4', 'a3008c032eb11c8d9ffcb58208a36682ee40900f'}, + export_path=train_path, + index_file_path=index_file_path) diff --git a/ymir/command/tests/unit/test_tools_class_ids.py b/ymir/command/tests/unit/test_tools_class_ids.py new file mode 100644 index 0000000000..92db407add --- /dev/null +++ b/ymir/command/tests/unit/test_tools_class_ids.py @@ -0,0 +1,52 @@ +import os +import shutil +import unittest + +from mir.tools.class_ids import load_or_create_userlabels +from tests import utils as test_utils + + +class TestToolsClassIds(unittest.TestCase): + # life cycle + def __init__(self, methodName: str) -> None: + super().__init__(methodName) + self._test_root = test_utils.dir_test_root(self.id().split(".")[-3:]) + + def setUp(self) -> None: + self._prepare_dirs() + self._prepare_label_file() + return super().setUp() + + def tearDown(self) -> None: + self._deprepare_dirs() + return super().tearDown() + + # protected: setup and teardown + def _prepare_dirs(self) -> None: + test_utils.remake_dirs(self._test_root) + + def _deprepare_dirs(self) -> None: + if os.path.isdir(self._test_root): + shutil.rmtree(self._test_root) + + def _prepare_label_file(self) -> None: + test_utils.prepare_labels(mir_root=self._test_root, names=['a', 'b', 'c']) + + # public: test cases + def test_read(self) -> None: + cim = load_or_create_userlabels(mir_root=self._test_root) + self.assertEqual([0, 1, 2], cim.all_ids()) + self.assertEqual([0, 1, 2], cim.id_for_names(['a', 'b', 'c'])[0]) + + def test_write(self) -> None: + cim = 
load_or_create_userlabels(mir_root=self._test_root) + self.assertEqual((3, 'd'), cim.add_main_name('D')) + self.assertEqual((4, 'e'), cim.add_main_name(' e ')) + self.assertEqual([0, 1, 2, 3, 4], cim.all_ids()) + self.assertEqual([0, 1, 2, 3, 4], cim.id_for_names(['a', 'b', 'c', 'd', 'e'])[0]) + + def test_write_abnormal(self) -> None: + cim = load_or_create_userlabels(mir_root=self._test_root) + cim.add_main_name('a') + self.assertEqual([0, 1, 2], cim.all_ids()) + self.assertEqual([0, 1, 2], cim.id_for_names(['a', 'b', 'c'])[0]) diff --git a/ymir/command/tests/unit/test_tools_context.py b/ymir/command/tests/unit/test_tools_context.py deleted file mode 100644 index 4daf18817d..0000000000 --- a/ymir/command/tests/unit/test_tools_context.py +++ /dev/null @@ -1,45 +0,0 @@ -import os -import shutil -import unittest - -from mir.tools import context -from tests import utils as test_utils - - -class TestToolsContext(unittest.TestCase): - # life cycle - def __init__(self, methodName: str = ...) -> None: - super().__init__(methodName) - self._test_root = test_utils.dir_test_root(self.id().split(".")[-3:]) - - def setUp(self) -> None: - self._prepare_dirs() - return super().setUp() - - def tearDown(self) -> None: - self._deprepare_dirs() - return super().tearDown() - - # protected: setup and teardown - def _prepare_dirs(self) -> None: - test_utils.remake_dirs(os.path.join(self._test_root, '.mir')) - - def _deprepare_dirs(self) -> None: - if os.path.isdir(self._test_root): - shutil.rmtree(self._test_root) - - # test cases - def test_00(self): - # case 1 - project_class_ids = [1, 2, 3] - context.save(mir_root=self._test_root, project_class_ids=project_class_ids) - - saved_project_class_ids = context.load(mir_root=self._test_root) - self.assertEqual(project_class_ids, saved_project_class_ids) - - # case 2 - project_class_ids = [] - context.save(mir_root=self._test_root, project_class_ids=project_class_ids) - - saved_project_class_ids = context.load(mir_root=self._test_root) - self.assertEqual(project_class_ids, saved_project_class_ids) diff --git a/ymir/command/tests/unit/test_tools_det_eval.py b/ymir/command/tests/unit/test_tools_det_eval.py index c1749a4cd3..d90da4a174 100644 --- a/ymir/command/tests/unit/test_tools_det_eval.py +++ b/ymir/command/tests/unit/test_tools_det_eval.py @@ -1,14 +1,13 @@ -from collections import Counter -import json import os import shutil +from typing import Any import unittest from google.protobuf import json_format import numpy as np from mir.protos import mir_command_pb2 as mirpb -from mir.tools import det_eval, mir_storage_ops, revs_parser +from mir.tools import det_eval_ctl_ops, det_eval_coco, det_eval_voc, mir_storage_ops, revs_parser from tests import utils as test_utils @@ -16,6 +15,7 @@ class TestToolsDetEval(unittest.TestCase): # life cycle def __init__(self, methodName: str = ...) 
-> None: super().__init__(methodName) + self.maxDiff = None self._test_root = test_utils.dir_test_root(self.id().split('.')[-3:]) self._working_root = os.path.join(self._test_root, 'work') self._mir_root = os.path.join(self._test_root, 'mir-root') @@ -39,10 +39,8 @@ def _prepare_dirs(self) -> None: def _prepare_mir_repo(self) -> None: test_utils.mir_repo_init(self._mir_root) self._prepare_mir_repo_branch_a() - self._prepare_mir_repo_branch_b() def _prepare_mir_repo_branch_a(self) -> None: - """ branch a: a ground truth branch """ metadatas_dict = { 'attributes': { 'a0': { @@ -72,194 +70,179 @@ def _prepare_mir_repo_branch_a(self) -> None: json_format.ParseDict(metadatas_dict, mir_metadatas) annotations_dict = { - 'task_annotations': { - 'a': { - 'image_annotations': { - 'a0': { - 'annotations': [{ - 'index': 0, - 'box': { - 'x': 50, - 'y': 50, - 'w': 50, - 'h': 50, - }, - 'class_id': 0, - 'score': 1, - }, { - 'index': 1, - 'box': { - 'x': 150, - 'y': 50, - 'w': 75, - 'h': 75, - }, - 'class_id': 0, - 'score': 1, - }, { - 'index': 2, - 'box': { - 'x': 150, - 'y': 150, - 'w': 75, - 'h': 75, - }, - 'class_id': 1, - 'score': 1, - }, { - 'index': 3, - 'box': { - 'x': 350, - 'y': 50, - 'w': 100, - 'h': 100, - }, - 'class_id': 2, - 'score': 1, - }] - }, - 'a1': { - 'annotations': [{ - 'index': 0, - 'box': { - 'x': 300, - 'y': 300, - 'w': 100, - 'h': 100, - }, - 'class_id': 2, - 'score': 1, - }] - }, - } - } + 'prediction': { + 'image_annotations': { + 'a0': { + 'boxes': [{ + 'index': 0, + 'box': { + 'x': 45, + 'y': 45, + 'w': 52, + 'h': 52, + }, + 'class_id': 0, + 'polygon': [], + 'score': 0.7, + }, { + 'index': 1, + 'box': { + 'x': 150, + 'y': 50, + 'w': 73, + 'h': 73, + }, + 'polygon': [], + 'class_id': 0, + 'score': 0.8, + }, { + 'index': 2, + 'box': { + 'x': 350, + 'y': 50, + 'w': 76, + 'h': 76, + }, + 'polygon': [], + 'class_id': 0, + 'score': 0.9, + }, { + 'index': 3, + 'box': { + 'x': 150, + 'y': 160, + 'w': 78, + 'h': 78, + }, + 'polygon': [], + 'class_id': 1, + 'score': 0.9, + }, { + 'index': 4, + 'box': { + 'x': 350, + 'y': 50, + 'w': 102, + 'h': 103, + }, + 'class_id': 2, + 'polygon': [], + 'score': 0.9, + }], + 'img_class_ids': [0, 1, 2], + 'polygons': [], + }, + 'a1': { + 'boxes': [{ + 'index': 0, + 'box': { + 'x': 300, + 'y': 300, + 'w': 103, + 'h': 110, + }, + 'polygon': [], + 'class_id': 2, + 'score': 0.9, + }], + 'img_class_ids': [2], + 'polygons': [], + }, + }, + 'eval_class_ids': [0, 1, 2], + 'task_class_ids': [0, 1, 2], }, - 'head_task_id': 'a' - } - mir_annotations = mirpb.MirAnnotations() - json_format.ParseDict(annotations_dict, mir_annotations) - - task = mir_storage_ops.create_task(task_type=mirpb.TaskType.TaskTypeImportData, task_id='a', message='import') - mir_storage_ops.MirStorageOps.save_and_commit(mir_root=self._mir_root, - mir_branch='a', - his_branch='master', - mir_datas={ - mirpb.MirStorage.MIR_METADATAS: mir_metadatas, - mirpb.MirStorage.MIR_ANNOTATIONS: mir_annotations, - }, - task=task) - - def _prepare_mir_repo_branch_b(self) -> None: - """ branch b: a prediction / detection branch """ - metadatas_dict = { - 'attributes': { + 'image_cks': { 'a0': { - 'assetType': 'AssetTypeImageJpeg', - 'tvtType': 'TvtTypeUnknown', - 'width': 500, - 'height': 500, - 'imageChannels': 3 + 'cks': { + 'weather': 'sunny', + 'color': 'red', + }, }, 'a1': { - 'assetType': 'AssetTypeImageJpeg', - 'tvtType': 'TvtTypeUnknown', - 'width': 500, - 'height': 500, - 'imageChannels': 3 + 'cks': { + 'weather': 'sunny', + 'color': 'blue', + }, }, - 'a2': { - 'assetType': 'AssetTypeImageJpeg', 
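# Branch 'b' disappears in this hunk: predictions and ground truth no longer
# live on separate branches but side by side in one MirAnnotations message on
# branch 'a'. Below is a minimal sketch of loading that combined message,
# mirroring the load_single_storage call used by _test_det_eval further down;
# the helper name load_combined_annotations is illustrative only.
from mir.protos import mir_command_pb2 as mirpb
from mir.tools import mir_storage_ops


def load_combined_annotations(mir_root: str) -> mirpb.MirAnnotations:
    return mir_storage_ops.MirStorageOps.load_single_storage(
        mir_root=mir_root,
        mir_branch='a',
        mir_task_id='a',
        ms=mirpb.MirStorage.MIR_ANNOTATIONS)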
- 'tvtType': 'TvtTypeUnknown', - 'width': 500, - 'height': 500, - 'imageChannels': 3 - } - } - } - mir_metadatas = mirpb.MirMetadatas() - json_format.ParseDict(metadatas_dict, mir_metadatas) - - annotations_dict = { - 'task_annotations': { - 'b': { - 'image_annotations': { - 'a0': { - 'annotations': [{ - 'index': 0, - 'box': { - 'x': 45, - 'y': 45, - 'w': 52, - 'h': 52, - }, - 'class_id': 0, - 'score': 0.7, - }, { - 'index': 1, - 'box': { - 'x': 150, - 'y': 50, - 'w': 73, - 'h': 73, - }, - 'class_id': 0, - 'score': 0.8, - }, { - 'index': 2, - 'box': { - 'x': 350, - 'y': 50, - 'w': 76, - 'h': 76, - }, - 'class_id': 0, - 'score': 0.9, - }, { - 'index': 3, - 'box': { - 'x': 150, - 'y': 160, - 'w': 78, - 'h': 78, - }, - 'class_id': 1, - 'score': 0.9, - }, { - 'index': 4, - 'box': { - 'x': 350, - 'y': 50, - 'w': 102, - 'h': 103, - }, - 'class_id': 2, - 'score': 0.9, - }] - }, - 'a1': { - 'annotations': [{ - 'index': 0, - 'box': { - 'x': 300, - 'y': 300, - 'w': 103, - 'h': 110, - }, - 'class_id': 2, - 'score': 0.9, - }] - }, - } - } }, - 'head_task_id': 'b' + 'ground_truth': { + 'image_annotations': { + 'a0': { + 'boxes': [{ + 'index': 0, + 'box': { + 'x': 50, + 'y': 50, + 'w': 50, + 'h': 50, + }, + 'polygon': [], + 'class_id': 0, + 'score': 1, + }, { + 'index': 1, + 'box': { + 'x': 150, + 'y': 50, + 'w': 75, + 'h': 75, + }, + 'polygon': [], + 'class_id': 0, + 'score': 1, + }, { + 'index': 2, + 'box': { + 'x': 150, + 'y': 150, + 'w': 75, + 'h': 75, + }, + 'polygon': [], + 'class_id': 1, + 'score': 1, + }, { + 'index': 3, + 'box': { + 'x': 350, + 'y': 50, + 'w': 100, + 'h': 100, + }, + 'polygon': [], + 'class_id': 2, + 'score': 1, + }], + 'img_class_ids': [0, 1, 2], + 'polygons': [], + }, + 'a1': { + 'boxes': [{ + 'index': 0, + 'box': { + 'x': 300, + 'y': 300, + 'w': 100, + 'h': 100, + }, + 'polygon': [], + 'class_id': 2, + 'score': 1, + }], + 'img_class_ids': [2], + 'polygons': [], + }, + }, + 'task_class_ids': [0, 1, 2], + } } mir_annotations = mirpb.MirAnnotations() json_format.ParseDict(annotations_dict, mir_annotations) - task = mir_storage_ops.create_task(task_type=mirpb.TaskType.TaskTypeImportData, task_id='b', message='import') + task = mir_storage_ops.create_task(task_type=mirpb.TaskType.TaskTypeImportData, task_id='a', message='import') mir_storage_ops.MirStorageOps.save_and_commit(mir_root=self._mir_root, - mir_branch='b', + mir_branch='a', his_branch='master', mir_datas={ mirpb.MirStorage.MIR_METADATAS: mir_metadatas, @@ -271,36 +254,302 @@ def _deprepare_dirs(self) -> None: if os.path.isdir(self._test_root): shutil.rmtree(self._test_root) + # private: check result + def _check_fpfn(self, actual_mir_annotations: mirpb.MirAnnotations) -> None: + expected_annotations_dict = { + 'ground_truth': { + 'image_annotations': { + 'a0': { + 'boxes': [{ + 'box': { + 'x': 50, + 'y': 50, + 'w': 50, + 'h': 50, + 'rotate_angle': 0.0 + }, + 'score': 1.0, + 'cm': 'MTP', + 'index': 0, + 'class_id': 0, + 'anno_quality': 0.0, + 'tags': {}, + 'polygon': [], + 'det_link_id': 0, + 'class_name': '' + }, { + 'index': 1, + 'box': { + 'x': 150, + 'y': 50, + 'w': 75, + 'h': 75, + 'rotate_angle': 0.0 + }, + 'score': 1.0, + 'polygon': [], + 'cm': 'MTP', + 'det_link_id': 1, + 'class_id': 0, + 'anno_quality': 0.0, + 'tags': {}, + 'class_name': '' + }, { + 'index': 2, + 'box': { + 'x': 150, + 'y': 150, + 'w': 75, + 'h': 75, + 'rotate_angle': 0.0 + }, + 'class_id': 1, + 'score': 1.0, + 'cm': 'MTP', + 'det_link_id': 3, + 'polygon': [], + 'anno_quality': 0.0, + 'tags': {}, + 'class_name': '' + }, { + 'index': 3, + 'box': 
{ + 'x': 350, + 'y': 50, + 'w': 100, + 'h': 100, + 'rotate_angle': 0.0 + }, + 'class_id': 2, + 'score': 1.0, + 'cm': 'IGNORED', + 'det_link_id': -1, + 'polygon': [], + 'anno_quality': 0.0, + 'tags': {}, + 'class_name': '' + }], + 'img_class_ids': [0, 1, 2], + 'polygons': [], + }, + 'a1': { + 'boxes': [{ + 'box': { + 'x': 300, + 'y': 300, + 'w': 100, + 'h': 100, + 'rotate_angle': 0.0 + }, + 'class_id': 2, + 'score': 1.0, + 'cm': 'IGNORED', + 'det_link_id': -1, + 'polygon': [], + 'index': 0, + 'anno_quality': 0.0, + 'tags': {}, + 'class_name': '' + }], + 'img_class_ids': [2], + 'polygons': [], + }, + }, + 'task_class_ids': [0, 1, 2], + 'task_id': 'a', + 'map_id_color': {}, + 'eval_class_ids': [], + 'executor_config': '', + 'type': 'AT_UNKNOWN', + }, + 'prediction': { + 'image_annotations': { + 'a1': { + 'boxes': [{ + 'box': { + 'x': 300, + 'y': 300, + 'w': 103, + 'h': 110, + 'rotate_angle': 0.0 + }, + 'class_id': 2, + 'score': 0.9, + 'cm': 'IGNORED', + 'polygon': [], + 'det_link_id': -1, + 'index': 0, + 'anno_quality': 0.0, + 'tags': {}, + 'class_name': '' + }], + 'img_class_ids': [2], + 'polygons': [], + }, + 'a0': { + 'boxes': [{ + 'box': { + 'x': 45, + 'y': 45, + 'w': 52, + 'h': 52, + 'rotate_angle': 0.0 + }, + 'score': 0.7, + 'cm': 'TP', + 'index': 0, + 'polygon': [], + 'class_id': 0, + 'anno_quality': 0.0, + 'tags': {}, + 'det_link_id': 0, + 'class_name': '' + }, { + 'index': 1, + 'box': { + 'x': 150, + 'y': 50, + 'w': 73, + 'h': 73, + 'rotate_angle': 0.0 + }, + 'score': 0.8, + 'cm': 'TP', + 'det_link_id': 1, + 'polygon': [], + 'class_id': 0, + 'anno_quality': 0.0, + 'tags': {}, + 'class_name': '' + }, { + 'index': 2, + 'box': { + 'x': 350, + 'y': 50, + 'w': 76, + 'h': 76, + 'rotate_angle': 0.0 + }, + 'score': 0.9, + 'cm': 'FP', + 'det_link_id': -1, + 'polygon': [], + 'class_id': 0, + 'anno_quality': 0.0, + 'tags': {}, + 'class_name': '' + }, { + 'index': 3, + 'box': { + 'x': 150, + 'y': 160, + 'w': 78, + 'h': 78, + 'rotate_angle': 0.0 + }, + 'class_id': 1, + 'score': 0.9, + 'cm': 'TP', + 'polygon': [], + 'det_link_id': 2, + 'anno_quality': 0.0, + 'tags': {}, + 'class_name': '' + }, { + 'index': 4, + 'box': { + 'x': 350, + 'y': 50, + 'w': 102, + 'h': 103, + 'rotate_angle': 0.0 + }, + 'class_id': 2, + 'score': 0.9, + 'polygon': [], + 'cm': 'IGNORED', + 'det_link_id': -1, + 'anno_quality': 0.0, + 'tags': {}, + 'class_name': '' + }], + 'img_class_ids': [0, 1, 2], + 'polygons': [], + } + }, + 'task_id': 'a', + 'map_id_color': {}, + 'eval_class_ids': [0, 1, 2], + 'executor_config': '', + 'type': 'AT_UNKNOWN', + 'task_class_ids': [0, 1, 2], + }, + 'image_cks': { + 'a1': { + 'cks': { + 'color': 'blue', + 'weather': 'sunny' + }, + 'image_quality': 0.0 + }, + 'a0': { + 'cks': { + 'color': 'red', + 'weather': 'sunny' + }, + 'image_quality': 0.0 + } + } + } + actual_annotations_dict = json_format.MessageToDict(actual_mir_annotations, + including_default_value_fields=True, + preserving_proto_field_name=True) + self.assertEqual(expected_annotations_dict, actual_annotations_dict) + # public: test cases - def test_mir_coco(self): - mir_coco = det_eval.MirCoco(mir_root=self._mir_root, - rev_tid=revs_parser.parse_single_arg_rev('a@a', need_tid=False), - conf_thr=0) - self.assertEqual(['a0', 'a1', 'a2'], mir_coco.get_asset_ids()) - self.assertEqual([0, 1, 2], mir_coco.get_asset_idxes()) - self.assertEqual([0, 1, 2], mir_coco.get_class_ids()) + def test_det_eval_coco_00(self) -> None: + sde = self._test_det_eval(det_eval_model_name=det_eval_coco) + see = sde.iou_averaged_evaluation.ci_averaged_evaluation 
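# Both eval backends run through the same det_evaluate(prediction,
# ground_truth, config) entry point; _test_det_eval below shows the real call,
# and the commented sketch here only restates the pattern (evaluate_with is an
# illustrative name, not part of this patch):
#
#     def evaluate_with(backend, prediction, ground_truth):
#         config = mirpb.EvaluateConfig()
#         config.conf_thr = 0.0005          # drop predictions below this score
#         config.iou_thrs_interval = '0.5'  # evaluate at a single IoU threshold
#         config.need_pr_curve = True
#         config.class_ids[:] = [0, 1]
#         evaluation = backend.det_evaluate(prediction=prediction,
#                                           ground_truth=ground_truth,
#                                           config=config)
#         return evaluation.dataset_evaluation  # mirpb.SingleDatasetEvaluation
#
# evaluate_with(det_eval_coco, ...) and evaluate_with(det_eval_voc, ...) are
# expected to agree on the class-and-IoU averaged ap asserted below.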
+ self.assertTrue(np.isclose(0.833333, see.ap)) - self.assertEqual(2, len(mir_coco.img_cat_to_annotations[(0, 0)])) + def test_det_eval_voc_00(self) -> None: + sde = self._test_det_eval(det_eval_model_name=det_eval_voc) + see = sde.iou_averaged_evaluation.ci_averaged_evaluation + self.assertTrue(np.isclose(0.833333, see.ap)) - def test_mir_eval_00(self): - """ align our eval with original COCOeval """ + def test_det_eval_ctl_ops(self) -> None: + gt_pred_rev_tid = revs_parser.parse_single_arg_rev('a@a', need_tid=False) + evaluate_config = mirpb.EvaluateConfig() + evaluate_config.conf_thr = 0.0005 + evaluate_config.iou_thrs_interval = '0.5' + evaluate_config.need_pr_curve = False + evaluate_config.main_ck = 'color' + evaluation = det_eval_ctl_ops.det_evaluate_datasets(mir_root=self._mir_root, + gt_rev_tid=gt_pred_rev_tid, + pred_rev_tid=gt_pred_rev_tid, + evaluate_config=evaluate_config) + self.assertIsNotNone(evaluation) + self.assertEqual({'blue', 'red'}, set(evaluation.sub_cks.keys())) - # original result from pycocotools - expected_stats = np.array( - [0.61177118, 0.88888889, 0.41749175, -1.0, 0.46716172, 0.9009901, 0.46666667, 0.7, 0.7, -1.0, 0.6, 0.9]) + evaluate_config.main_ck = 'FakeMainCk' + evaluation = det_eval_ctl_ops.det_evaluate_datasets(mir_root=self._mir_root, + gt_rev_tid=gt_pred_rev_tid, + pred_rev_tid=gt_pred_rev_tid, + evaluate_config=evaluate_config) + self.assertIsNone(evaluation) - # ymir's eval - mir_gt = det_eval.MirCoco(mir_root=self._mir_root, - rev_tid=revs_parser.parse_single_arg_rev('a@a', need_tid=False), - conf_thr=0) - mir_dt = det_eval.MirCoco(mir_root=self._mir_root, - rev_tid=revs_parser.parse_single_arg_rev('b@b', need_tid=False), - conf_thr=0) - mir_evaluator = det_eval.MirDetEval(coco_gt=mir_gt, coco_dt=mir_dt) - mir_evaluator.evaluate() - mir_evaluator.accumulate() - mir_evaluator.summarize() - self.assertTrue(np.isclose(expected_stats, mir_evaluator.stats).all()) + # protected: test cases + def _test_det_eval(self, det_eval_model_name: Any) -> mirpb.SingleDatasetEvaluation: + mir_annotations: mirpb.MirAnnotations = mir_storage_ops.MirStorageOps.load_single_storage( + mir_root=self._mir_root, mir_branch='a', mir_task_id='a', ms=mirpb.MirStorage.MIR_ANNOTATIONS) - mir_evaluation_result = mir_evaluator.get_evaluation_result() - self.assertTrue(len(mir_evaluation_result.iou_evaluations) > 0) + evaluate_config = mirpb.EvaluateConfig() + evaluate_config.conf_thr = 0.0005 + evaluate_config.iou_thrs_interval = '0.5' + evaluate_config.need_pr_curve = True + evaluate_config.class_ids[:] = [0, 1] + evaluation: mirpb.Evaluation = det_eval_model_name.det_evaluate(prediction=mir_annotations.prediction, + ground_truth=mir_annotations.ground_truth, + config=evaluate_config) + self._check_fpfn(mir_annotations) + return evaluation.dataset_evaluation diff --git a/ymir/command/tests/unit/test_tools_exodus.py b/ymir/command/tests/unit/test_tools_exodus.py index 46cae83d66..5d311c9930 100644 --- a/ymir/command/tests/unit/test_tools_exodus.py +++ b/ymir/command/tests/unit/test_tools_exodus.py @@ -1,9 +1,7 @@ -import logging import os import shutil from typing import Type import unittest -import zlib import google.protobuf.json_format as pb_format diff --git a/ymir/command/tests/unit/test_tools_mir_storage.py b/ymir/command/tests/unit/test_tools_mir_storage.py index d24dfcd0da..7ac37353fd 100644 --- a/ymir/command/tests/unit/test_tools_mir_storage.py +++ b/ymir/command/tests/unit/test_tools_mir_storage.py @@ -4,10 +4,10 @@ import unittest import google.protobuf.json_format 
as pb_format +from google.protobuf.json_format import MessageToDict from mir.protos import mir_command_pb2 as mirpb -from mir.tools import context, mir_storage, mir_storage_ops -from mir.tools.errors import MirError +from mir.tools import mir_storage, mir_storage_ops, settings as mir_settings from tests import utils as test_utils @@ -68,112 +68,86 @@ def _prepare_mir_pb(self, with_project: bool) -> tuple: pb_format.ParseDict(dict_metadatas, mir_metadatas) dict_annotations = { - "task_annotations": { - "mining-task-id": { - "image_annotations": { - "a001": { - 'annotations': [{ - 'box': { - 'x': 26, - 'y': 189, - 'w': 19, - 'h': 50 - }, - 'classId': 1 - }, { - 'box': { - 'x': 26, - 'y': 189, - 'w': 19, - 'h': 50 - }, - 'classId': 2 - }] - }, - "a002": { - 'annotations': [{ - 'box': { - 'x': 26, - 'y': 189, - 'w': 19, - 'h': 50 - }, - 'classId': 2 - }, { - 'box': { - 'x': 26, - 'y': 189, - 'w': 19, - 'h': 50 - }, - 'classId': 3 - }] - }, - "a003": { - 'annotations': [{ - 'box': { - 'x': 26, - 'y': 189, - 'w': 19, - 'h': 50 - }, - 'classId': 3 - }, { - 'box': { - 'x': 26, - 'y': 189, - 'w': 19, - 'h': 50 - }, - 'classId': 3 - }] - } + "prediction": { + 'task_id': 'mining-task-id', + "image_annotations": { + "a001": { + 'boxes': [{ + 'box': { + 'x': 26, + 'y': 189, + 'w': 19, + 'h': 50 + }, + 'classId': 1 + }, { + 'box': { + 'x': 26, + 'y': 189, + 'w': 19, + 'h': 50 + }, + 'classId': 2 + }] + }, + "a002": { + 'boxes': [{ + 'box': { + 'x': 26, + 'y': 189, + 'w': 19, + 'h': 50 + }, + 'classId': 2 + }, { + 'box': { + 'x': 26, + 'y': 189, + 'w': 19, + 'h': 50 + }, + 'classId': 3 + }] + }, + "a003": { + 'boxes': [{ + 'box': { + 'x': 26, + 'y': 189, + 'w': 19, + 'h': 50 + }, + 'classId': 3 + }, { + 'box': { + 'x': 26, + 'y': 189, + 'w': 19, + 'h': 50 + }, + 'classId': 3 + }] } } } } pb_format.ParseDict(dict_annotations, mir_annotations) - dict_keywords = { - 'keywords': { - 'a001': { - 'predifined_keyids': [1, 2] - }, - 'a002': { - 'predifined_keyids': [2, 3] - }, - 'a003': { - 'predifined_keyids': [3] - }, - }, - 'index_predifined_keyids': { - 1: { - 'asset_ids': ['a001'] - }, - 2: { - 'asset_ids': ['a001', 'a002'] - }, - 3: { - 'asset_ids': ['a002', 'a003'] - }, - } - } - pb_format.ParseDict(dict_keywords, mir_keywords) - dict_context = { 'images_cnt': 3, - 'negative_images_cnt': 0, - 'project_negative_images_cnt': (1 if with_project else 0), - 'predefined_keyids_cnt': { - 1: 1, - 2: 2, - 3: 2, + 'pred_stats': { + 'total_cnt': 6, + 'positive_asset_cnt': 3, + 'negative_asset_cnt': 0, + 'class_ids_cnt': { + 1: 1, + 2: 2, + 3: 2, + }, }, - 'project_predefined_keyids_cnt': ({ - 3: 2, - 4: 0 - } if with_project else {}), - 'customized_keywords_cnt': {}, + 'gt_stats': { + 'negative_asset_cnt': 3 + } } pb_format.ParseDict(dict_context, mir_context) @@ -187,8 +161,17 @@ def _prepare_mir_pb(self, with_project: bool) -> tuple: 'model': { 'model_hash': 'abc123', 'mean_average_precision': 0.5, - 'context': 'fake_context' - } + 'context': 'fake_context', + 'stages': {}, + 'best_stage_name': '', + }, + 'evaluation': { + 'config': { + 'conf_thr': mir_settings.DEFAULT_EVALUATE_CONF_THR, + 'iou_thrs_interval': mir_settings.DEFAULT_EVALUATE_IOU_THR, + }, + 'state': mirpb.EvaluationState.ES_NO_CLASS_IDS, + }, } }, 'head_task_id': 'mining-task-id', @@ -202,7 +185,7 @@ def test_normal_00(self): """ normal cases: no project context, commit twice """ - mir_metadatas, mir_annotations, mir_keywords, mir_tasks, mir_context = self._prepare_mir_pb(with_project=False) + mir_metadatas, mir_annotations, _, mir_tasks, 
mir_context = self._prepare_mir_pb(with_project=False) mir_datas_expect = { mirpb.MirStorage.MIR_METADATAS: mir_metadatas, mirpb.MirStorage.MIR_ANNOTATIONS: mir_annotations, @@ -226,13 +209,6 @@ def test_normal_00(self): ) actual_data = dict(zip(mir_storage_list, mir_datas)) self.assertDictEqual(actual_data, mir_datas_expect) - loaded_mir_keywords = mir_storage_ops.MirStorageOps.load_single_storage(mir_root=self._mir_root, - mir_branch='a', - ms=mirpb.MirStorage.MIR_KEYWORDS, - mir_task_id='mining-task-id', - as_dict=False) - self.assertDictEqual(pb_format.MessageToDict(mir_keywords), - pb_format.MessageToDict(loaded_mir_keywords)) loaded_mir_context = mir_storage_ops.MirStorageOps.load_single_storage(mir_root=self._mir_root, mir_branch='a', ms=mirpb.MirStorage.MIR_CONTEXT, @@ -241,12 +217,14 @@ def test_normal_00(self): try: self.assertEqual(loaded_mir_context, mir_context) except AssertionError as e: - logging.info(f"expected: {mir_context}") - logging.info(f"actual: {loaded_mir_context}") + logging.info(f"expected: {MessageToDict(mir_context, preserving_proto_field_name=True)}") + logging.info(f"actual: {MessageToDict(loaded_mir_context, preserving_proto_field_name=True)}") raise e # add another commit a@t2, which has empty dataset task_2 = mir_storage_ops.create_task(task_type=mirpb.TaskType.TaskTypeMining, task_id='t2', message='task-t2') + task_2.evaluation.config.CopyFrom(mir_storage_ops.create_evaluate_config()) + task_2.evaluation.state = mirpb.EvaluationState.ES_NO_CLASS_IDS mir_tasks_2 = mirpb.MirTasks() mir_tasks_2.head_task_id = task_2.task_id mir_tasks_2.tasks[task_2.task_id].CopyFrom(task_2) @@ -255,10 +233,12 @@ def test_normal_00(self): mirpb.MirMetadatas(), mirpb.MirStorage.MIR_ANNOTATIONS: pb_format.ParseDict({ - "task_annotations": { - "t2": {} + "prediction": { + 'task_id': 't2', + }, + "ground_truth": { + 'task_id': 't2', }, - "head_task_id": "t2", }, mirpb.MirAnnotations()), mirpb.MirStorage.MIR_TASKS: mir_tasks_2, @@ -295,22 +275,6 @@ def test_normal_00(self): print(f"mir_datas_expect_2: {mir_datas_expect_2}") self.assertDictEqual(actual_data, mir_datas_expect_2) - # load_single_model: have model - actual_dict_model = mir_storage_ops.MirStorageOps.load_single_model(mir_root=self._mir_root, - mir_branch='a', - mir_task_id='mining-task-id') - self.assertEqual( - actual_dict_model, { - 'model_hash': 'abc123', - 'mean_average_precision': 0.5, - 'context': 'fake_context', - 'executor_config': {}, - 'task_parameters': '' - }) - # load_single_model: have no model - with self.assertRaises(MirError): - mir_storage_ops.MirStorageOps.load_single_model(mir_root=self._mir_root, mir_branch='a', mir_task_id='t2') - actual_contents_list = mir_storage_ops.MirStorageOps.load_multiple_storages( mir_root=self._mir_root, mir_branch='a', @@ -320,9 +284,6 @@ def test_normal_00(self): self.assertEqual(len(actual_contents_list), 5) def test_normal_01(self): - # change project settings - context.save(mir_root=self._mir_root, project_class_ids=[3, 4]) - mir_metadatas, mir_annotations, mir_keywords, mir_tasks, mir_context = self._prepare_mir_pb(with_project=True) mir_datas_expect = { @@ -334,12 +295,6 @@ def test_normal_01(self): his_branch='master', mir_datas=mir_datas_expect, task=mir_tasks.tasks[mir_tasks.head_task_id]) - loaded_mir_keywords = mir_storage_ops.MirStorageOps.load_single_storage(mir_root=self._mir_root, - mir_branch='a', - ms=mirpb.MirStorage.MIR_KEYWORDS, - mir_task_id='mining-task-id', - as_dict=False) - self.assertDictEqual(pb_format.MessageToDict(mir_keywords), 
pb_format.MessageToDict(loaded_mir_keywords)) loaded_mir_context = mir_storage_ops.MirStorageOps.load_single_storage(mir_root=self._mir_root, mir_branch='a', ms=mirpb.MirStorage.MIR_CONTEXT, diff --git a/ymir/command/tests/unit/test_tools_phase_logger.py b/ymir/command/tests/unit/test_tools_phase_logger.py index 7796295f46..4dd9dbe3a5 100644 --- a/ymir/command/tests/unit/test_tools_phase_logger.py +++ b/ymir/command/tests/unit/test_tools_phase_logger.py @@ -186,7 +186,7 @@ def test_00(self, mock_run): sub_phase_loggers = PhaseLoggerCenter.loggers() sub_logger: PhaseLogger = sub_phase_loggers['copy.read'] - self.assertTrue(math.isclose(0, sub_logger.start_percent)) + self.assertTrue(math.isclose(0.05, sub_logger.start_percent)) self.assertTrue(math.isclose(0.4, sub_logger.end_percent)) self.assertFalse(sub_logger.auto_done) diff --git a/ymir/command/tests/unit/test_tools_store_assets.py b/ymir/command/tests/unit/test_tools_store_assets.py deleted file mode 100644 index b62f0dae51..0000000000 --- a/ymir/command/tests/unit/test_tools_store_assets.py +++ /dev/null @@ -1,30 +0,0 @@ -import os -import shutil -import unittest - -import mir.tools.utils as mir_utils -from tests import utils as test_utils - - -class TestStoreAssetsToDir(unittest.TestCase): - def test_00(self): - asset_ids = ["997c3e6ebd3e59dbe32656099c461e417c4693a3", '917c3e6ebd3e59dbe32656099c4614417c4693a3'] - tmp_dir = test_utils.dir_test_root(self.id().split(".")[-3:]) - if os.path.exists(tmp_dir): - shutil.rmtree(tmp_dir) - os.makedirs(tmp_dir, exist_ok=True) - - src_folder = os.path.join(tmp_dir, 'img') - os.makedirs(src_folder, exist_ok=True) - for id in asset_ids: - with open(os.path.join(src_folder, id), 'w') as f: - f.write("1") - - asset_id_to_rel_paths = mir_utils.store_assets_to_dir(asset_ids=asset_ids, - out_root=tmp_dir, - sub_folder='media', - asset_location=src_folder) - self.assertEqual(set(asset_ids), set(asset_id_to_rel_paths.keys())) - for _, asset_path in asset_id_to_rel_paths.items(): - self.assertTrue(os.path.isfile(os.path.join(tmp_dir, asset_path))) - shutil.rmtree(tmp_dir) diff --git a/ymir/command/tests/utils.py b/ymir/command/tests/utils.py index 2c48f39db8..3d625c4ab9 100644 --- a/ymir/command/tests/utils.py +++ b/ymir/command/tests/utils.py @@ -1,16 +1,14 @@ import os import shutil import subprocess -from typing import List, Type +from typing import Any, List, Type import yaml from mir.commands.init import CmdInit from mir.commands.checkout import CmdCheckout -from mir.protos import mir_command_pb2 as mirpb from mir.tools import class_ids from mir.tools.code import MirCode -from mir.tools.mir_storage_ops import MirStorageOps def dir_test_root(sub_dirs: List[str]) -> str: @@ -57,11 +55,34 @@ def remake_dirs(path: str): def prepare_labels(mir_root: str, names: List[str]): - labels: List[class_ids._SingleLabel] = [] + labels: List[class_ids.SingleLabel] = [] for idx, name in enumerate(names): components = name.split(',') - labels.append(class_ids._SingleLabel(id=idx, name=components[0], aliases=components[1:])) - label_storage = class_ids._LabelStorage(labels=labels) + labels.append(class_ids.SingleLabel(id=idx, name=components[0], aliases=components[1:])) + label_storage = class_ids.LabelStorage(labels=labels) with open(class_ids.ids_file_path(mir_root=mir_root), 'w') as f: yaml.safe_dump(label_storage.dict(), f) + + +def diff_dicts(a_dict: dict, b_dict: dict, stack: list) -> None: + if set(a_dict.keys()) != set(b_dict.keys()): + raise ValueError(f"stack: {stack} keys mismatched\na: {sorted(a_dict.keys())}\nb: {sorted(b_dict.keys())}") + for ka in a_dict: + va = a_dict[ka] + vb = b_dict[ka] + diff_types(va, vb, stack=stack + [ka]) + if isinstance(va, dict): + diff_dicts(a_dict=va, b_dict=vb, stack=stack + [ka]) + else: + diff_others(a=va, b=vb, stack=stack + [ka]) + + +def diff_types(a: Any, b: Any, stack: list) -> None: + if not isinstance(a, type(b)) and not isinstance(b, type(a)): + raise ValueError(f"stack: {stack} types mismatched: {type(a)} vs {type(b)}") + + +def diff_others(a: Any, b: Any, stack: list) -> None: + if a != b: + raise ValueError(f"stack: {stack}, other kind of values mismatched:\na: {a}\nb: {b}")
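The diff_dicts / diff_types / diff_others helpers above report the key path (`stack`) of the first mismatch when two nested dicts diverge. A small usage sketch, illustration only (the sample dicts here are made up):

```python
# Illustration of the new tests/utils.py helpers: diff_dicts recursively
# compares two dicts and raises ValueError naming the mismatched key path.
from tests import utils as test_utils

a = {'pred_stats': {'total_cnt': 6, 'class_ids_cnt': {1: 1}}}
b = {'pred_stats': {'total_cnt': 7, 'class_ids_cnt': {1: 1}}}
try:
    test_utils.diff_dicts(a_dict=a, b_dict=b, stack=[])
except ValueError as err:
    # -> stack: ['pred_stats', 'total_cnt'], other kind of values mismatched: a: 6, b: 7
    print(err)
```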
diff --git a/ymir/command/tutorial/demo-cycle.sh b/ymir/command/tutorial/demo-cycle.sh index e123d5b910..5ae058066c 100755 --- a/ymir/command/tutorial/demo-cycle.sh +++ b/ymir/command/tutorial/demo-cycle.sh @@ -169,7 +169,7 @@ import() { $MIR_EXE checkout master --root "$MIR_ROOT" $MIR_EXE import --root "$MIR_ROOT" \ --index-file "$RAW_TRAINING_SET_INDEX_PATH" \ - --annotation-dir "$RAW_TRAINING_SET_ANNO_ROOT" \ + --pred-dir "$RAW_TRAINING_SET_ANNO_ROOT" \ --gen-dir "$YMIR_ASSET_LOCATION" \ --dataset-name "$TRAINING_SET_PREFIX" \ --dst-rev "$TRAINING_SET_PREFIX@$TRAINING_SET_PREFIX" @@ -179,7 +179,7 @@ import() { $MIR_EXE checkout master --root "$MIR_ROOT" $MIR_EXE import --root "$MIR_ROOT" \ --index-file "$RAW_VAL_SET_INDEX_PATH" \ - --annotation-dir "$RAW_VAL_SET_ANNO_ROOT" \ + --pred-dir "$RAW_VAL_SET_ANNO_ROOT" \ --gen-dir "$YMIR_ASSET_LOCATION" \ --dataset-name "$VAL_SET_PREFIX" \ --dst-rev "$VAL_SET_PREFIX@$VAL_SET_PREFIX" @@ -189,7 +189,7 @@ import() { $MIR_EXE checkout master --root "$MIR_ROOT" $MIR_EXE import --root "$MIR_ROOT" \ --index-file "$RAW_MINING_SET_INDEX_PATH" \ - --annotation-dir "$RAW_MINING_SET_ANNO_ROOT" \ + --pred-dir "$RAW_MINING_SET_ANNO_ROOT" \ --gen-dir "$YMIR_ASSET_LOCATION" \ --dataset-name "$MINING_SET_PREFIX" \ --dst-rev "$MINING_SET_PREFIX@$MINING_SET_PREFIX" @@ -336,7 +336,7 @@ outlabel() { _echo_in_color $C_YELLOW "export from $_MINED_SET_PREFIX-$2 to path $TMP_OUTLABEL_ASSET_ROOT/$_MINED_SET_PREFIX-$2" $MIR_EXE export --root $MIR_ROOT \ --asset-dir "$TMP_OUTLABEL_ASSET_ROOT/$_MINED_SET_PREFIX-$2" \ - --annotation-dir "$TMP_OUTLABEL_ASSET_ROOT/$_MINED_SET_PREFIX-$2" \ + --pred-dir "$TMP_OUTLABEL_ASSET_ROOT/$_MINED_SET_PREFIX-$2" \ --media-location "$YMIR_ASSET_LOCATION" \ --src-revs "$_MINED_SET_PREFIX-$2@$_MINED_SET_PREFIX-$2" \ --format "none" @@ -374,7 +374,7 @@ inlabel() { _echo_in_color $C_YELLOW "import" $MIR_EXE import --root "$MIR_ROOT" \ --index-file "$TMP_OUTLABEL_ASSET_ROOT/$_MINED_SET_PREFIX-$2.index.tsv" \ - --annotation-dir "$3" \ + --pred-dir "$3" \ --gen-dir "$YMIR_ASSET_LOCATION" \ --dataset-name "$MINING_SET_PREFIX" \ --src-revs "$_MINED_SET_PREFIX-$2" \ diff --git a/ymir/command/tutorial/demo-main.sh b/ymir/command/tutorial/demo-main.sh index c9fcb11b9a..f0bcc9943e 100644 --- a/ymir/command/tutorial/demo-main.sh +++ b/ymir/command/tutorial/demo-main.sh @@ -151,7 +151,7 @@ import_data_pascal() { printf '%s\n' "creating pascal index file...
$IDX_PASCAL_ABS" find $PASCAL_DATA_IMG_ABS -iname \*.jpg >> $IDX_PASCAL_ABS fi - $MIR_EXE import --index-file $IDX_PASCAL_ABS --annotation-dir $PASCAL_DATA_ANNO_ABS --gen-dir $INTERMEDIATE_FOLDER_ABS --dataset-name $DEMO_PASCAL_BRANCH -t $DEMO_PASCAL_BRANCH + $MIR_EXE import --index-file $IDX_PASCAL_ABS --pred-dir $PASCAL_DATA_ANNO_ABS --gen-dir $INTERMEDIATE_FOLDER_ABS --dataset-name $DEMO_PASCAL_BRANCH -t $DEMO_PASCAL_BRANCH } import_data_coco() { @@ -161,7 +161,7 @@ import_data_coco() { printf '%s\n' "creating coco index file... $IDX_COCO_ABS" find $COCO_DATA_IMG_ABS -iname \*.jpg >> $IDX_COCO_ABS fi - $MIR_EXE import --index-file $IDX_COCO_ABS --annotation-dir $COCO_DATA_ANNO_ABS --gen-dir $INTERMEDIATE_FOLDER_ABS --dataset-name $DEMO_COCO_BRANCH -t $DEMO_COCO_BRANCH + $MIR_EXE import --index-file $IDX_COCO_ABS --pred-dir $COCO_DATA_ANNO_ABS --gen-dir $INTERMEDIATE_FOLDER_ABS --dataset-name $DEMO_COCO_BRANCH -t $DEMO_COCO_BRANCH } process_filter() { diff --git a/ymir/command/update_proto_py.sh b/ymir/command/update_proto_py.sh index 56b7e71a22..7df0142667 100755 --- a/ymir/command/update_proto_py.sh +++ b/ymir/command/update_proto_py.sh @@ -1,25 +1,30 @@ #!/bin/bash -# Python version >= 3.8.10 -# protoc version >= 3.13.0 -# grpc version >= 1.38.0 +# Python version == 3.8.10 +# protoc version == 3.13.0 +# https://github.com/protocolbuffers/protobuf/releases/download/v3.13.0 +# mv bin/protoc /usr/local +# mv include/google /usr/local/include/ + +# pip install protobuf==3.13.0 +# pip install mypy-protobuf==3.0.0 +# go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.28.1 set -e -INPUT_DIR=./proto -OUTPUT_DIR=./mir/protos -rm -rf $OUTPUT_DIR -mkdir -p $OUTPUT_DIR +INPUT_DIR="./proto" +PY_OUTPUT_DIR="./mir/protos" +GO_DIR_VIEWER="../backend/src/ymir_viewer/common" +GO_DIR_HEL="../backend/src/ymir_hel" +rm -rf ${PY_OUTPUT_DIR} +mkdir -p ${PY_OUTPUT_DIR} # gen protobuf py -python -m grpc_tools.protoc \ - -I "$INPUT_DIR" \ - --python_out="$OUTPUT_DIR" \ +protoc -I "${INPUT_DIR}" \ + --python_out="${PY_OUTPUT_DIR}" \ + --go_out="${GO_DIR_VIEWER}" \ + --go_out="${GO_DIR_HEL}" \ + --plugin=protoc-gen-mypy=$(which protoc-gen-mypy) --mypy_out=${PY_OUTPUT_DIR} \ "$INPUT_DIR/mir_command.proto" -# gen protobuf pyi for mypy -protoc --plugin=protoc-gen-mypy=$(which protoc-gen-mypy) --mypy_out=$OUTPUT_DIR "$INPUT_DIR/mir_command.proto" \ -&& mv $OUTPUT_DIR/proto/mir_command_pb2.pyi $OUTPUT_DIR/ \ -&& rm -rf $OUTPUT_DIR/proto - -touch $OUTPUT_DIR/__init__.py +touch ${PY_OUTPUT_DIR}/__init__.py diff --git a/ymir/backend/src/ymir_controller/client/__init__.py b/ymir/updater/app/__init__.py similarity index 100% rename from ymir/backend/src/ymir_controller/client/__init__.py rename to ymir/updater/app/__init__.py diff --git a/ymir/backend/src/ymir_viz/src/__init__.py b/ymir/updater/app/mir/protos/__init__.py similarity index 100% rename from ymir/backend/src/ymir_viz/src/__init__.py rename to ymir/updater/app/mir/protos/__init__.py diff --git a/ymir/updater/app/mir/protos/mir_command_110.proto b/ymir/updater/app/mir/protos/mir_command_110.proto new file mode 100644 index 0000000000..21437b6a7c --- /dev/null +++ b/ymir/updater/app/mir/protos/mir_command_110.proto @@ -0,0 +1,249 @@ +syntax = "proto3"; + +package mir.command110; + +/// assertion type: training, validation or test +enum TvtType { + TvtTypeUnknown = 0; + TvtTypeTraining = 1; + TvtTypeValidation = 2; + TvtTypeTest = 3; +}; + +enum AssetType { + AssetTypeUnknown = 0; + AssetTypeImageJpeg = 1; + AssetTypeImagePng = 2; + AssetTypeImagePixelMat 
= 3; + AssetTypeImageYuv420p = 4; + AssetTypeImageYuv420sp = 5; + AssetTypeImageYuv422p = 6; + AssetTypeImageYuv422sp = 7; + AssetTypeImageBmp = 8; + AssetTypeVideoMp4 = 101; +}; + +/// task type +enum TaskType { + TaskTypeUnknown = 0; + TaskTypeTraining = 1; + TaskTypeMining = 2; + TaskTypeLabel = 3; + TaskTypeFilter = 4; + TaskTypeImportData = 5; + TaskTypeExportData = 6; + TaskTypeCopyData = 7; + TaskTypeMerge = 8; + TaskTypeInfer = 9; + TaskTypeSampling = 10; + /// used by ymir_controller + TaskTypeFusion = 11; + TaskTypeInit = 12; + TaskTypeImportModel = 13; + TaskTypeEvaluate = 16; + + reserved 14, 15; +}; + +enum TaskState { + TaskStateUnknown = 0; + TaskStatePending = 1; + TaskStateRunning = 2; + TaskStateDone = 3; + TaskStateError = 4; + TaskStateMiss = 5; +}; + +enum Sha1Type { + SHA1_TYPE_UNKNOWN = 0; + SHA1_TYPE_ASSET = 1; + SHA1_TYPE_COMMIT = 2; +} + +enum MirStorage { + MIR_METADATAS = 0; + MIR_ANNOTATIONS = 1; + MIR_KEYWORDS = 2; + MIR_TASKS = 3; + MIR_CONTEXT = 4; +} + +enum LabelFormat { + NO_ANNOTATION = 0; + PASCAL_VOC = 1; + IF_ARK = 2; +}; + +/// ========== metadatas.mir ========== +message MirMetadatas { + /// key: asset hash, value: attributes + map<string, MetadataAttributes> attributes = 1; +}; + +message MetadataAttributes { + string dataset_name = 1; + Timestamp timestamp = 2; + TvtType tvt_type = 3; + AssetType asset_type = 4; + int32 width = 5; /// column number + int32 height = 6; /// row number + int32 image_channels = 7; /// (for images) channel count +}; + +message Timestamp { + /// start time stamp + int64 start = 1; + /// duration (in seconds), for images, it's always 0 + float duration = 2; +}; + +/// ========== annotations.mir ========== +message MirAnnotations { + /// key: task id, value: annotations of that single task + map<string, SingleTaskAnnotations> task_annotations = 1; + string head_task_id = 2; +}; + +message SingleTaskAnnotations { + /// key: image id, value: annotations of that single image + map<string, SingleImageAnnotations> image_annotations = 1; +}; + +message SingleImageAnnotations { + repeated Annotation annotations = 2; +}; + +message Annotation { + // Index of this annotation in current single image, may be different from the index in repeated field. + int32 index = 1; + Rect box = 2; + int32 class_id = 3; + double score = 4; +}; + +message Rect { + int32 x = 1; + int32 y = 2; + int32 w = 3; + int32 h = 4; +};
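For orientation: this legacy 1.1.0 schema keys annotations by task id, with per-image `annotations` lists, whereas the current schema used elsewhere in this PR stores `prediction` and `ground_truth` with `boxes`. A hedged sketch of parsing an old-style structure with these bindings; the `pb110` alias and import path are assumptions based on this PR's ymir/updater/app/mir/protos layout:

```python
# Sketch only: building a 1.1.0-style MirAnnotations with the legacy bindings
# added in this PR; field names follow the mir_command_110.proto definitions.
from google.protobuf import json_format

from mir.protos import mir_command_110_pb2 as pb110  # assumed module path

old_style = {
    'task_annotations': {
        'task-0': {
            'image_annotations': {
                'a001': {'annotations': [{'box': {'x': 26, 'y': 189, 'w': 19, 'h': 50}, 'class_id': 1}]},
            },
        },
    },
    'head_task_id': 'task-0',
}
legacy = json_format.ParseDict(old_style, pb110.MirAnnotations())
print(legacy.task_annotations['task-0'].image_annotations['a001'].annotations[0].class_id)  # 1
```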
+ +/// ========== keywords.mir ========== +message MirKeywords { + // key: asset hash, value: keywords list + // cnt: count of keywords + map<string, Keywords> keywords = 1; + // key: class id, value: asset ids + map<int32, Assets> index_predifined_keyids = 6; + + reserved 2, 3, 4, 5; +}; + +message Assets { + repeated string asset_ids = 1; +}; + +message Keywords { + // predefined: managed id-keyword map + repeated int32 predifined_keyids = 1; + // customized: arbitrary user defined keywords + repeated string customized_keywords = 2; +}; + +/// ========== tasks.mir ========== +message MirTasks { + map<string, Task> tasks = 1; + string head_task_id = 2; +}; + +message Task { + TaskType type = 1; + /// user defined task name + string name = 2; + /// auto generated unique id + string task_id = 3; + /// execution time of this task + int64 timestamp = 5; // RFC 3339 date strings + /// (special for training task): result model for cmd train + ModelMeta model = 6; + /// (special for import task): unknown types for cmd import + map<string, int32> unknown_types = 7; + int32 return_code = 8; + string return_msg = 9; + Evaluation evaluation = 10; + + string serialized_task_parameters = 102; + string serialized_executor_config = 103; + string src_revs = 104; + string dst_rev = 105; + string executor = 106; + + reserved 4, 100, 101; +}; + +message ModelMeta { + /// hash for models.tar.gz + string model_hash = 1; + /// model mAP + float mean_average_precision = 2; + /// context generated by train command + string context = 3; +}; + +message Evaluation { + EvaluateConfig config = 1; + // key: prediction dataset id, value: evaluation result for ground truth and prediction dataset + map<string, SingleDatasetEvaluation> dataset_evaluations = 2; +} + +message EvaluateConfig { + string gt_dataset_id = 1; + repeated string pred_dataset_ids = 2; + float conf_thr = 3; + string iou_thrs_interval = 4; + bool need_pr_curve = 5; +} + +message SingleDatasetEvaluation { + float conf_thr = 1; + string gt_dataset_id = 2; + string pred_dataset_id = 3; + map<string, SingleIouEvaluation> iou_evaluations = 4; // key: string of iou threshold + SingleIouEvaluation iou_averaged_evaluation = 5; // average for all ious +} + +message SingleIouEvaluation { + map<int32, SingleTopicEvaluation> ci_evaluations = 1; // key: class ids + SingleTopicEvaluation ci_averaged_evaluation = 2; // evaluations averaged by class ids + map<string, SingleTopicEvaluation> topic_evaluations = 3; // key: topic names +} + +message SingleTopicEvaluation { + float ap = 1; + float ar = 2; + int32 tp = 3; + int32 fp = 4; + int32 fn = 5; + repeated FloatPoint pr_curve = 6; +} + +message FloatPoint { + float x = 1; + float y = 2; +} + +/// ========== context.mir ========== +message MirContext { + /// total images count + int32 images_cnt = 1; + /// total negative images count (images without any annotations) + int32 negative_images_cnt = 2; + /// total negative images count (images without any project class names) + int32 project_negative_images_cnt = 3; + /// key: class id, value: images count + map<int32, int32> predefined_keyids_cnt = 4; + /// key: class id (only in this project), value: images count + map<int32, int32> project_predefined_keyids_cnt = 5; + /// key: customized keywords, value: images count + map<string, int32> customized_keywords_cnt = 6; +} diff --git a/ymir/updater/app/mir/protos/mir_command_110_pb2.py b/ymir/updater/app/mir/protos/mir_command_110_pb2.py new file mode 100644 index 0000000000..acc3624af9 --- /dev/null +++ b/ymir/updater/app/mir/protos/mir_command_110_pb2.py @@ -0,0 +1,2352 @@ +# -*-
coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: mir_command_110.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='mir_command_110.proto', + package='mir.command110', + syntax='proto3', + serialized_options=None, + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n\x15mir_command_110.proto\x12\x0emir.command110\"\xa7\x01\n\x0cMirMetadatas\x12@\n\nattributes\x18\x01 \x03(\x0b\x32,.mir.command110.MirMetadatas.AttributesEntry\x1aU\n\x0f\x41ttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x31\n\x05value\x18\x02 \x01(\x0b\x32\".mir.command110.MetadataAttributes:\x02\x38\x01\"\xe9\x01\n\x12MetadataAttributes\x12\x14\n\x0c\x64\x61taset_name\x18\x01 \x01(\t\x12,\n\ttimestamp\x18\x02 \x01(\x0b\x32\x19.mir.command110.Timestamp\x12)\n\x08tvt_type\x18\x03 \x01(\x0e\x32\x17.mir.command110.TvtType\x12-\n\nasset_type\x18\x04 \x01(\x0e\x32\x19.mir.command110.AssetType\x12\r\n\x05width\x18\x05 \x01(\x05\x12\x0e\n\x06height\x18\x06 \x01(\x05\x12\x16\n\x0eimage_channels\x18\x07 \x01(\x05\",\n\tTimestamp\x12\r\n\x05start\x18\x01 \x01(\x03\x12\x10\n\x08\x64uration\x18\x02 \x01(\x02\"\xd4\x01\n\x0eMirAnnotations\x12M\n\x10task_annotations\x18\x01 \x03(\x0b\x32\x33.mir.command110.MirAnnotations.TaskAnnotationsEntry\x12\x14\n\x0chead_task_id\x18\x02 \x01(\t\x1a]\n\x14TaskAnnotationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x34\n\x05value\x18\x02 \x01(\x0b\x32%.mir.command110.SingleTaskAnnotations:\x02\x38\x01\"\xd0\x01\n\x15SingleTaskAnnotations\x12V\n\x11image_annotations\x18\x01 \x03(\x0b\x32;.mir.command110.SingleTaskAnnotations.ImageAnnotationsEntry\x1a_\n\x15ImageAnnotationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.mir.command110.SingleImageAnnotations:\x02\x38\x01\"I\n\x16SingleImageAnnotations\x12/\n\x0b\x61nnotations\x18\x02 \x03(\x0b\x32\x1a.mir.command110.Annotation\"_\n\nAnnotation\x12\r\n\x05index\x18\x01 \x01(\x05\x12!\n\x03\x62ox\x18\x02 \x01(\x0b\x32\x14.mir.command110.Rect\x12\x10\n\x08\x63lass_id\x18\x03 \x01(\x05\x12\r\n\x05score\x18\x04 \x01(\x01\"2\n\x04Rect\x12\t\n\x01x\x18\x01 \x01(\x05\x12\t\n\x01y\x18\x02 \x01(\x05\x12\t\n\x01w\x18\x03 \x01(\x05\x12\t\n\x01h\x18\x04 \x01(\x05\"\xdc\x02\n\x0bMirKeywords\x12;\n\x08keywords\x18\x01 \x03(\x0b\x32).mir.command110.MirKeywords.KeywordsEntry\x12W\n\x17index_predifined_keyids\x18\x06 \x03(\x0b\x32\x36.mir.command110.MirKeywords.IndexPredifinedKeyidsEntry\x1aI\n\rKeywordsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\'\n\x05value\x18\x02 \x01(\x0b\x32\x18.mir.command110.Keywords:\x02\x38\x01\x1aT\n\x1aIndexPredifinedKeyidsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.mir.command110.Assets:\x02\x38\x01J\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04J\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06\"\x1b\n\x06\x41ssets\x12\x11\n\tasset_ids\x18\x01 \x03(\t\"B\n\x08Keywords\x12\x19\n\x11predifined_keyids\x18\x01 \x03(\x05\x12\x1b\n\x13\x63ustomized_keywords\x18\x02 \x03(\t\"\x98\x01\n\x08MirTasks\x12\x32\n\x05tasks\x18\x01 \x03(\x0b\x32#.mir.command110.MirTasks.TasksEntry\x12\x14\n\x0chead_task_id\x18\x02 
\x01(\t\x1a\x42\n\nTasksEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.mir.command110.Task:\x02\x38\x01\"\xe6\x03\n\x04Task\x12&\n\x04type\x18\x01 \x01(\x0e\x32\x18.mir.command110.TaskType\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0f\n\x07task_id\x18\x03 \x01(\t\x12\x11\n\ttimestamp\x18\x05 \x01(\x03\x12(\n\x05model\x18\x06 \x01(\x0b\x32\x19.mir.command110.ModelMeta\x12=\n\runknown_types\x18\x07 \x03(\x0b\x32&.mir.command110.Task.UnknownTypesEntry\x12\x13\n\x0breturn_code\x18\x08 \x01(\x05\x12\x12\n\nreturn_msg\x18\t \x01(\t\x12.\n\nevaluation\x18\n \x01(\x0b\x32\x1a.mir.command110.Evaluation\x12\"\n\x1aserialized_task_parameters\x18\x66 \x01(\t\x12\"\n\x1aserialized_executor_config\x18g \x01(\t\x12\x10\n\x08src_revs\x18h \x01(\t\x12\x0f\n\x07\x64st_rev\x18i \x01(\t\x12\x10\n\x08\x65xecutor\x18j \x01(\t\x1a\x33\n\x11UnknownTypesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01J\x04\x08\x04\x10\x05J\x04\x08\x64\x10\x65J\x04\x08\x65\x10\x66\"P\n\tModelMeta\x12\x12\n\nmodel_hash\x18\x01 \x01(\t\x12\x1e\n\x16mean_average_precision\x18\x02 \x01(\x02\x12\x0f\n\x07\x63ontext\x18\x03 \x01(\t\"\xf1\x01\n\nEvaluation\x12.\n\x06\x63onfig\x18\x01 \x01(\x0b\x32\x1e.mir.command110.EvaluateConfig\x12O\n\x13\x64\x61taset_evaluations\x18\x02 \x03(\x0b\x32\x32.mir.command110.Evaluation.DatasetEvaluationsEntry\x1a\x62\n\x17\x44\x61tasetEvaluationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x36\n\x05value\x18\x02 \x01(\x0b\x32\'.mir.command110.SingleDatasetEvaluation:\x02\x38\x01\"\x85\x01\n\x0e\x45valuateConfig\x12\x15\n\rgt_dataset_id\x18\x01 \x01(\t\x12\x18\n\x10pred_dataset_ids\x18\x02 \x03(\t\x12\x10\n\x08\x63onf_thr\x18\x03 \x01(\x02\x12\x19\n\x11iou_thrs_interval\x18\x04 \x01(\t\x12\x15\n\rneed_pr_curve\x18\x05 \x01(\x08\"\xd3\x02\n\x17SingleDatasetEvaluation\x12\x10\n\x08\x63onf_thr\x18\x01 \x01(\x02\x12\x15\n\rgt_dataset_id\x18\x02 \x01(\t\x12\x17\n\x0fpred_dataset_id\x18\x03 \x01(\t\x12T\n\x0fiou_evaluations\x18\x04 \x03(\x0b\x32;.mir.command110.SingleDatasetEvaluation.IouEvaluationsEntry\x12\x44\n\x17iou_averaged_evaluation\x18\x05 \x01(\x0b\x32#.mir.command110.SingleIouEvaluation\x1aZ\n\x13IouEvaluationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x32\n\x05value\x18\x02 \x01(\x0b\x32#.mir.command110.SingleIouEvaluation:\x02\x38\x01\"\xbf\x03\n\x13SingleIouEvaluation\x12N\n\x0e\x63i_evaluations\x18\x01 \x03(\x0b\x32\x36.mir.command110.SingleIouEvaluation.CiEvaluationsEntry\x12\x45\n\x16\x63i_averaged_evaluation\x18\x02 \x01(\x0b\x32%.mir.command110.SingleTopicEvaluation\x12T\n\x11topic_evaluations\x18\x03 \x03(\x0b\x32\x39.mir.command110.SingleIouEvaluation.TopicEvaluationsEntry\x1a[\n\x12\x43iEvaluationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\x34\n\x05value\x18\x02 \x01(\x0b\x32%.mir.command110.SingleTopicEvaluation:\x02\x38\x01\x1a^\n\x15TopicEvaluationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x34\n\x05value\x18\x02 \x01(\x0b\x32%.mir.command110.SingleTopicEvaluation:\x02\x38\x01\"\x81\x01\n\x15SingleTopicEvaluation\x12\n\n\x02\x61p\x18\x01 \x01(\x02\x12\n\n\x02\x61r\x18\x02 \x01(\x02\x12\n\n\x02tp\x18\x03 \x01(\x05\x12\n\n\x02\x66p\x18\x04 \x01(\x05\x12\n\n\x02\x66n\x18\x05 \x01(\x05\x12,\n\x08pr_curve\x18\x06 \x03(\x0b\x32\x1a.mir.command110.FloatPoint\"\"\n\nFloatPoint\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02\"\xae\x04\n\nMirContext\x12\x12\n\nimages_cnt\x18\x01 \x01(\x05\x12\x1b\n\x13negative_images_cnt\x18\x02 \x01(\x05\x12#\n\x1bproject_negative_images_cnt\x18\x03 
\x01(\x05\x12R\n\x15predefined_keyids_cnt\x18\x04 \x03(\x0b\x32\x33.mir.command110.MirContext.PredefinedKeyidsCntEntry\x12\x61\n\x1dproject_predefined_keyids_cnt\x18\x05 \x03(\x0b\x32:.mir.command110.MirContext.ProjectPredefinedKeyidsCntEntry\x12V\n\x17\x63ustomized_keywords_cnt\x18\x06 \x03(\x0b\x32\x35.mir.command110.MirContext.CustomizedKeywordsCntEntry\x1a:\n\x18PredefinedKeyidsCntEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x41\n\x1fProjectPredefinedKeyidsCntEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a<\n\x1a\x43ustomizedKeywordsCntEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01*Z\n\x07TvtType\x12\x12\n\x0eTvtTypeUnknown\x10\x00\x12\x13\n\x0fTvtTypeTraining\x10\x01\x12\x15\n\x11TvtTypeValidation\x10\x02\x12\x0f\n\x0bTvtTypeTest\x10\x03*\x88\x02\n\tAssetType\x12\x14\n\x10\x41ssetTypeUnknown\x10\x00\x12\x16\n\x12\x41ssetTypeImageJpeg\x10\x01\x12\x15\n\x11\x41ssetTypeImagePng\x10\x02\x12\x1a\n\x16\x41ssetTypeImagePixelMat\x10\x03\x12\x19\n\x15\x41ssetTypeImageYuv420p\x10\x04\x12\x1a\n\x16\x41ssetTypeImageYuv420sp\x10\x05\x12\x19\n\x15\x41ssetTypeImageYuv422p\x10\x06\x12\x1a\n\x16\x41ssetTypeImageYuv422sp\x10\x07\x12\x15\n\x11\x41ssetTypeImageBmp\x10\x08\x12\x15\n\x11\x41ssetTypeVideoMp4\x10\x65*\xd3\x02\n\x08TaskType\x12\x13\n\x0fTaskTypeUnknown\x10\x00\x12\x14\n\x10TaskTypeTraining\x10\x01\x12\x12\n\x0eTaskTypeMining\x10\x02\x12\x11\n\rTaskTypeLabel\x10\x03\x12\x12\n\x0eTaskTypeFilter\x10\x04\x12\x16\n\x12TaskTypeImportData\x10\x05\x12\x16\n\x12TaskTypeExportData\x10\x06\x12\x14\n\x10TaskTypeCopyData\x10\x07\x12\x11\n\rTaskTypeMerge\x10\x08\x12\x11\n\rTaskTypeInfer\x10\t\x12\x14\n\x10TaskTypeSampling\x10\n\x12\x12\n\x0eTaskTypeFusion\x10\x0b\x12\x10\n\x0cTaskTypeInit\x10\x0c\x12\x17\n\x13TaskTypeImportModel\x10\r\x12\x14\n\x10TaskTypeEvaluate\x10\x10\"\x04\x08\x0e\x10\x0e\"\x04\x08\x0f\x10\x0f*\x87\x01\n\tTaskState\x12\x14\n\x10TaskStateUnknown\x10\x00\x12\x14\n\x10TaskStatePending\x10\x01\x12\x14\n\x10TaskStateRunning\x10\x02\x12\x11\n\rTaskStateDone\x10\x03\x12\x12\n\x0eTaskStateError\x10\x04\x12\x11\n\rTaskStateMiss\x10\x05*L\n\x08Sha1Type\x12\x15\n\x11SHA1_TYPE_UNKNOWN\x10\x00\x12\x13\n\x0fSHA1_TYPE_ASSET\x10\x01\x12\x14\n\x10SHA1_TYPE_COMMIT\x10\x02*f\n\nMirStorage\x12\x11\n\rMIR_METADATAS\x10\x00\x12\x13\n\x0fMIR_ANNOTATIONS\x10\x01\x12\x10\n\x0cMIR_KEYWORDS\x10\x02\x12\r\n\tMIR_TASKS\x10\x03\x12\x0f\n\x0bMIR_CONTEXT\x10\x04*<\n\x0bLabelFormat\x12\x11\n\rNO_ANNOTATION\x10\x00\x12\x0e\n\nPASCAL_VOC\x10\x01\x12\n\n\x06IF_ARK\x10\x02\x62\x06proto3' +) + +_TVTTYPE = _descriptor.EnumDescriptor( + name='TvtType', + full_name='mir.command110.TvtType', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='TvtTypeUnknown', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TvtTypeTraining', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TvtTypeValidation', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TvtTypeTest', index=3, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, 
+ serialized_start=4218, + serialized_end=4308, +) +_sym_db.RegisterEnumDescriptor(_TVTTYPE) + +TvtType = enum_type_wrapper.EnumTypeWrapper(_TVTTYPE) +_ASSETTYPE = _descriptor.EnumDescriptor( + name='AssetType', + full_name='mir.command110.AssetType', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='AssetTypeUnknown', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AssetTypeImageJpeg', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AssetTypeImagePng', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AssetTypeImagePixelMat', index=3, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AssetTypeImageYuv420p', index=4, number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AssetTypeImageYuv420sp', index=5, number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AssetTypeImageYuv422p', index=6, number=6, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AssetTypeImageYuv422sp', index=7, number=7, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AssetTypeImageBmp', index=8, number=8, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AssetTypeVideoMp4', index=9, number=101, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + serialized_start=4311, + serialized_end=4575, +) +_sym_db.RegisterEnumDescriptor(_ASSETTYPE) + +AssetType = enum_type_wrapper.EnumTypeWrapper(_ASSETTYPE) +_TASKTYPE = _descriptor.EnumDescriptor( + name='TaskType', + full_name='mir.command110.TaskType', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='TaskTypeUnknown', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskTypeTraining', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskTypeMining', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskTypeLabel', index=3, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskTypeFilter', index=4, number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskTypeImportData', index=5, number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskTypeExportData', 
index=6, number=6, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskTypeCopyData', index=7, number=7, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskTypeMerge', index=8, number=8, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskTypeInfer', index=9, number=9, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskTypeSampling', index=10, number=10, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskTypeFusion', index=11, number=11, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskTypeInit', index=12, number=12, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskTypeImportModel', index=13, number=13, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskTypeEvaluate', index=14, number=16, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + serialized_start=4578, + serialized_end=4917, +) +_sym_db.RegisterEnumDescriptor(_TASKTYPE) + +TaskType = enum_type_wrapper.EnumTypeWrapper(_TASKTYPE) +_TASKSTATE = _descriptor.EnumDescriptor( + name='TaskState', + full_name='mir.command110.TaskState', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='TaskStateUnknown', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskStatePending', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskStateRunning', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskStateDone', index=3, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskStateError', index=4, number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskStateMiss', index=5, number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + serialized_start=4920, + serialized_end=5055, +) +_sym_db.RegisterEnumDescriptor(_TASKSTATE) + +TaskState = enum_type_wrapper.EnumTypeWrapper(_TASKSTATE) +_SHA1TYPE = _descriptor.EnumDescriptor( + name='Sha1Type', + full_name='mir.command110.Sha1Type', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='SHA1_TYPE_UNKNOWN', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='SHA1_TYPE_ASSET', index=1, number=1, + serialized_options=None, + 
type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='SHA1_TYPE_COMMIT', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + serialized_start=5057, + serialized_end=5133, +) +_sym_db.RegisterEnumDescriptor(_SHA1TYPE) + +Sha1Type = enum_type_wrapper.EnumTypeWrapper(_SHA1TYPE) +_MIRSTORAGE = _descriptor.EnumDescriptor( + name='MirStorage', + full_name='mir.command110.MirStorage', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='MIR_METADATAS', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='MIR_ANNOTATIONS', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='MIR_KEYWORDS', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='MIR_TASKS', index=3, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='MIR_CONTEXT', index=4, number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + serialized_start=5135, + serialized_end=5237, +) +_sym_db.RegisterEnumDescriptor(_MIRSTORAGE) + +MirStorage = enum_type_wrapper.EnumTypeWrapper(_MIRSTORAGE) +_LABELFORMAT = _descriptor.EnumDescriptor( + name='LabelFormat', + full_name='mir.command110.LabelFormat', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='NO_ANNOTATION', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='PASCAL_VOC', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='IF_ARK', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + serialized_start=5239, + serialized_end=5299, +) +_sym_db.RegisterEnumDescriptor(_LABELFORMAT) + +LabelFormat = enum_type_wrapper.EnumTypeWrapper(_LABELFORMAT) +TvtTypeUnknown = 0 +TvtTypeTraining = 1 +TvtTypeValidation = 2 +TvtTypeTest = 3 +AssetTypeUnknown = 0 +AssetTypeImageJpeg = 1 +AssetTypeImagePng = 2 +AssetTypeImagePixelMat = 3 +AssetTypeImageYuv420p = 4 +AssetTypeImageYuv420sp = 5 +AssetTypeImageYuv422p = 6 +AssetTypeImageYuv422sp = 7 +AssetTypeImageBmp = 8 +AssetTypeVideoMp4 = 101 +TaskTypeUnknown = 0 +TaskTypeTraining = 1 +TaskTypeMining = 2 +TaskTypeLabel = 3 +TaskTypeFilter = 4 +TaskTypeImportData = 5 +TaskTypeExportData = 6 +TaskTypeCopyData = 7 +TaskTypeMerge = 8 +TaskTypeInfer = 9 +TaskTypeSampling = 10 +TaskTypeFusion = 11 +TaskTypeInit = 12 +TaskTypeImportModel = 13 +TaskTypeEvaluate = 16 +TaskStateUnknown = 0 +TaskStatePending = 1 +TaskStateRunning = 2 +TaskStateDone = 3 +TaskStateError = 4 +TaskStateMiss = 5 +SHA1_TYPE_UNKNOWN = 0 +SHA1_TYPE_ASSET = 1 +SHA1_TYPE_COMMIT = 2 +MIR_METADATAS = 0 +MIR_ANNOTATIONS = 1 +MIR_KEYWORDS = 2 +MIR_TASKS = 3 +MIR_CONTEXT = 4 +NO_ANNOTATION = 0 +PASCAL_VOC = 
1 +IF_ARK = 2 + + + +_MIRMETADATAS_ATTRIBUTESENTRY = _descriptor.Descriptor( + name='AttributesEntry', + full_name='mir.command110.MirMetadatas.AttributesEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='mir.command110.MirMetadatas.AttributesEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='mir.command110.MirMetadatas.AttributesEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=124, + serialized_end=209, +) + +_MIRMETADATAS = _descriptor.Descriptor( + name='MirMetadatas', + full_name='mir.command110.MirMetadatas', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='attributes', full_name='mir.command110.MirMetadatas.attributes', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_MIRMETADATAS_ATTRIBUTESENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=42, + serialized_end=209, +) + + +_METADATAATTRIBUTES = _descriptor.Descriptor( + name='MetadataAttributes', + full_name='mir.command110.MetadataAttributes', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='dataset_name', full_name='mir.command110.MetadataAttributes.dataset_name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='timestamp', full_name='mir.command110.MetadataAttributes.timestamp', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='tvt_type', full_name='mir.command110.MetadataAttributes.tvt_type', index=2, + number=3, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='asset_type', full_name='mir.command110.MetadataAttributes.asset_type', index=3, + number=4, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='width', full_name='mir.command110.MetadataAttributes.width', index=4, + number=5, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='height', full_name='mir.command110.MetadataAttributes.height', index=5, + number=6, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='image_channels', full_name='mir.command110.MetadataAttributes.image_channels', index=6, + number=7, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=212, + serialized_end=445, +) + + +_TIMESTAMP = _descriptor.Descriptor( + name='Timestamp', + full_name='mir.command110.Timestamp', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='start', full_name='mir.command110.Timestamp.start', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='duration', full_name='mir.command110.Timestamp.duration', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=447, + serialized_end=491, +) + + +_MIRANNOTATIONS_TASKANNOTATIONSENTRY = _descriptor.Descriptor( + name='TaskAnnotationsEntry', + full_name='mir.command110.MirAnnotations.TaskAnnotationsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='mir.command110.MirAnnotations.TaskAnnotationsEntry.key', index=0, + number=1, type=9, 
cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='mir.command110.MirAnnotations.TaskAnnotationsEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=613, + serialized_end=706, +) + +_MIRANNOTATIONS = _descriptor.Descriptor( + name='MirAnnotations', + full_name='mir.command110.MirAnnotations', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='task_annotations', full_name='mir.command110.MirAnnotations.task_annotations', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='head_task_id', full_name='mir.command110.MirAnnotations.head_task_id', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_MIRANNOTATIONS_TASKANNOTATIONSENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=494, + serialized_end=706, +) + + +_SINGLETASKANNOTATIONS_IMAGEANNOTATIONSENTRY = _descriptor.Descriptor( + name='ImageAnnotationsEntry', + full_name='mir.command110.SingleTaskAnnotations.ImageAnnotationsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='mir.command110.SingleTaskAnnotations.ImageAnnotationsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='mir.command110.SingleTaskAnnotations.ImageAnnotationsEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + 
serialized_start=822, + serialized_end=917, +) + +_SINGLETASKANNOTATIONS = _descriptor.Descriptor( + name='SingleTaskAnnotations', + full_name='mir.command110.SingleTaskAnnotations', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='image_annotations', full_name='mir.command110.SingleTaskAnnotations.image_annotations', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_SINGLETASKANNOTATIONS_IMAGEANNOTATIONSENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=709, + serialized_end=917, +) + + +_SINGLEIMAGEANNOTATIONS = _descriptor.Descriptor( + name='SingleImageAnnotations', + full_name='mir.command110.SingleImageAnnotations', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='annotations', full_name='mir.command110.SingleImageAnnotations.annotations', index=0, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=919, + serialized_end=992, +) + + +_ANNOTATION = _descriptor.Descriptor( + name='Annotation', + full_name='mir.command110.Annotation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='index', full_name='mir.command110.Annotation.index', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='box', full_name='mir.command110.Annotation.box', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='class_id', full_name='mir.command110.Annotation.class_id', index=2, + number=3, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='score', full_name='mir.command110.Annotation.score', index=3, + number=4, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=994, + serialized_end=1089, +) + + +_RECT = _descriptor.Descriptor( + name='Rect', + full_name='mir.command110.Rect', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='x', full_name='mir.command110.Rect.x', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='y', full_name='mir.command110.Rect.y', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='w', full_name='mir.command110.Rect.w', index=2, + number=3, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='h', full_name='mir.command110.Rect.h', index=3, + number=4, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1091, + serialized_end=1141, +) + + +_MIRKEYWORDS_KEYWORDSENTRY = _descriptor.Descriptor( + name='KeywordsEntry', + full_name='mir.command110.MirKeywords.KeywordsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='mir.command110.MirKeywords.KeywordsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='mir.command110.MirKeywords.KeywordsEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1309, + serialized_end=1382, +) + +_MIRKEYWORDS_INDEXPREDIFINEDKEYIDSENTRY = 
_descriptor.Descriptor( + name='IndexPredifinedKeyidsEntry', + full_name='mir.command110.MirKeywords.IndexPredifinedKeyidsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='mir.command110.MirKeywords.IndexPredifinedKeyidsEntry.key', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='mir.command110.MirKeywords.IndexPredifinedKeyidsEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1384, + serialized_end=1468, +) + +_MIRKEYWORDS = _descriptor.Descriptor( + name='MirKeywords', + full_name='mir.command110.MirKeywords', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='keywords', full_name='mir.command110.MirKeywords.keywords', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='index_predifined_keyids', full_name='mir.command110.MirKeywords.index_predifined_keyids', index=1, + number=6, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_MIRKEYWORDS_KEYWORDSENTRY, _MIRKEYWORDS_INDEXPREDIFINEDKEYIDSENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1144, + serialized_end=1492, +) + + +_ASSETS = _descriptor.Descriptor( + name='Assets', + full_name='mir.command110.Assets', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='asset_ids', full_name='mir.command110.Assets.asset_ids', index=0, + number=1, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1494, + serialized_end=1521, +) + + +_KEYWORDS = _descriptor.Descriptor( + name='Keywords', + full_name='mir.command110.Keywords', + filename=None, + 
file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='predifined_keyids', full_name='mir.command110.Keywords.predifined_keyids', index=0, + number=1, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='customized_keywords', full_name='mir.command110.Keywords.customized_keywords', index=1, + number=2, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1523, + serialized_end=1589, +) + + +_MIRTASKS_TASKSENTRY = _descriptor.Descriptor( + name='TasksEntry', + full_name='mir.command110.MirTasks.TasksEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='mir.command110.MirTasks.TasksEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='mir.command110.MirTasks.TasksEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1678, + serialized_end=1744, +) + +_MIRTASKS = _descriptor.Descriptor( + name='MirTasks', + full_name='mir.command110.MirTasks', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='tasks', full_name='mir.command110.MirTasks.tasks', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='head_task_id', full_name='mir.command110.MirTasks.head_task_id', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_MIRTASKS_TASKSENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + 
extension_ranges=[], + oneofs=[ + ], + serialized_start=1592, + serialized_end=1744, +) + + +_TASK_UNKNOWNTYPESENTRY = _descriptor.Descriptor( + name='UnknownTypesEntry', + full_name='mir.command110.Task.UnknownTypesEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='mir.command110.Task.UnknownTypesEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='mir.command110.Task.UnknownTypesEntry.value', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2164, + serialized_end=2215, +) + +_TASK = _descriptor.Descriptor( + name='Task', + full_name='mir.command110.Task', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='type', full_name='mir.command110.Task.type', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='name', full_name='mir.command110.Task.name', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='task_id', full_name='mir.command110.Task.task_id', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='timestamp', full_name='mir.command110.Task.timestamp', index=3, + number=5, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='model', full_name='mir.command110.Task.model', index=4, + number=6, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='unknown_types', 
full_name='mir.command110.Task.unknown_types', index=5, + number=7, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='return_code', full_name='mir.command110.Task.return_code', index=6, + number=8, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='return_msg', full_name='mir.command110.Task.return_msg', index=7, + number=9, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='evaluation', full_name='mir.command110.Task.evaluation', index=8, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='serialized_task_parameters', full_name='mir.command110.Task.serialized_task_parameters', index=9, + number=102, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='serialized_executor_config', full_name='mir.command110.Task.serialized_executor_config', index=10, + number=103, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='src_revs', full_name='mir.command110.Task.src_revs', index=11, + number=104, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='dst_rev', full_name='mir.command110.Task.dst_rev', index=12, + number=105, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='executor', full_name='mir.command110.Task.executor', index=13, + number=106, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_TASK_UNKNOWNTYPESENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1747, + serialized_end=2233, +) + + +_MODELMETA = _descriptor.Descriptor( + name='ModelMeta', + full_name='mir.command110.ModelMeta', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='model_hash', full_name='mir.command110.ModelMeta.model_hash', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='mean_average_precision', full_name='mir.command110.ModelMeta.mean_average_precision', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='context', full_name='mir.command110.ModelMeta.context', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2235, + serialized_end=2315, +) + + +_EVALUATION_DATASETEVALUATIONSENTRY = _descriptor.Descriptor( + name='DatasetEvaluationsEntry', + full_name='mir.command110.Evaluation.DatasetEvaluationsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='mir.command110.Evaluation.DatasetEvaluationsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='mir.command110.Evaluation.DatasetEvaluationsEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2461, + serialized_end=2559, +) + +_EVALUATION = _descriptor.Descriptor( + name='Evaluation', + full_name='mir.command110.Evaluation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + 
name='config', full_name='mir.command110.Evaluation.config', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='dataset_evaluations', full_name='mir.command110.Evaluation.dataset_evaluations', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_EVALUATION_DATASETEVALUATIONSENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2318, + serialized_end=2559, +) + + +_EVALUATECONFIG = _descriptor.Descriptor( + name='EvaluateConfig', + full_name='mir.command110.EvaluateConfig', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='gt_dataset_id', full_name='mir.command110.EvaluateConfig.gt_dataset_id', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='pred_dataset_ids', full_name='mir.command110.EvaluateConfig.pred_dataset_ids', index=1, + number=2, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='conf_thr', full_name='mir.command110.EvaluateConfig.conf_thr', index=2, + number=3, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='iou_thrs_interval', full_name='mir.command110.EvaluateConfig.iou_thrs_interval', index=3, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='need_pr_curve', full_name='mir.command110.EvaluateConfig.need_pr_curve', index=4, + number=5, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2562, + serialized_end=2695, +) + + 
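
The `_EVALUATECONFIG` descriptor above carries the knobs the command-110 evaluation reads: a ground-truth dataset id, the prediction dataset ids, a confidence threshold, an IoU-threshold interval string, and a PR-curve switch. For orientation only (not part of this diff), a minimal sketch of driving it through the generated classes; the import path and the example values are assumptions:

```python
# Hypothetical usage sketch; the import path below is an assumption,
# not something this diff establishes.
from mir.protos import mir_command_110_pb2 as pb

config = pb.EvaluateConfig()
config.gt_dataset_id = 'gt-branch-id'          # string, field number 1
config.pred_dataset_ids.append('pred-branch')  # repeated string, field number 2
config.conf_thr = 0.5                          # float, field number 3
config.iou_thrs_interval = '0.5:0.95:0.05'     # string, field number 4 (example value)
config.need_pr_curve = True                    # bool, field number 5

# Round-trip through the wire format to confirm the descriptor wiring.
blob = config.SerializeToString()
parsed = pb.EvaluateConfig.FromString(blob)
assert parsed == config
```
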
+_SINGLEDATASETEVALUATION_IOUEVALUATIONSENTRY = _descriptor.Descriptor( + name='IouEvaluationsEntry', + full_name='mir.command110.SingleDatasetEvaluation.IouEvaluationsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='mir.command110.SingleDatasetEvaluation.IouEvaluationsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='mir.command110.SingleDatasetEvaluation.IouEvaluationsEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2947, + serialized_end=3037, +) + +_SINGLEDATASETEVALUATION = _descriptor.Descriptor( + name='SingleDatasetEvaluation', + full_name='mir.command110.SingleDatasetEvaluation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='conf_thr', full_name='mir.command110.SingleDatasetEvaluation.conf_thr', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='gt_dataset_id', full_name='mir.command110.SingleDatasetEvaluation.gt_dataset_id', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='pred_dataset_id', full_name='mir.command110.SingleDatasetEvaluation.pred_dataset_id', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='iou_evaluations', full_name='mir.command110.SingleDatasetEvaluation.iou_evaluations', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='iou_averaged_evaluation', full_name='mir.command110.SingleDatasetEvaluation.iou_averaged_evaluation', index=4, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_SINGLEDATASETEVALUATION_IOUEVALUATIONSENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2698, + serialized_end=3037, +) + + +_SINGLEIOUEVALUATION_CIEVALUATIONSENTRY = _descriptor.Descriptor( + name='CiEvaluationsEntry', + full_name='mir.command110.SingleIouEvaluation.CiEvaluationsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='mir.command110.SingleIouEvaluation.CiEvaluationsEntry.key', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='mir.command110.SingleIouEvaluation.CiEvaluationsEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=3300, + serialized_end=3391, +) + +_SINGLEIOUEVALUATION_TOPICEVALUATIONSENTRY = _descriptor.Descriptor( + name='TopicEvaluationsEntry', + full_name='mir.command110.SingleIouEvaluation.TopicEvaluationsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='mir.command110.SingleIouEvaluation.TopicEvaluationsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='mir.command110.SingleIouEvaluation.TopicEvaluationsEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=3393, + serialized_end=3487, +) + +_SINGLEIOUEVALUATION = _descriptor.Descriptor( + name='SingleIouEvaluation', + full_name='mir.command110.SingleIouEvaluation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='ci_evaluations', full_name='mir.command110.SingleIouEvaluation.ci_evaluations', index=0, + number=1, type=11, 
cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='ci_averaged_evaluation', full_name='mir.command110.SingleIouEvaluation.ci_averaged_evaluation', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='topic_evaluations', full_name='mir.command110.SingleIouEvaluation.topic_evaluations', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_SINGLEIOUEVALUATION_CIEVALUATIONSENTRY, _SINGLEIOUEVALUATION_TOPICEVALUATIONSENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=3040, + serialized_end=3487, +) + + +_SINGLETOPICEVALUATION = _descriptor.Descriptor( + name='SingleTopicEvaluation', + full_name='mir.command110.SingleTopicEvaluation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='ap', full_name='mir.command110.SingleTopicEvaluation.ap', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='ar', full_name='mir.command110.SingleTopicEvaluation.ar', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='tp', full_name='mir.command110.SingleTopicEvaluation.tp', index=2, + number=3, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='fp', full_name='mir.command110.SingleTopicEvaluation.fp', index=3, + number=4, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='fn', full_name='mir.command110.SingleTopicEvaluation.fn', index=4, + number=5, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='pr_curve', full_name='mir.command110.SingleTopicEvaluation.pr_curve', index=5, + number=6, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=3490, + serialized_end=3619, +) + + +_FLOATPOINT = _descriptor.Descriptor( + name='FloatPoint', + full_name='mir.command110.FloatPoint', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='x', full_name='mir.command110.FloatPoint.x', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='y', full_name='mir.command110.FloatPoint.y', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=3621, + serialized_end=3655, +) + + +_MIRCONTEXT_PREDEFINEDKEYIDSCNTENTRY = _descriptor.Descriptor( + name='PredefinedKeyidsCntEntry', + full_name='mir.command110.MirContext.PredefinedKeyidsCntEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='mir.command110.MirContext.PredefinedKeyidsCntEntry.key', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='mir.command110.MirContext.PredefinedKeyidsCntEntry.value', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=4029, + serialized_end=4087, +) + +_MIRCONTEXT_PROJECTPREDEFINEDKEYIDSCNTENTRY = _descriptor.Descriptor( + name='ProjectPredefinedKeyidsCntEntry', + full_name='mir.command110.MirContext.ProjectPredefinedKeyidsCntEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', 
full_name='mir.command110.MirContext.ProjectPredefinedKeyidsCntEntry.key', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='mir.command110.MirContext.ProjectPredefinedKeyidsCntEntry.value', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=4089, + serialized_end=4154, +) + +_MIRCONTEXT_CUSTOMIZEDKEYWORDSCNTENTRY = _descriptor.Descriptor( + name='CustomizedKeywordsCntEntry', + full_name='mir.command110.MirContext.CustomizedKeywordsCntEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='mir.command110.MirContext.CustomizedKeywordsCntEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='mir.command110.MirContext.CustomizedKeywordsCntEntry.value', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=4156, + serialized_end=4216, +) + +_MIRCONTEXT = _descriptor.Descriptor( + name='MirContext', + full_name='mir.command110.MirContext', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='images_cnt', full_name='mir.command110.MirContext.images_cnt', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='negative_images_cnt', full_name='mir.command110.MirContext.negative_images_cnt', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='project_negative_images_cnt', full_name='mir.command110.MirContext.project_negative_images_cnt', index=2, + number=3, type=5, cpp_type=1, label=1, + 
has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='predefined_keyids_cnt', full_name='mir.command110.MirContext.predefined_keyids_cnt', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='project_predefined_keyids_cnt', full_name='mir.command110.MirContext.project_predefined_keyids_cnt', index=4, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='customized_keywords_cnt', full_name='mir.command110.MirContext.customized_keywords_cnt', index=5, + number=6, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_MIRCONTEXT_PREDEFINEDKEYIDSCNTENTRY, _MIRCONTEXT_PROJECTPREDEFINEDKEYIDSCNTENTRY, _MIRCONTEXT_CUSTOMIZEDKEYWORDSCNTENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=3658, + serialized_end=4216, +) + +_MIRMETADATAS_ATTRIBUTESENTRY.fields_by_name['value'].message_type = _METADATAATTRIBUTES +_MIRMETADATAS_ATTRIBUTESENTRY.containing_type = _MIRMETADATAS +_MIRMETADATAS.fields_by_name['attributes'].message_type = _MIRMETADATAS_ATTRIBUTESENTRY +_METADATAATTRIBUTES.fields_by_name['timestamp'].message_type = _TIMESTAMP +_METADATAATTRIBUTES.fields_by_name['tvt_type'].enum_type = _TVTTYPE +_METADATAATTRIBUTES.fields_by_name['asset_type'].enum_type = _ASSETTYPE +_MIRANNOTATIONS_TASKANNOTATIONSENTRY.fields_by_name['value'].message_type = _SINGLETASKANNOTATIONS +_MIRANNOTATIONS_TASKANNOTATIONSENTRY.containing_type = _MIRANNOTATIONS +_MIRANNOTATIONS.fields_by_name['task_annotations'].message_type = _MIRANNOTATIONS_TASKANNOTATIONSENTRY +_SINGLETASKANNOTATIONS_IMAGEANNOTATIONSENTRY.fields_by_name['value'].message_type = _SINGLEIMAGEANNOTATIONS +_SINGLETASKANNOTATIONS_IMAGEANNOTATIONSENTRY.containing_type = _SINGLETASKANNOTATIONS +_SINGLETASKANNOTATIONS.fields_by_name['image_annotations'].message_type = _SINGLETASKANNOTATIONS_IMAGEANNOTATIONSENTRY +_SINGLEIMAGEANNOTATIONS.fields_by_name['annotations'].message_type = _ANNOTATION +_ANNOTATION.fields_by_name['box'].message_type = _RECT +_MIRKEYWORDS_KEYWORDSENTRY.fields_by_name['value'].message_type = _KEYWORDS +_MIRKEYWORDS_KEYWORDSENTRY.containing_type = _MIRKEYWORDS +_MIRKEYWORDS_INDEXPREDIFINEDKEYIDSENTRY.fields_by_name['value'].message_type = _ASSETS +_MIRKEYWORDS_INDEXPREDIFINEDKEYIDSENTRY.containing_type = _MIRKEYWORDS +_MIRKEYWORDS.fields_by_name['keywords'].message_type = _MIRKEYWORDS_KEYWORDSENTRY +_MIRKEYWORDS.fields_by_name['index_predifined_keyids'].message_type = _MIRKEYWORDS_INDEXPREDIFINEDKEYIDSENTRY 
+_MIRTASKS_TASKSENTRY.fields_by_name['value'].message_type = _TASK +_MIRTASKS_TASKSENTRY.containing_type = _MIRTASKS +_MIRTASKS.fields_by_name['tasks'].message_type = _MIRTASKS_TASKSENTRY +_TASK_UNKNOWNTYPESENTRY.containing_type = _TASK +_TASK.fields_by_name['type'].enum_type = _TASKTYPE +_TASK.fields_by_name['model'].message_type = _MODELMETA +_TASK.fields_by_name['unknown_types'].message_type = _TASK_UNKNOWNTYPESENTRY +_TASK.fields_by_name['evaluation'].message_type = _EVALUATION +_EVALUATION_DATASETEVALUATIONSENTRY.fields_by_name['value'].message_type = _SINGLEDATASETEVALUATION +_EVALUATION_DATASETEVALUATIONSENTRY.containing_type = _EVALUATION +_EVALUATION.fields_by_name['config'].message_type = _EVALUATECONFIG +_EVALUATION.fields_by_name['dataset_evaluations'].message_type = _EVALUATION_DATASETEVALUATIONSENTRY +_SINGLEDATASETEVALUATION_IOUEVALUATIONSENTRY.fields_by_name['value'].message_type = _SINGLEIOUEVALUATION +_SINGLEDATASETEVALUATION_IOUEVALUATIONSENTRY.containing_type = _SINGLEDATASETEVALUATION +_SINGLEDATASETEVALUATION.fields_by_name['iou_evaluations'].message_type = _SINGLEDATASETEVALUATION_IOUEVALUATIONSENTRY +_SINGLEDATASETEVALUATION.fields_by_name['iou_averaged_evaluation'].message_type = _SINGLEIOUEVALUATION +_SINGLEIOUEVALUATION_CIEVALUATIONSENTRY.fields_by_name['value'].message_type = _SINGLETOPICEVALUATION +_SINGLEIOUEVALUATION_CIEVALUATIONSENTRY.containing_type = _SINGLEIOUEVALUATION +_SINGLEIOUEVALUATION_TOPICEVALUATIONSENTRY.fields_by_name['value'].message_type = _SINGLETOPICEVALUATION +_SINGLEIOUEVALUATION_TOPICEVALUATIONSENTRY.containing_type = _SINGLEIOUEVALUATION +_SINGLEIOUEVALUATION.fields_by_name['ci_evaluations'].message_type = _SINGLEIOUEVALUATION_CIEVALUATIONSENTRY +_SINGLEIOUEVALUATION.fields_by_name['ci_averaged_evaluation'].message_type = _SINGLETOPICEVALUATION +_SINGLEIOUEVALUATION.fields_by_name['topic_evaluations'].message_type = _SINGLEIOUEVALUATION_TOPICEVALUATIONSENTRY +_SINGLETOPICEVALUATION.fields_by_name['pr_curve'].message_type = _FLOATPOINT +_MIRCONTEXT_PREDEFINEDKEYIDSCNTENTRY.containing_type = _MIRCONTEXT +_MIRCONTEXT_PROJECTPREDEFINEDKEYIDSCNTENTRY.containing_type = _MIRCONTEXT +_MIRCONTEXT_CUSTOMIZEDKEYWORDSCNTENTRY.containing_type = _MIRCONTEXT +_MIRCONTEXT.fields_by_name['predefined_keyids_cnt'].message_type = _MIRCONTEXT_PREDEFINEDKEYIDSCNTENTRY +_MIRCONTEXT.fields_by_name['project_predefined_keyids_cnt'].message_type = _MIRCONTEXT_PROJECTPREDEFINEDKEYIDSCNTENTRY +_MIRCONTEXT.fields_by_name['customized_keywords_cnt'].message_type = _MIRCONTEXT_CUSTOMIZEDKEYWORDSCNTENTRY +DESCRIPTOR.message_types_by_name['MirMetadatas'] = _MIRMETADATAS +DESCRIPTOR.message_types_by_name['MetadataAttributes'] = _METADATAATTRIBUTES +DESCRIPTOR.message_types_by_name['Timestamp'] = _TIMESTAMP +DESCRIPTOR.message_types_by_name['MirAnnotations'] = _MIRANNOTATIONS +DESCRIPTOR.message_types_by_name['SingleTaskAnnotations'] = _SINGLETASKANNOTATIONS +DESCRIPTOR.message_types_by_name['SingleImageAnnotations'] = _SINGLEIMAGEANNOTATIONS +DESCRIPTOR.message_types_by_name['Annotation'] = _ANNOTATION +DESCRIPTOR.message_types_by_name['Rect'] = _RECT +DESCRIPTOR.message_types_by_name['MirKeywords'] = _MIRKEYWORDS +DESCRIPTOR.message_types_by_name['Assets'] = _ASSETS +DESCRIPTOR.message_types_by_name['Keywords'] = _KEYWORDS +DESCRIPTOR.message_types_by_name['MirTasks'] = _MIRTASKS +DESCRIPTOR.message_types_by_name['Task'] = _TASK +DESCRIPTOR.message_types_by_name['ModelMeta'] = _MODELMETA +DESCRIPTOR.message_types_by_name['Evaluation'] = _EVALUATION 
+DESCRIPTOR.message_types_by_name['EvaluateConfig'] = _EVALUATECONFIG +DESCRIPTOR.message_types_by_name['SingleDatasetEvaluation'] = _SINGLEDATASETEVALUATION +DESCRIPTOR.message_types_by_name['SingleIouEvaluation'] = _SINGLEIOUEVALUATION +DESCRIPTOR.message_types_by_name['SingleTopicEvaluation'] = _SINGLETOPICEVALUATION +DESCRIPTOR.message_types_by_name['FloatPoint'] = _FLOATPOINT +DESCRIPTOR.message_types_by_name['MirContext'] = _MIRCONTEXT +DESCRIPTOR.enum_types_by_name['TvtType'] = _TVTTYPE +DESCRIPTOR.enum_types_by_name['AssetType'] = _ASSETTYPE +DESCRIPTOR.enum_types_by_name['TaskType'] = _TASKTYPE +DESCRIPTOR.enum_types_by_name['TaskState'] = _TASKSTATE +DESCRIPTOR.enum_types_by_name['Sha1Type'] = _SHA1TYPE +DESCRIPTOR.enum_types_by_name['MirStorage'] = _MIRSTORAGE +DESCRIPTOR.enum_types_by_name['LabelFormat'] = _LABELFORMAT +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +MirMetadatas = _reflection.GeneratedProtocolMessageType('MirMetadatas', (_message.Message,), { + + 'AttributesEntry' : _reflection.GeneratedProtocolMessageType('AttributesEntry', (_message.Message,), { + 'DESCRIPTOR' : _MIRMETADATAS_ATTRIBUTESENTRY, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.MirMetadatas.AttributesEntry) + }) + , + 'DESCRIPTOR' : _MIRMETADATAS, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.MirMetadatas) + }) +_sym_db.RegisterMessage(MirMetadatas) +_sym_db.RegisterMessage(MirMetadatas.AttributesEntry) + +MetadataAttributes = _reflection.GeneratedProtocolMessageType('MetadataAttributes', (_message.Message,), { + 'DESCRIPTOR' : _METADATAATTRIBUTES, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.MetadataAttributes) + }) +_sym_db.RegisterMessage(MetadataAttributes) + +Timestamp = _reflection.GeneratedProtocolMessageType('Timestamp', (_message.Message,), { + 'DESCRIPTOR' : _TIMESTAMP, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.Timestamp) + }) +_sym_db.RegisterMessage(Timestamp) + +MirAnnotations = _reflection.GeneratedProtocolMessageType('MirAnnotations', (_message.Message,), { + + 'TaskAnnotationsEntry' : _reflection.GeneratedProtocolMessageType('TaskAnnotationsEntry', (_message.Message,), { + 'DESCRIPTOR' : _MIRANNOTATIONS_TASKANNOTATIONSENTRY, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.MirAnnotations.TaskAnnotationsEntry) + }) + , + 'DESCRIPTOR' : _MIRANNOTATIONS, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.MirAnnotations) + }) +_sym_db.RegisterMessage(MirAnnotations) +_sym_db.RegisterMessage(MirAnnotations.TaskAnnotationsEntry) + +SingleTaskAnnotations = _reflection.GeneratedProtocolMessageType('SingleTaskAnnotations', (_message.Message,), { + + 'ImageAnnotationsEntry' : _reflection.GeneratedProtocolMessageType('ImageAnnotationsEntry', (_message.Message,), { + 'DESCRIPTOR' : _SINGLETASKANNOTATIONS_IMAGEANNOTATIONSENTRY, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.SingleTaskAnnotations.ImageAnnotationsEntry) + }) + , + 'DESCRIPTOR' : _SINGLETASKANNOTATIONS, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.SingleTaskAnnotations) + }) +_sym_db.RegisterMessage(SingleTaskAnnotations) +_sym_db.RegisterMessage(SingleTaskAnnotations.ImageAnnotationsEntry) + +SingleImageAnnotations = 
_reflection.GeneratedProtocolMessageType('SingleImageAnnotations', (_message.Message,), { + 'DESCRIPTOR' : _SINGLEIMAGEANNOTATIONS, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.SingleImageAnnotations) + }) +_sym_db.RegisterMessage(SingleImageAnnotations) + +Annotation = _reflection.GeneratedProtocolMessageType('Annotation', (_message.Message,), { + 'DESCRIPTOR' : _ANNOTATION, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.Annotation) + }) +_sym_db.RegisterMessage(Annotation) + +Rect = _reflection.GeneratedProtocolMessageType('Rect', (_message.Message,), { + 'DESCRIPTOR' : _RECT, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.Rect) + }) +_sym_db.RegisterMessage(Rect) + +MirKeywords = _reflection.GeneratedProtocolMessageType('MirKeywords', (_message.Message,), { + + 'KeywordsEntry' : _reflection.GeneratedProtocolMessageType('KeywordsEntry', (_message.Message,), { + 'DESCRIPTOR' : _MIRKEYWORDS_KEYWORDSENTRY, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.MirKeywords.KeywordsEntry) + }) + , + + 'IndexPredifinedKeyidsEntry' : _reflection.GeneratedProtocolMessageType('IndexPredifinedKeyidsEntry', (_message.Message,), { + 'DESCRIPTOR' : _MIRKEYWORDS_INDEXPREDIFINEDKEYIDSENTRY, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.MirKeywords.IndexPredifinedKeyidsEntry) + }) + , + 'DESCRIPTOR' : _MIRKEYWORDS, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.MirKeywords) + }) +_sym_db.RegisterMessage(MirKeywords) +_sym_db.RegisterMessage(MirKeywords.KeywordsEntry) +_sym_db.RegisterMessage(MirKeywords.IndexPredifinedKeyidsEntry) + +Assets = _reflection.GeneratedProtocolMessageType('Assets', (_message.Message,), { + 'DESCRIPTOR' : _ASSETS, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.Assets) + }) +_sym_db.RegisterMessage(Assets) + +Keywords = _reflection.GeneratedProtocolMessageType('Keywords', (_message.Message,), { + 'DESCRIPTOR' : _KEYWORDS, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.Keywords) + }) +_sym_db.RegisterMessage(Keywords) + +MirTasks = _reflection.GeneratedProtocolMessageType('MirTasks', (_message.Message,), { + + 'TasksEntry' : _reflection.GeneratedProtocolMessageType('TasksEntry', (_message.Message,), { + 'DESCRIPTOR' : _MIRTASKS_TASKSENTRY, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.MirTasks.TasksEntry) + }) + , + 'DESCRIPTOR' : _MIRTASKS, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.MirTasks) + }) +_sym_db.RegisterMessage(MirTasks) +_sym_db.RegisterMessage(MirTasks.TasksEntry) + +Task = _reflection.GeneratedProtocolMessageType('Task', (_message.Message,), { + + 'UnknownTypesEntry' : _reflection.GeneratedProtocolMessageType('UnknownTypesEntry', (_message.Message,), { + 'DESCRIPTOR' : _TASK_UNKNOWNTYPESENTRY, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.Task.UnknownTypesEntry) + }) + , + 'DESCRIPTOR' : _TASK, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.Task) + }) +_sym_db.RegisterMessage(Task) +_sym_db.RegisterMessage(Task.UnknownTypesEntry) + +ModelMeta = _reflection.GeneratedProtocolMessageType('ModelMeta', 
(_message.Message,), { + 'DESCRIPTOR' : _MODELMETA, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.ModelMeta) + }) +_sym_db.RegisterMessage(ModelMeta) + +Evaluation = _reflection.GeneratedProtocolMessageType('Evaluation', (_message.Message,), { + + 'DatasetEvaluationsEntry' : _reflection.GeneratedProtocolMessageType('DatasetEvaluationsEntry', (_message.Message,), { + 'DESCRIPTOR' : _EVALUATION_DATASETEVALUATIONSENTRY, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.Evaluation.DatasetEvaluationsEntry) + }) + , + 'DESCRIPTOR' : _EVALUATION, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.Evaluation) + }) +_sym_db.RegisterMessage(Evaluation) +_sym_db.RegisterMessage(Evaluation.DatasetEvaluationsEntry) + +EvaluateConfig = _reflection.GeneratedProtocolMessageType('EvaluateConfig', (_message.Message,), { + 'DESCRIPTOR' : _EVALUATECONFIG, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.EvaluateConfig) + }) +_sym_db.RegisterMessage(EvaluateConfig) + +SingleDatasetEvaluation = _reflection.GeneratedProtocolMessageType('SingleDatasetEvaluation', (_message.Message,), { + + 'IouEvaluationsEntry' : _reflection.GeneratedProtocolMessageType('IouEvaluationsEntry', (_message.Message,), { + 'DESCRIPTOR' : _SINGLEDATASETEVALUATION_IOUEVALUATIONSENTRY, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.SingleDatasetEvaluation.IouEvaluationsEntry) + }) + , + 'DESCRIPTOR' : _SINGLEDATASETEVALUATION, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.SingleDatasetEvaluation) + }) +_sym_db.RegisterMessage(SingleDatasetEvaluation) +_sym_db.RegisterMessage(SingleDatasetEvaluation.IouEvaluationsEntry) + +SingleIouEvaluation = _reflection.GeneratedProtocolMessageType('SingleIouEvaluation', (_message.Message,), { + + 'CiEvaluationsEntry' : _reflection.GeneratedProtocolMessageType('CiEvaluationsEntry', (_message.Message,), { + 'DESCRIPTOR' : _SINGLEIOUEVALUATION_CIEVALUATIONSENTRY, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.SingleIouEvaluation.CiEvaluationsEntry) + }) + , + + 'TopicEvaluationsEntry' : _reflection.GeneratedProtocolMessageType('TopicEvaluationsEntry', (_message.Message,), { + 'DESCRIPTOR' : _SINGLEIOUEVALUATION_TOPICEVALUATIONSENTRY, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.SingleIouEvaluation.TopicEvaluationsEntry) + }) + , + 'DESCRIPTOR' : _SINGLEIOUEVALUATION, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.SingleIouEvaluation) + }) +_sym_db.RegisterMessage(SingleIouEvaluation) +_sym_db.RegisterMessage(SingleIouEvaluation.CiEvaluationsEntry) +_sym_db.RegisterMessage(SingleIouEvaluation.TopicEvaluationsEntry) + +SingleTopicEvaluation = _reflection.GeneratedProtocolMessageType('SingleTopicEvaluation', (_message.Message,), { + 'DESCRIPTOR' : _SINGLETOPICEVALUATION, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.SingleTopicEvaluation) + }) +_sym_db.RegisterMessage(SingleTopicEvaluation) + +FloatPoint = _reflection.GeneratedProtocolMessageType('FloatPoint', (_message.Message,), { + 'DESCRIPTOR' : _FLOATPOINT, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.FloatPoint) + }) 
+_sym_db.RegisterMessage(FloatPoint) + +MirContext = _reflection.GeneratedProtocolMessageType('MirContext', (_message.Message,), { + + 'PredefinedKeyidsCntEntry' : _reflection.GeneratedProtocolMessageType('PredefinedKeyidsCntEntry', (_message.Message,), { + 'DESCRIPTOR' : _MIRCONTEXT_PREDEFINEDKEYIDSCNTENTRY, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.MirContext.PredefinedKeyidsCntEntry) + }) + , + + 'ProjectPredefinedKeyidsCntEntry' : _reflection.GeneratedProtocolMessageType('ProjectPredefinedKeyidsCntEntry', (_message.Message,), { + 'DESCRIPTOR' : _MIRCONTEXT_PROJECTPREDEFINEDKEYIDSCNTENTRY, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.MirContext.ProjectPredefinedKeyidsCntEntry) + }) + , + + 'CustomizedKeywordsCntEntry' : _reflection.GeneratedProtocolMessageType('CustomizedKeywordsCntEntry', (_message.Message,), { + 'DESCRIPTOR' : _MIRCONTEXT_CUSTOMIZEDKEYWORDSCNTENTRY, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.MirContext.CustomizedKeywordsCntEntry) + }) + , + 'DESCRIPTOR' : _MIRCONTEXT, + '__module__' : 'mir_command_110_pb2' + # @@protoc_insertion_point(class_scope:mir.command110.MirContext) + }) +_sym_db.RegisterMessage(MirContext) +_sym_db.RegisterMessage(MirContext.PredefinedKeyidsCntEntry) +_sym_db.RegisterMessage(MirContext.ProjectPredefinedKeyidsCntEntry) +_sym_db.RegisterMessage(MirContext.CustomizedKeywordsCntEntry) + + +_MIRMETADATAS_ATTRIBUTESENTRY._options = None +_MIRANNOTATIONS_TASKANNOTATIONSENTRY._options = None +_SINGLETASKANNOTATIONS_IMAGEANNOTATIONSENTRY._options = None +_MIRKEYWORDS_KEYWORDSENTRY._options = None +_MIRKEYWORDS_INDEXPREDIFINEDKEYIDSENTRY._options = None +_MIRTASKS_TASKSENTRY._options = None +_TASK_UNKNOWNTYPESENTRY._options = None +_EVALUATION_DATASETEVALUATIONSENTRY._options = None +_SINGLEDATASETEVALUATION_IOUEVALUATIONSENTRY._options = None +_SINGLEIOUEVALUATION_CIEVALUATIONSENTRY._options = None +_SINGLEIOUEVALUATION_TOPICEVALUATIONSENTRY._options = None +_MIRCONTEXT_PREDEFINEDKEYIDSCNTENTRY._options = None +_MIRCONTEXT_PROJECTPREDEFINEDKEYIDSCNTENTRY._options = None +_MIRCONTEXT_CUSTOMIZEDKEYWORDSCNTENTRY._options = None +# @@protoc_insertion_point(module_scope) diff --git a/ymir/updater/app/mir/protos/mir_command_200.proto b/ymir/updater/app/mir/protos/mir_command_200.proto new file mode 100644 index 0000000000..5bd4d3d2c1 --- /dev/null +++ b/ymir/updater/app/mir/protos/mir_command_200.proto @@ -0,0 +1,417 @@ +syntax = "proto3"; + +package mir.command200; + +option go_package = "/protos"; + +/// asset tvt type: training, validation or test +enum TvtType { + TvtTypeUnknown = 0; + TvtTypeTraining = 1; + TvtTypeValidation = 2; + TvtTypeTest = 3; +}; + +enum AssetType { + AssetTypeUnknown = 0; + AssetTypeImageJpeg = 1; + AssetTypeImagePng = 2; + AssetTypeImagePixelMat = 3; + AssetTypeImageYuv420p = 4; + AssetTypeImageYuv420sp = 5; + AssetTypeImageYuv422p = 6; + AssetTypeImageYuv422sp = 7; + AssetTypeImageBmp = 8; + AssetTypeVideoMp4 = 101; +}; + +/// task type +enum TaskType { + TaskTypeUnknown = 0; + TaskTypeTraining = 1; + TaskTypeMining = 2; + TaskTypeLabel = 3; + TaskTypeFilter = 4; + TaskTypeImportData = 5; + TaskTypeExportData = 6; + TaskTypeCopyData = 7; + TaskTypeMerge = 8; + TaskTypeInfer = 9; + TaskTypeSampling = 10; + /// used by ymir_controller + TaskTypeFusion = 11; + TaskTypeInit = 12; + TaskTypeImportModel = 13; + TaskTypeCopyModel = 14; + TaskTypeDatasetInfer = 15; + TaskTypeEvaluate = 16; +};
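Once compiled, the enums above surface in Python through protobuf's EnumTypeWrapper, which the generated mir_command_200_pb2.py further below constructs. A minimal sketch of the two-way name/value lookups, assuming the generated module is importable; the import path used here is hypothetical and depends on how ymir/updater/app is packaged:

    from mir.protos import mir_command_200_pb2 as mir200

    # EnumTypeWrapper supports lookups in both directions.
    assert mir200.TaskType.Name(16) == 'TaskTypeEvaluate'
    assert mir200.TaskType.Value('TaskTypeTraining') == 1
    # Each value is also exported as a module-level constant:
    assert mir200.TvtTypeValidation == 2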
+ +enum TaskState { + TaskStateUnknown = 0; + TaskStatePending = 1; + TaskStateRunning = 2; + TaskStateDone = 3; + TaskStateError = 4; + TaskStateMiss = 5; +}; + +enum Sha1Type { + SHA1_TYPE_UNKNOWN = 0; + SHA1_TYPE_ASSET = 1; + SHA1_TYPE_COMMIT = 2; +} + +enum MirStorage { + MIR_METADATAS = 0; + MIR_ANNOTATIONS = 1; + MIR_KEYWORDS = 2; + MIR_TASKS = 3; + MIR_CONTEXT = 4; +} + +enum AnnoFormat { + AF_NO_ANNOTATION = 0; + AF_DET_PASCAL_VOC = 1; + AF_DET_ARK_JSON = 2; + AF_DET_LS_JSON = 3; + AF_SEG_POLYGON = 4; + AF_SEG_MASK = 5; +}; + +enum AssetFormat { + AF_UNKNOWN = 0; + AF_RAW = 1; + AF_LMDB = 2; +}; + +/// ========== metadatas.mir ========== +message MirMetadatas { + /// key: asset hash, value: attributes + map<string, MetadataAttributes> attributes = 1; +}; + +message MetadataAttributes { + Timestamp timestamp = 2; + TvtType tvt_type = 3; + AssetType asset_type = 4; + int32 width = 5; /// column number + int32 height = 6; /// row number + int32 image_channels = 7; /// (for images) channel count + int32 byte_size = 8; + string origin_filename = 9; + + reserved 1; +}; + +message Timestamp { + /// start time stamp, use int32 as int64 is not correctly parsed. + int32 start = 1; + /// duration (in seconds), for images, it's always 0 + float duration = 2; +}; + +/// ========== annotations.mir ========== +message MirAnnotations { + SingleTaskAnnotations ground_truth = 3; + SingleTaskAnnotations prediction = 4; + // key: asset id, value: cks and image quality, from pred and gt + map<string, SingleImageCks> image_cks = 5; + + reserved 1, 2; +}; + +enum AnnoType { + AT_UNKNOWN = 0; + AT_CLASS = 1; // Classification with class id, not implemented. + AT_DET_BOX = 2; // Detection w. bounding box. + AT_SEG_POLYGON = 3; // Semantic Segmentation w. polygons. + AT_SEG_MASK = 4; // Instance Segmentation w. mask. +}; + +message SingleTaskAnnotations { + /// key: image id, value: annotations of that single image + map<string, SingleImageAnnotations> image_annotations = 1; + string task_id = 2; + AnnoType type = 3; + // Set of all shown class ids. + repeated int32 task_class_ids = 4; + map<int32, IntPoint> map_id_color = 5; + + // meta info of this SingleTaskAnnotations + repeated int32 eval_class_ids = 10; + // model meta info associated with this single_task_annotations + ModelMeta model = 11; + // executor config used to generate this single task annotations + string executor_config = 12; +}; + +message SingleImageAnnotations { + repeated ObjectAnnotation boxes = 2; + + repeated ObjectAnnotation polygons = 3; + + MaskAnnotation mask = 4; + // Set of class ids shown in this image. + repeated int32 img_class_ids = 5; + + reserved 1; +}; + +message SingleImageCks { + map<string, string> cks = 1; + float image_quality = 2; +} + +message MaskAnnotation { + // PNG image with 3 channels where each pixel corresponds to a class_id. + bytes semantic_mask = 1; + // PNG image with 3 channels where each pixel corresponds to an object_id. + bytes instance_mask = 2; + repeated int32 object_ids = 3; +} + +message ObjectAnnotation { + // Index of this annotation in current single image, may be different from the index in repeated field. + int32 index = 1; + Rect box = 2; + int32 class_id = 3; + double score = 4; + float anno_quality = 5; + map<string, string> tags = 6; + ConfusionMatrixType cm = 7; + int32 det_link_id = 8; + string class_name = 9; // for data parsed from outside, e.g. inference. + repeated IntPoint polygon = 10; +}; + +enum ConfusionMatrixType { + NotSet = 0; + TP = 1; + FP = 2; + FN = 3; + TN = 4; + Unknown = 5; + // Matched True Positive, only for gt. + MTP = 11; + IGNORED = 12; +};
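The annotation messages above compose as follows when written from Python: MirAnnotations holds one SingleTaskAnnotations each for ground truth and prediction, and their image_annotations maps key asset hashes to the per-image boxes. A minimal write-side sketch, under the same hypothetical import path as above; the asset hash is a placeholder:

    from mir.protos import mir_command_200_pb2 as mir200

    annotations = mir200.MirAnnotations()
    annotations.prediction.type = mir200.AT_DET_BOX
    # Message-valued map entries are created on first access.
    image = annotations.prediction.image_annotations['0' * 40]  # placeholder asset hash
    box = image.boxes.add()  # repeated ObjectAnnotation
    box.box.x, box.box.y, box.box.w, box.box.h = 10, 20, 64, 48
    box.class_id = 3
    box.score = 0.87
    box.cm = mir200.NotSet  # ConfusionMatrixType is assigned during evaluation
    image.img_class_ids.append(box.class_id)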
+ +message Rect { + int32 x = 1; + int32 y = 2; + int32 w = 3; + int32 h = 4; + float rotate_angle = 5; // unit in pi. +}; + +/// ========== keywords.mir ========== +message MirKeywords { + CiTagToIndex pred_idx = 7; // ci to assets, generated from preds + CiTagToIndex gt_idx = 8; // ci to assets, generated from gt + + // key: ck main key, value: assets and assets with sub keys, from (mir_annotations.image_cks) pred and gt + map<string, AssetAnnoIndex> ck_idx = 9; + + reserved 1, 2, 3, 4, 5, 6; +}; + +message CiTagToIndex { + // key: ci, value: annos + map<int32, MapStringToInt32List> cis = 1; + // key: ck main key, value: annos and annos with sub keys + map<string, AssetAnnoIndex> tags = 2; +}; + +message StringList { + repeated string asset_ids = 1; +}; + +message MapStringToInt32List { + map<string, Int32List> key_ids = 1; +}; + +message Int32List { + repeated int32 ids = 1; +} + +message AssetAnnoIndex { + map<string, Int32List> asset_annos = 1; // key: asset id, value: annotation indexes + map<string, MapStringToInt32List> sub_indexes = 2; // key: ck value, value: asset and its annotation indexes +}; + +/// ========== tasks.mir ========== +message MirTasks { + map<string, Task> tasks = 1; + string head_task_id = 2; +}; + +message Task { + TaskType type = 1; + /// user defined task name + string name = 2; + /// auto generated unique id + string task_id = 3; + /// execution time of this task + int32 timestamp = 5; // RFC 3339 date strings + /// (for training task): result model for cmd train + ModelMeta model = 6; + int32 return_code = 8; + string return_msg = 9; + Evaluation evaluation = 10; + /// (for import task): new types for cmd import, key: class name, value: asset count + map<string, int32> new_types = 11; + /// (for import task): reason for new types, True: added, False: ignored + bool new_types_added = 12; + + string serialized_task_parameters = 102; + string serialized_executor_config = 103; + string src_revs = 104; + string dst_rev = 105; + string executor = 106; + + reserved 4, 7, 100, 101; +}; + +message ModelMeta { + /// hash for models.tar.gz + string model_hash = 1; + /// model mAP + float mean_average_precision = 2; + /// context generated by train command + string context = 3; + map<string, ModelStage> stages = 4; + string best_stage_name = 5; + repeated string class_names = 6; +}; + +message ModelStage { + string stage_name = 1; + repeated string files = 2; + int32 timestamp = 3; + float mAP = 4; +}; + +message Evaluation { + EvaluateConfig config = 1; + SingleDatasetEvaluation dataset_evaluation = 3; + SingleDatasetEvaluation main_ck = 4; + map<string, SingleDatasetEvaluation> sub_cks = 5; + EvaluationState state = 6; + + reserved 2; +} + +message EvaluateConfig { + float conf_thr = 3; + string iou_thrs_interval = 4; + bool need_pr_curve = 5; + repeated int32 class_ids = 7; + string main_ck = 8; + + reserved 1, 2, 6; +} + +message SingleDatasetEvaluation { + float conf_thr = 1; + map<string, SingleIouEvaluation> iou_evaluations = 4; // key: string of iou threshold + SingleIouEvaluation iou_averaged_evaluation = 5; // average for all ious + + reserved 2, 3; +} + +message SingleIouEvaluation { + map<int32, SingleEvaluationElement> ci_evaluations = 1; // key: class ids + SingleEvaluationElement ci_averaged_evaluation = 2; // evaluations averaged by class ids + + reserved 3; +} + +message SingleEvaluationElement { + float ap = 1; + float ar = 2; + int32 tp = 3; + int32 fp = 4; + int32 fn = 5; + repeated FloatPoint pr_curve = 6; +} + +message IntPoint { + int32 x = 1; + int32 y = 2; + int32 z = 3; +} + +message FloatPoint { + float x = 1; + float y = 2; + float z = 3; +}
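Evaluation results nest as Evaluation, SingleDatasetEvaluation, SingleIouEvaluation and SingleEvaluationElement, keyed first by IoU threshold (as a string) and then by class id. A read-side sketch under the same hypothetical import path; how the Task itself is loaded from a mir repo is out of scope here:

    from mir.protos import mir_command_200_pb2 as mir200

    def print_ap(task: 'mir200.Task') -> None:
        evaluation = task.evaluation
        if evaluation.state != mir200.ES_READY:
            return  # not evaluated, or evaluation could not finish
        dataset_eval = evaluation.dataset_evaluation
        for iou_thr, iou_eval in dataset_eval.iou_evaluations.items():
            for class_id, element in iou_eval.ci_evaluations.items():
                print(f'iou={iou_thr} class={class_id} ap={element.ap:.4f} ar={element.ar:.4f}')
        averaged = dataset_eval.iou_averaged_evaluation.ci_averaged_evaluation
        print(f'averaged over ious and classes: ap={averaged.ap:.4f}')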
+ +enum EvaluationState { + // evaluate not started + ES_NOT_SET = 0; + // evaluation result ready to use + ES_READY = 1; + // evaluation not finished because there's no gt or pred + ES_NO_GT_OR_PRED = 2; + // evaluation not finished because there are too many images or too many class ids + ES_EXCEEDS_LIMIT = 3; + // evaluation not finished because there are no evaluate class ids + ES_NO_CLASS_IDS = 4; +} + +/// ========== context.mir ========== +message MirContext { + /// total images count + int32 images_cnt = 1; + + /// from pred and gt + map<string, SingleMapCount> cks_cnt = 6; + + int32 total_asset_mbytes = 11; + + AnnoStats pred_stats = 100; + AnnoStats gt_stats = 101; + + reserved 2, 3, 4, 5, 7, 8, 9, 10, 12; +}; + +message SingleMapCount { + int32 cnt = 1; + map<string, int32> sub_cnt = 2; +}; + +message AnnoStats { + int32 total_cnt = 1; + int32 positive_asset_cnt = 2; + int32 negative_asset_cnt = 3; + map<string, SingleMapCount> tags_cnt = 7; // key: main tag name, value: main tag count and sub tag names and counts + map<int32, int32> class_ids_cnt = 8; // key: class ids, value: asset count for this class id + + // Shortcut of class_ids for evaluation (dup. field as in SingleTaskAnnotations). + repeated int32 eval_class_ids = 9; + + reserved 4, 5, 6; +}; + +message ExportConfig { + // Asset config. + AssetFormat asset_format = 1; + string asset_dir = 2; + string asset_index_file = 3; + // Index file writes abs path. In TMI case, path should be converted, e.g. /in/assets. + string asset_index_prefix = 4; + string media_location = 5; + bool need_sub_folder = 6; + + // Annotation config. + AnnoFormat anno_format = 50; + string gt_dir = 51; + string gt_index_file = 52; + string gt_index_prefix = 53; + string pred_dir = 54; + string pred_index_file = 55; + string pred_index_prefix = 56; + string tvt_index_dir = 57; +} diff --git a/ymir/updater/app/mir/protos/mir_command_200_pb2.py b/ymir/updater/app/mir/protos/mir_command_200_pb2.py new file mode 100644 index 0000000000..b9de8162a9 --- /dev/null +++ b/ymir/updater/app/mir/protos/mir_command_200_pb2.py @@ -0,0 +1,3757 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: mir_command_200.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='mir_command_200.proto', + package='mir.command200', + syntax='proto3', + serialized_options=b'Z\007/protos', + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n\x15mir_command_200.proto\x12\x0emir.command200\"\xa7\x01\n\x0cMirMetadatas\x12@\n\nattributes\x18\x01 \x03(\x0b\x32,.mir.command200.MirMetadatas.AttributesEntry\x1aU\n\x0f\x41ttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x31\n\x05value\x18\x02 \x01(\x0b\x32\".mir.command200.MetadataAttributes:\x02\x38\x01\"\x85\x02\n\x12MetadataAttributes\x12,\n\ttimestamp\x18\x02 \x01(\x0b\x32\x19.mir.command200.Timestamp\x12)\n\x08tvt_type\x18\x03 \x01(\x0e\x32\x17.mir.command200.TvtType\x12-\n\nasset_type\x18\x04 \x01(\x0e\x32\x19.mir.command200.AssetType\x12\r\n\x05width\x18\x05 \x01(\x05\x12\x0e\n\x06height\x18\x06 \x01(\x05\x12\x16\n\x0eimage_channels\x18\x07 \x01(\x05\x12\x11\n\tbyte_size\x18\x08 \x01(\x05\x12\x17\n\x0forigin_filename\x18\t \x01(\tJ\x04\x08\x01\x10\x02\",\n\tTimestamp\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x10\n\x08\x64uration\x18\x02 \x01(\x02\"\xa6\x02\n\x0eMirAnnotations\x12;\n\x0cground_truth\x18\x03 \x01(\x0b\x32%.mir.command200.SingleTaskAnnotations\x12\x39\n\nprediction\x18\x04 \x01(\x0b\x32%.mir.command200.SingleTaskAnnotations\x12?\n\timage_cks\x18\x05 \x03(\x0b\x32,.mir.command200.MirAnnotations.ImageCksEntry\x1aO\n\rImageCksEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12-\n\x05value\x18\x02 \x01(\x0b\x32\x1e.mir.command200.SingleImageCks:\x02\x38\x01J\x04\x08\x01\x10\x02J\x04\x08\x02\x10\x03\"\x96\x04\n\x15SingleTaskAnnotations\x12V\n\x11image_annotations\x18\x01 \x03(\x0b\x32;.mir.command200.SingleTaskAnnotations.ImageAnnotationsEntry\x12\x0f\n\x07task_id\x18\x02 \x01(\t\x12&\n\x04type\x18\x03 \x01(\x0e\x32\x18.mir.command200.AnnoType\x12\x16\n\x0etask_class_ids\x18\x04 \x03(\x05\x12K\n\x0cmap_id_color\x18\x05 \x03(\x0b\x32\x35.mir.command200.SingleTaskAnnotations.MapIdColorEntry\x12\x16\n\x0e\x65val_class_ids\x18\n \x03(\x05\x12(\n\x05model\x18\x0b \x01(\x0b\x32\x19.mir.command200.ModelMeta\x12\x17\n\x0f\x65xecutor_config\x18\x0c \x01(\t\x1a_\n\x15ImageAnnotationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.mir.command200.SingleImageAnnotations:\x02\x38\x01\x1aK\n\x0fMapIdColorEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\'\n\x05value\x18\x02 \x01(\x0b\x32\x18.mir.command200.IntPoint:\x02\x38\x01\"\xc8\x01\n\x16SingleImageAnnotations\x12/\n\x05\x62oxes\x18\x02 \x03(\x0b\x32 .mir.command200.ObjectAnnotation\x12\x32\n\x08polygons\x18\x03 \x03(\x0b\x32 .mir.command200.ObjectAnnotation\x12,\n\x04mask\x18\x04 \x01(\x0b\x32\x1e.mir.command200.MaskAnnotation\x12\x15\n\rimg_class_ids\x18\x05 \x03(\x05J\x04\x08\x01\x10\x02\"\x89\x01\n\x0eSingleImageCks\x12\x34\n\x03\x63ks\x18\x01 \x03(\x0b\x32\'.mir.command200.SingleImageCks.CksEntry\x12\x15\n\rimage_quality\x18\x02 \x01(\x02\x1a*\n\x08\x43ksEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"R\n\x0eMaskAnnotation\x12\x15\n\rsemantic_mask\x18\x01 \x01(\x0c\x12\x15\n\rinstance_mask\x18\x02 
\x01(\x0c\x12\x12\n\nobject_ids\x18\x03 \x03(\x05\"\xe7\x02\n\x10ObjectAnnotation\x12\r\n\x05index\x18\x01 \x01(\x05\x12!\n\x03\x62ox\x18\x02 \x01(\x0b\x32\x14.mir.command200.Rect\x12\x10\n\x08\x63lass_id\x18\x03 \x01(\x05\x12\r\n\x05score\x18\x04 \x01(\x01\x12\x14\n\x0c\x61nno_quality\x18\x05 \x01(\x02\x12\x38\n\x04tags\x18\x06 \x03(\x0b\x32*.mir.command200.ObjectAnnotation.TagsEntry\x12/\n\x02\x63m\x18\x07 \x01(\x0e\x32#.mir.command200.ConfusionMatrixType\x12\x13\n\x0b\x64\x65t_link_id\x18\x08 \x01(\x05\x12\x12\n\nclass_name\x18\t \x01(\t\x12)\n\x07polygon\x18\n \x03(\x0b\x32\x18.mir.command200.IntPoint\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"H\n\x04Rect\x12\t\n\x01x\x18\x01 \x01(\x05\x12\t\n\x01y\x18\x02 \x01(\x05\x12\t\n\x01w\x18\x03 \x01(\x05\x12\t\n\x01h\x18\x04 \x01(\x05\x12\x14\n\x0crotate_angle\x18\x05 \x01(\x02\"\x95\x02\n\x0bMirKeywords\x12.\n\x08pred_idx\x18\x07 \x01(\x0b\x32\x1c.mir.command200.CiTagToIndex\x12,\n\x06gt_idx\x18\x08 \x01(\x0b\x32\x1c.mir.command200.CiTagToIndex\x12\x36\n\x06\x63k_idx\x18\t \x03(\x0b\x32&.mir.command200.MirKeywords.CkIdxEntry\x1aL\n\nCkIdxEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12-\n\x05value\x18\x02 \x01(\x0b\x32\x1e.mir.command200.AssetAnnoIndex:\x02\x38\x01J\x04\x08\x01\x10\x02J\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04J\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06J\x04\x08\x06\x10\x07\"\x97\x02\n\x0c\x43iTagToIndex\x12\x32\n\x03\x63is\x18\x01 \x03(\x0b\x32%.mir.command200.CiTagToIndex.CisEntry\x12\x34\n\x04tags\x18\x02 \x03(\x0b\x32&.mir.command200.CiTagToIndex.TagsEntry\x1aP\n\x08\x43isEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\x33\n\x05value\x18\x02 \x01(\x0b\x32$.mir.command200.MapStringToInt32List:\x02\x38\x01\x1aK\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12-\n\x05value\x18\x02 \x01(\x0b\x32\x1e.mir.command200.AssetAnnoIndex:\x02\x38\x01\"\x1f\n\nStringList\x12\x11\n\tasset_ids\x18\x01 \x03(\t\"\xa3\x01\n\x14MapStringToInt32List\x12\x41\n\x07key_ids\x18\x01 \x03(\x0b\x32\x30.mir.command200.MapStringToInt32List.KeyIdsEntry\x1aH\n\x0bKeyIdsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.mir.command200.Int32List:\x02\x38\x01\"\x18\n\tInt32List\x12\x0b\n\x03ids\x18\x01 \x03(\x05\"\xc1\x02\n\x0e\x41ssetAnnoIndex\x12\x43\n\x0b\x61sset_annos\x18\x01 \x03(\x0b\x32..mir.command200.AssetAnnoIndex.AssetAnnosEntry\x12\x43\n\x0bsub_indexes\x18\x02 \x03(\x0b\x32..mir.command200.AssetAnnoIndex.SubIndexesEntry\x1aL\n\x0f\x41ssetAnnosEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.mir.command200.Int32List:\x02\x38\x01\x1aW\n\x0fSubIndexesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x33\n\x05value\x18\x02 \x01(\x0b\x32$.mir.command200.MapStringToInt32List:\x02\x38\x01\"\x98\x01\n\x08MirTasks\x12\x32\n\x05tasks\x18\x01 \x03(\x0b\x32#.mir.command200.MirTasks.TasksEntry\x12\x14\n\x0chead_task_id\x18\x02 \x01(\t\x1a\x42\n\nTasksEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12#\n\x05value\x18\x02 \x01(\x0b\x32\x14.mir.command200.Task:\x02\x38\x01\"\xf9\x03\n\x04Task\x12&\n\x04type\x18\x01 \x01(\x0e\x32\x18.mir.command200.TaskType\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0f\n\x07task_id\x18\x03 \x01(\t\x12\x11\n\ttimestamp\x18\x05 \x01(\x05\x12(\n\x05model\x18\x06 \x01(\x0b\x32\x19.mir.command200.ModelMeta\x12\x13\n\x0breturn_code\x18\x08 \x01(\x05\x12\x12\n\nreturn_msg\x18\t \x01(\t\x12.\n\nevaluation\x18\n \x01(\x0b\x32\x1a.mir.command200.Evaluation\x12\x35\n\tnew_types\x18\x0b 
\x03(\x0b\x32\".mir.command200.Task.NewTypesEntry\x12\x17\n\x0fnew_types_added\x18\x0c \x01(\x08\x12\"\n\x1aserialized_task_parameters\x18\x66 \x01(\t\x12\"\n\x1aserialized_executor_config\x18g \x01(\t\x12\x10\n\x08src_revs\x18h \x01(\t\x12\x0f\n\x07\x64st_rev\x18i \x01(\t\x12\x10\n\x08\x65xecutor\x18j \x01(\t\x1a/\n\rNewTypesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01J\x04\x08\x04\x10\x05J\x04\x08\x07\x10\x08J\x04\x08\x64\x10\x65J\x04\x08\x65\x10\x66\"\x80\x02\n\tModelMeta\x12\x12\n\nmodel_hash\x18\x01 \x01(\t\x12\x1e\n\x16mean_average_precision\x18\x02 \x01(\x02\x12\x0f\n\x07\x63ontext\x18\x03 \x01(\t\x12\x35\n\x06stages\x18\x04 \x03(\x0b\x32%.mir.command200.ModelMeta.StagesEntry\x12\x17\n\x0f\x62\x65st_stage_name\x18\x05 \x01(\t\x12\x13\n\x0b\x63lass_names\x18\x06 \x03(\t\x1aI\n\x0bStagesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12)\n\x05value\x18\x02 \x01(\x0b\x32\x1a.mir.command200.ModelStage:\x02\x38\x01\"O\n\nModelStage\x12\x12\n\nstage_name\x18\x01 \x01(\t\x12\r\n\x05\x66iles\x18\x02 \x03(\t\x12\x11\n\ttimestamp\x18\x03 \x01(\x05\x12\x0b\n\x03mAP\x18\x04 \x01(\x02\"\x82\x03\n\nEvaluation\x12.\n\x06\x63onfig\x18\x01 \x01(\x0b\x32\x1e.mir.command200.EvaluateConfig\x12\x43\n\x12\x64\x61taset_evaluation\x18\x03 \x01(\x0b\x32\'.mir.command200.SingleDatasetEvaluation\x12\x38\n\x07main_ck\x18\x04 \x01(\x0b\x32\'.mir.command200.SingleDatasetEvaluation\x12\x37\n\x07sub_cks\x18\x05 \x03(\x0b\x32&.mir.command200.Evaluation.SubCksEntry\x12.\n\x05state\x18\x06 \x01(\x0e\x32\x1f.mir.command200.EvaluationState\x1aV\n\x0bSubCksEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x36\n\x05value\x18\x02 \x01(\x0b\x32\'.mir.command200.SingleDatasetEvaluation:\x02\x38\x01J\x04\x08\x02\x10\x03\"\x8a\x01\n\x0e\x45valuateConfig\x12\x10\n\x08\x63onf_thr\x18\x03 \x01(\x02\x12\x19\n\x11iou_thrs_interval\x18\x04 \x01(\t\x12\x15\n\rneed_pr_curve\x18\x05 \x01(\x08\x12\x11\n\tclass_ids\x18\x07 \x03(\x05\x12\x0f\n\x07main_ck\x18\x08 \x01(\tJ\x04\x08\x01\x10\x02J\x04\x08\x02\x10\x03J\x04\x08\x06\x10\x07\"\xaf\x02\n\x17SingleDatasetEvaluation\x12\x10\n\x08\x63onf_thr\x18\x01 \x01(\x02\x12T\n\x0fiou_evaluations\x18\x04 \x03(\x0b\x32;.mir.command200.SingleDatasetEvaluation.IouEvaluationsEntry\x12\x44\n\x17iou_averaged_evaluation\x18\x05 \x01(\x0b\x32#.mir.command200.SingleIouEvaluation\x1aZ\n\x13IouEvaluationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x32\n\x05value\x18\x02 \x01(\x0b\x32#.mir.command200.SingleIouEvaluation:\x02\x38\x01J\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04\"\x93\x02\n\x13SingleIouEvaluation\x12N\n\x0e\x63i_evaluations\x18\x01 \x03(\x0b\x32\x36.mir.command200.SingleIouEvaluation.CiEvaluationsEntry\x12G\n\x16\x63i_averaged_evaluation\x18\x02 \x01(\x0b\x32\'.mir.command200.SingleEvaluationElement\x1a]\n\x12\x43iEvaluationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\x36\n\x05value\x18\x02 \x01(\x0b\x32\'.mir.command200.SingleEvaluationElement:\x02\x38\x01J\x04\x08\x03\x10\x04\"\x83\x01\n\x17SingleEvaluationElement\x12\n\n\x02\x61p\x18\x01 \x01(\x02\x12\n\n\x02\x61r\x18\x02 \x01(\x02\x12\n\n\x02tp\x18\x03 \x01(\x05\x12\n\n\x02\x66p\x18\x04 \x01(\x05\x12\n\n\x02\x66n\x18\x05 \x01(\x05\x12,\n\x08pr_curve\x18\x06 \x03(\x0b\x32\x1a.mir.command200.FloatPoint\"+\n\x08IntPoint\x12\t\n\x01x\x18\x01 \x01(\x05\x12\t\n\x01y\x18\x02 \x01(\x05\x12\t\n\x01z\x18\x03 \x01(\x05\"-\n\nFloatPoint\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02\x12\t\n\x01z\x18\x03 \x01(\x02\"\xd6\x02\n\nMirContext\x12\x12\n\nimages_cnt\x18\x01 \x01(\x05\x12\x37\n\x07\x63ks_cnt\x18\x06 
\x03(\x0b\x32&.mir.command200.MirContext.CksCntEntry\x12\x1a\n\x12total_asset_mbytes\x18\x0b \x01(\x05\x12-\n\npred_stats\x18\x64 \x01(\x0b\x32\x19.mir.command200.AnnoStats\x12+\n\x08gt_stats\x18\x65 \x01(\x0b\x32\x19.mir.command200.AnnoStats\x1aM\n\x0b\x43ksCntEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12-\n\x05value\x18\x02 \x01(\x0b\x32\x1e.mir.command200.SingleMapCount:\x02\x38\x01J\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04J\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06J\x04\x08\x07\x10\x08J\x04\x08\x08\x10\tJ\x04\x08\t\x10\nJ\x04\x08\n\x10\x0bJ\x04\x08\x0c\x10\r\"\x89\x01\n\x0eSingleMapCount\x12\x0b\n\x03\x63nt\x18\x01 \x01(\x05\x12;\n\x07sub_cnt\x18\x02 \x03(\x0b\x32*.mir.command200.SingleMapCount.SubCntEntry\x1a-\n\x0bSubCntEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\"\x81\x03\n\tAnnoStats\x12\x11\n\ttotal_cnt\x18\x01 \x01(\x05\x12\x1a\n\x12positive_asset_cnt\x18\x02 \x01(\x05\x12\x1a\n\x12negative_asset_cnt\x18\x03 \x01(\x05\x12\x38\n\x08tags_cnt\x18\x07 \x03(\x0b\x32&.mir.command200.AnnoStats.TagsCntEntry\x12\x41\n\rclass_ids_cnt\x18\x08 \x03(\x0b\x32*.mir.command200.AnnoStats.ClassIdsCntEntry\x12\x16\n\x0e\x65val_class_ids\x18\t \x03(\x05\x1aN\n\x0cTagsCntEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12-\n\x05value\x18\x02 \x01(\x0b\x32\x1e.mir.command200.SingleMapCount:\x02\x38\x01\x1a\x32\n\x10\x43lassIdsCntEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01J\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06J\x04\x08\x06\x10\x07\"\x89\x03\n\x0c\x45xportConfig\x12\x31\n\x0c\x61sset_format\x18\x01 \x01(\x0e\x32\x1b.mir.command200.AssetFormat\x12\x11\n\tasset_dir\x18\x02 \x01(\t\x12\x18\n\x10\x61sset_index_file\x18\x03 \x01(\t\x12\x1a\n\x12\x61sset_index_prefix\x18\x04 \x01(\t\x12\x16\n\x0emedia_location\x18\x05 \x01(\t\x12\x17\n\x0fneed_sub_folder\x18\x06 \x01(\x08\x12/\n\x0b\x61nno_format\x18\x32 \x01(\x0e\x32\x1a.mir.command200.AnnoFormat\x12\x0e\n\x06gt_dir\x18\x33 \x01(\t\x12\x15\n\rgt_index_file\x18\x34 \x01(\t\x12\x17\n\x0fgt_index_prefix\x18\x35 \x01(\t\x12\x10\n\x08pred_dir\x18\x36 \x01(\t\x12\x17\n\x0fpred_index_file\x18\x37 \x01(\t\x12\x19\n\x11pred_index_prefix\x18\x38 \x01(\t\x12\x15\n\rtvt_index_dir\x18\x39 
\x01(\t*Z\n\x07TvtType\x12\x12\n\x0eTvtTypeUnknown\x10\x00\x12\x13\n\x0fTvtTypeTraining\x10\x01\x12\x15\n\x11TvtTypeValidation\x10\x02\x12\x0f\n\x0bTvtTypeTest\x10\x03*\x88\x02\n\tAssetType\x12\x14\n\x10\x41ssetTypeUnknown\x10\x00\x12\x16\n\x12\x41ssetTypeImageJpeg\x10\x01\x12\x15\n\x11\x41ssetTypeImagePng\x10\x02\x12\x1a\n\x16\x41ssetTypeImagePixelMat\x10\x03\x12\x19\n\x15\x41ssetTypeImageYuv420p\x10\x04\x12\x1a\n\x16\x41ssetTypeImageYuv420sp\x10\x05\x12\x19\n\x15\x41ssetTypeImageYuv422p\x10\x06\x12\x1a\n\x16\x41ssetTypeImageYuv422sp\x10\x07\x12\x15\n\x11\x41ssetTypeImageBmp\x10\x08\x12\x15\n\x11\x41ssetTypeVideoMp4\x10\x65*\xf8\x02\n\x08TaskType\x12\x13\n\x0fTaskTypeUnknown\x10\x00\x12\x14\n\x10TaskTypeTraining\x10\x01\x12\x12\n\x0eTaskTypeMining\x10\x02\x12\x11\n\rTaskTypeLabel\x10\x03\x12\x12\n\x0eTaskTypeFilter\x10\x04\x12\x16\n\x12TaskTypeImportData\x10\x05\x12\x16\n\x12TaskTypeExportData\x10\x06\x12\x14\n\x10TaskTypeCopyData\x10\x07\x12\x11\n\rTaskTypeMerge\x10\x08\x12\x11\n\rTaskTypeInfer\x10\t\x12\x14\n\x10TaskTypeSampling\x10\n\x12\x12\n\x0eTaskTypeFusion\x10\x0b\x12\x10\n\x0cTaskTypeInit\x10\x0c\x12\x17\n\x13TaskTypeImportModel\x10\r\x12\x15\n\x11TaskTypeCopyModel\x10\x0e\x12\x18\n\x14TaskTypeDatasetInfer\x10\x0f\x12\x14\n\x10TaskTypeEvaluate\x10\x10*\x87\x01\n\tTaskState\x12\x14\n\x10TaskStateUnknown\x10\x00\x12\x14\n\x10TaskStatePending\x10\x01\x12\x14\n\x10TaskStateRunning\x10\x02\x12\x11\n\rTaskStateDone\x10\x03\x12\x12\n\x0eTaskStateError\x10\x04\x12\x11\n\rTaskStateMiss\x10\x05*L\n\x08Sha1Type\x12\x15\n\x11SHA1_TYPE_UNKNOWN\x10\x00\x12\x13\n\x0fSHA1_TYPE_ASSET\x10\x01\x12\x14\n\x10SHA1_TYPE_COMMIT\x10\x02*f\n\nMirStorage\x12\x11\n\rMIR_METADATAS\x10\x00\x12\x13\n\x0fMIR_ANNOTATIONS\x10\x01\x12\x10\n\x0cMIR_KEYWORDS\x10\x02\x12\r\n\tMIR_TASKS\x10\x03\x12\x0f\n\x0bMIR_CONTEXT\x10\x04*\x87\x01\n\nAnnoFormat\x12\x14\n\x10\x41\x46_NO_ANNOTATION\x10\x00\x12\x15\n\x11\x41\x46_DET_PASCAL_VOC\x10\x01\x12\x13\n\x0f\x41\x46_DET_ARK_JSON\x10\x02\x12\x12\n\x0e\x41\x46_DET_LS_JSON\x10\x03\x12\x12\n\x0e\x41\x46_SEG_POLYGON\x10\x04\x12\x0f\n\x0b\x41\x46_SEG_MASK\x10\x05*6\n\x0b\x41ssetFormat\x12\x0e\n\nAF_UNKNOWN\x10\x00\x12\n\n\x06\x41\x46_RAW\x10\x01\x12\x0b\n\x07\x41\x46_LMDB\x10\x02*]\n\x08\x41nnoType\x12\x0e\n\nAT_UNKNOWN\x10\x00\x12\x0c\n\x08\x41T_CLASS\x10\x01\x12\x0e\n\nAT_DET_BOX\x10\x02\x12\x12\n\x0e\x41T_SEG_POLYGON\x10\x03\x12\x0f\n\x0b\x41T_SEG_MASK\x10\x04*d\n\x13\x43onfusionMatrixType\x12\n\n\x06NotSet\x10\x00\x12\x06\n\x02TP\x10\x01\x12\x06\n\x02\x46P\x10\x02\x12\x06\n\x02\x46N\x10\x03\x12\x06\n\x02TN\x10\x04\x12\x0b\n\x07Unknown\x10\x05\x12\x07\n\x03MTP\x10\x0b\x12\x0b\n\x07IGNORED\x10\x0c*p\n\x0f\x45valuationState\x12\x0e\n\nES_NOT_SET\x10\x00\x12\x0c\n\x08\x45S_READY\x10\x01\x12\x14\n\x10\x45S_NO_GT_OR_PRED\x10\x02\x12\x14\n\x10\x45S_EXCEEDS_LIMIT\x10\x03\x12\x13\n\x0f\x45S_NO_CLASS_IDS\x10\x04\x42\tZ\x07/protosb\x06proto3' +) + +_TVTTYPE = _descriptor.EnumDescriptor( + name='TvtType', + full_name='mir.command200.TvtType', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='TvtTypeUnknown', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TvtTypeTraining', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TvtTypeValidation', index=2, number=2, + serialized_options=None, + type=None, + 
create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TvtTypeTest', index=3, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + serialized_start=6941, + serialized_end=7031, +) +_sym_db.RegisterEnumDescriptor(_TVTTYPE) + +TvtType = enum_type_wrapper.EnumTypeWrapper(_TVTTYPE) +_ASSETTYPE = _descriptor.EnumDescriptor( + name='AssetType', + full_name='mir.command200.AssetType', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='AssetTypeUnknown', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AssetTypeImageJpeg', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AssetTypeImagePng', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AssetTypeImagePixelMat', index=3, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AssetTypeImageYuv420p', index=4, number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AssetTypeImageYuv420sp', index=5, number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AssetTypeImageYuv422p', index=6, number=6, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AssetTypeImageYuv422sp', index=7, number=7, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AssetTypeImageBmp', index=8, number=8, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AssetTypeVideoMp4', index=9, number=101, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + serialized_start=7034, + serialized_end=7298, +) +_sym_db.RegisterEnumDescriptor(_ASSETTYPE) + +AssetType = enum_type_wrapper.EnumTypeWrapper(_ASSETTYPE) +_TASKTYPE = _descriptor.EnumDescriptor( + name='TaskType', + full_name='mir.command200.TaskType', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='TaskTypeUnknown', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskTypeTraining', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskTypeMining', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskTypeLabel', index=3, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskTypeFilter', index=4, number=4, + serialized_options=None, + type=None, + 
create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskTypeImportData', index=5, number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskTypeExportData', index=6, number=6, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskTypeCopyData', index=7, number=7, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskTypeMerge', index=8, number=8, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskTypeInfer', index=9, number=9, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskTypeSampling', index=10, number=10, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskTypeFusion', index=11, number=11, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskTypeInit', index=12, number=12, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskTypeImportModel', index=13, number=13, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskTypeCopyModel', index=14, number=14, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskTypeDatasetInfer', index=15, number=15, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskTypeEvaluate', index=16, number=16, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + serialized_start=7301, + serialized_end=7677, +) +_sym_db.RegisterEnumDescriptor(_TASKTYPE) + +TaskType = enum_type_wrapper.EnumTypeWrapper(_TASKTYPE) +_TASKSTATE = _descriptor.EnumDescriptor( + name='TaskState', + full_name='mir.command200.TaskState', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='TaskStateUnknown', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskStatePending', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskStateRunning', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskStateDone', index=3, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskStateError', index=4, number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskStateMiss', index=5, number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, 
+ serialized_start=7680, + serialized_end=7815, +) +_sym_db.RegisterEnumDescriptor(_TASKSTATE) + +TaskState = enum_type_wrapper.EnumTypeWrapper(_TASKSTATE) +_SHA1TYPE = _descriptor.EnumDescriptor( + name='Sha1Type', + full_name='mir.command200.Sha1Type', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='SHA1_TYPE_UNKNOWN', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='SHA1_TYPE_ASSET', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='SHA1_TYPE_COMMIT', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + serialized_start=7817, + serialized_end=7893, +) +_sym_db.RegisterEnumDescriptor(_SHA1TYPE) + +Sha1Type = enum_type_wrapper.EnumTypeWrapper(_SHA1TYPE) +_MIRSTORAGE = _descriptor.EnumDescriptor( + name='MirStorage', + full_name='mir.command200.MirStorage', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='MIR_METADATAS', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='MIR_ANNOTATIONS', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='MIR_KEYWORDS', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='MIR_TASKS', index=3, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='MIR_CONTEXT', index=4, number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + serialized_start=7895, + serialized_end=7997, +) +_sym_db.RegisterEnumDescriptor(_MIRSTORAGE) + +MirStorage = enum_type_wrapper.EnumTypeWrapper(_MIRSTORAGE) +_ANNOFORMAT = _descriptor.EnumDescriptor( + name='AnnoFormat', + full_name='mir.command200.AnnoFormat', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='AF_NO_ANNOTATION', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AF_DET_PASCAL_VOC', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AF_DET_ARK_JSON', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AF_DET_LS_JSON', index=3, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AF_SEG_POLYGON', index=4, number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AF_SEG_MASK', index=5, number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + 
serialized_options=None, + serialized_start=8000, + serialized_end=8135, +) +_sym_db.RegisterEnumDescriptor(_ANNOFORMAT) + +AnnoFormat = enum_type_wrapper.EnumTypeWrapper(_ANNOFORMAT) +_ASSETFORMAT = _descriptor.EnumDescriptor( + name='AssetFormat', + full_name='mir.command200.AssetFormat', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='AF_UNKNOWN', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AF_RAW', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AF_LMDB', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + serialized_start=8137, + serialized_end=8191, +) +_sym_db.RegisterEnumDescriptor(_ASSETFORMAT) + +AssetFormat = enum_type_wrapper.EnumTypeWrapper(_ASSETFORMAT) +_ANNOTYPE = _descriptor.EnumDescriptor( + name='AnnoType', + full_name='mir.command200.AnnoType', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='AT_UNKNOWN', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AT_CLASS', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AT_DET_BOX', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AT_SEG_POLYGON', index=3, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='AT_SEG_MASK', index=4, number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + serialized_start=8193, + serialized_end=8286, +) +_sym_db.RegisterEnumDescriptor(_ANNOTYPE) + +AnnoType = enum_type_wrapper.EnumTypeWrapper(_ANNOTYPE) +_CONFUSIONMATRIXTYPE = _descriptor.EnumDescriptor( + name='ConfusionMatrixType', + full_name='mir.command200.ConfusionMatrixType', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='NotSet', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TP', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='FP', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='FN', index=3, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TN', index=4, number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='Unknown', index=5, number=5, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='MTP', index=6, number=11, + 
serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='IGNORED', index=7, number=12, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + serialized_start=8288, + serialized_end=8388, +) +_sym_db.RegisterEnumDescriptor(_CONFUSIONMATRIXTYPE) + +ConfusionMatrixType = enum_type_wrapper.EnumTypeWrapper(_CONFUSIONMATRIXTYPE) +_EVALUATIONSTATE = _descriptor.EnumDescriptor( + name='EvaluationState', + full_name='mir.command200.EvaluationState', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='ES_NOT_SET', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='ES_READY', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='ES_NO_GT_OR_PRED', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='ES_EXCEEDS_LIMIT', index=3, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='ES_NO_CLASS_IDS', index=4, number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + serialized_start=8390, + serialized_end=8502, +) +_sym_db.RegisterEnumDescriptor(_EVALUATIONSTATE) + +EvaluationState = enum_type_wrapper.EnumTypeWrapper(_EVALUATIONSTATE) +TvtTypeUnknown = 0 +TvtTypeTraining = 1 +TvtTypeValidation = 2 +TvtTypeTest = 3 +AssetTypeUnknown = 0 +AssetTypeImageJpeg = 1 +AssetTypeImagePng = 2 +AssetTypeImagePixelMat = 3 +AssetTypeImageYuv420p = 4 +AssetTypeImageYuv420sp = 5 +AssetTypeImageYuv422p = 6 +AssetTypeImageYuv422sp = 7 +AssetTypeImageBmp = 8 +AssetTypeVideoMp4 = 101 +TaskTypeUnknown = 0 +TaskTypeTraining = 1 +TaskTypeMining = 2 +TaskTypeLabel = 3 +TaskTypeFilter = 4 +TaskTypeImportData = 5 +TaskTypeExportData = 6 +TaskTypeCopyData = 7 +TaskTypeMerge = 8 +TaskTypeInfer = 9 +TaskTypeSampling = 10 +TaskTypeFusion = 11 +TaskTypeInit = 12 +TaskTypeImportModel = 13 +TaskTypeCopyModel = 14 +TaskTypeDatasetInfer = 15 +TaskTypeEvaluate = 16 +TaskStateUnknown = 0 +TaskStatePending = 1 +TaskStateRunning = 2 +TaskStateDone = 3 +TaskStateError = 4 +TaskStateMiss = 5 +SHA1_TYPE_UNKNOWN = 0 +SHA1_TYPE_ASSET = 1 +SHA1_TYPE_COMMIT = 2 +MIR_METADATAS = 0 +MIR_ANNOTATIONS = 1 +MIR_KEYWORDS = 2 +MIR_TASKS = 3 +MIR_CONTEXT = 4 +AF_NO_ANNOTATION = 0 +AF_DET_PASCAL_VOC = 1 +AF_DET_ARK_JSON = 2 +AF_DET_LS_JSON = 3 +AF_SEG_POLYGON = 4 +AF_SEG_MASK = 5 +AF_UNKNOWN = 0 +AF_RAW = 1 +AF_LMDB = 2 +AT_UNKNOWN = 0 +AT_CLASS = 1 +AT_DET_BOX = 2 +AT_SEG_POLYGON = 3 +AT_SEG_MASK = 4 +NotSet = 0 +TP = 1 +FP = 2 +FN = 3 +TN = 4 +Unknown = 5 +MTP = 11 +IGNORED = 12 +ES_NOT_SET = 0 +ES_READY = 1 +ES_NO_GT_OR_PRED = 2 +ES_EXCEEDS_LIMIT = 3 +ES_NO_CLASS_IDS = 4 + + + +_MIRMETADATAS_ATTRIBUTESENTRY = _descriptor.Descriptor( + name='AttributesEntry', + full_name='mir.command200.MirMetadatas.AttributesEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='mir.command200.MirMetadatas.AttributesEntry.key', 
index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='mir.command200.MirMetadatas.AttributesEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=124, + serialized_end=209, +) + +_MIRMETADATAS = _descriptor.Descriptor( + name='MirMetadatas', + full_name='mir.command200.MirMetadatas', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='attributes', full_name='mir.command200.MirMetadatas.attributes', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_MIRMETADATAS_ATTRIBUTESENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=42, + serialized_end=209, +) + + +_METADATAATTRIBUTES = _descriptor.Descriptor( + name='MetadataAttributes', + full_name='mir.command200.MetadataAttributes', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='timestamp', full_name='mir.command200.MetadataAttributes.timestamp', index=0, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='tvt_type', full_name='mir.command200.MetadataAttributes.tvt_type', index=1, + number=3, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='asset_type', full_name='mir.command200.MetadataAttributes.asset_type', index=2, + number=4, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='width', full_name='mir.command200.MetadataAttributes.width', index=3, + number=5, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='height', full_name='mir.command200.MetadataAttributes.height', index=4, + number=6, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='image_channels', full_name='mir.command200.MetadataAttributes.image_channels', index=5, + number=7, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='byte_size', full_name='mir.command200.MetadataAttributes.byte_size', index=6, + number=8, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='origin_filename', full_name='mir.command200.MetadataAttributes.origin_filename', index=7, + number=9, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=212, + serialized_end=473, +) + + +_TIMESTAMP = _descriptor.Descriptor( + name='Timestamp', + full_name='mir.command200.Timestamp', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='start', full_name='mir.command200.Timestamp.start', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='duration', full_name='mir.command200.Timestamp.duration', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=475, + serialized_end=519, +) + + +_MIRANNOTATIONS_IMAGECKSENTRY = _descriptor.Descriptor( + name='ImageCksEntry', + full_name='mir.command200.MirAnnotations.ImageCksEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='mir.command200.MirAnnotations.ImageCksEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + 
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='value', full_name='mir.command200.MirAnnotations.ImageCksEntry.value', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=b'8\001',
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=725,
+  serialized_end=804,
+)
+
+_MIRANNOTATIONS = _descriptor.Descriptor(
+  name='MirAnnotations',
+  full_name='mir.command200.MirAnnotations',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='ground_truth', full_name='mir.command200.MirAnnotations.ground_truth', index=0,
+      number=3, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='prediction', full_name='mir.command200.MirAnnotations.prediction', index=1,
+      number=4, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='image_cks', full_name='mir.command200.MirAnnotations.image_cks', index=2,
+      number=5, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[_MIRANNOTATIONS_IMAGECKSENTRY, ],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=522,
+  serialized_end=816,
+)
+
+
+_SINGLETASKANNOTATIONS_IMAGEANNOTATIONSENTRY = _descriptor.Descriptor(
+  name='ImageAnnotationsEntry',
+  full_name='mir.command200.SingleTaskAnnotations.ImageAnnotationsEntry',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='key', full_name='mir.command200.SingleTaskAnnotations.ImageAnnotationsEntry.key', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='value', full_name='mir.command200.SingleTaskAnnotations.ImageAnnotationsEntry.value', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=b'8\001',
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1181,
+  serialized_end=1276,
+)
+
+_SINGLETASKANNOTATIONS_MAPIDCOLORENTRY = _descriptor.Descriptor(
+  name='MapIdColorEntry',
+  full_name='mir.command200.SingleTaskAnnotations.MapIdColorEntry',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='key', full_name='mir.command200.SingleTaskAnnotations.MapIdColorEntry.key', index=0,
+      number=1, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='value', full_name='mir.command200.SingleTaskAnnotations.MapIdColorEntry.value', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=b'8\001',
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1278,
+  serialized_end=1353,
+)
+
+_SINGLETASKANNOTATIONS = _descriptor.Descriptor(
+  name='SingleTaskAnnotations',
+  full_name='mir.command200.SingleTaskAnnotations',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='image_annotations', full_name='mir.command200.SingleTaskAnnotations.image_annotations', index=0,
+      number=1, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='task_id', full_name='mir.command200.SingleTaskAnnotations.task_id', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='type', full_name='mir.command200.SingleTaskAnnotations.type', index=2,
+      number=3, type=14, cpp_type=8, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='task_class_ids', full_name='mir.command200.SingleTaskAnnotations.task_class_ids', index=3,
+      number=4, type=5, cpp_type=1, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='map_id_color', full_name='mir.command200.SingleTaskAnnotations.map_id_color', index=4,
+      number=5, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='eval_class_ids', full_name='mir.command200.SingleTaskAnnotations.eval_class_ids', index=5,
+      number=10, type=5, cpp_type=1, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='model', full_name='mir.command200.SingleTaskAnnotations.model', index=6,
+      number=11, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='executor_config', full_name='mir.command200.SingleTaskAnnotations.executor_config', index=7,
+      number=12, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[_SINGLETASKANNOTATIONS_IMAGEANNOTATIONSENTRY, _SINGLETASKANNOTATIONS_MAPIDCOLORENTRY, ],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=819,
+  serialized_end=1353,
+)
+
+
+_SINGLEIMAGEANNOTATIONS = _descriptor.Descriptor(
+  name='SingleImageAnnotations',
+  full_name='mir.command200.SingleImageAnnotations',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='boxes', full_name='mir.command200.SingleImageAnnotations.boxes', index=0,
+      number=2, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='polygons', full_name='mir.command200.SingleImageAnnotations.polygons', index=1,
+      number=3, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='mask', full_name='mir.command200.SingleImageAnnotations.mask', index=2,
+      number=4, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='img_class_ids', full_name='mir.command200.SingleImageAnnotations.img_class_ids', index=3,
+      number=5, type=5, cpp_type=1, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1356,
+  serialized_end=1556,
+)
+
+
+_SINGLEIMAGECKS_CKSENTRY = _descriptor.Descriptor(
+  name='CksEntry',
+  full_name='mir.command200.SingleImageCks.CksEntry',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='key', full_name='mir.command200.SingleImageCks.CksEntry.key', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='value', full_name='mir.command200.SingleImageCks.CksEntry.value', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=b'8\001',
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1654,
+  serialized_end=1696,
+)
+
+_SINGLEIMAGECKS = _descriptor.Descriptor(
+  name='SingleImageCks',
+  full_name='mir.command200.SingleImageCks',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='cks', full_name='mir.command200.SingleImageCks.cks', index=0,
+      number=1, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='image_quality', full_name='mir.command200.SingleImageCks.image_quality', index=1,
+      number=2, type=2, cpp_type=6, label=1,
+      has_default_value=False, default_value=float(0),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[_SINGLEIMAGECKS_CKSENTRY, ],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1559,
+  serialized_end=1696,
+)
+
+
+_MASKANNOTATION = _descriptor.Descriptor(
+  name='MaskAnnotation',
+  full_name='mir.command200.MaskAnnotation',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='semantic_mask', full_name='mir.command200.MaskAnnotation.semantic_mask', index=0,
+      number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='instance_mask', full_name='mir.command200.MaskAnnotation.instance_mask', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=b"", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='object_ids', full_name='mir.command200.MaskAnnotation.object_ids', index=2, + number=3, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1698, + serialized_end=1780, +) + + +_OBJECTANNOTATION_TAGSENTRY = _descriptor.Descriptor( + name='TagsEntry', + full_name='mir.command200.ObjectAnnotation.TagsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='mir.command200.ObjectAnnotation.TagsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='mir.command200.ObjectAnnotation.TagsEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2099, + serialized_end=2142, +) + +_OBJECTANNOTATION = _descriptor.Descriptor( + name='ObjectAnnotation', + full_name='mir.command200.ObjectAnnotation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='index', full_name='mir.command200.ObjectAnnotation.index', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='box', full_name='mir.command200.ObjectAnnotation.box', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, 
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='class_id', full_name='mir.command200.ObjectAnnotation.class_id', index=2,
+      number=3, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='score', full_name='mir.command200.ObjectAnnotation.score', index=3,
+      number=4, type=1, cpp_type=5, label=1,
+      has_default_value=False, default_value=float(0),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='anno_quality', full_name='mir.command200.ObjectAnnotation.anno_quality', index=4,
+      number=5, type=2, cpp_type=6, label=1,
+      has_default_value=False, default_value=float(0),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='tags', full_name='mir.command200.ObjectAnnotation.tags', index=5,
+      number=6, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='cm', full_name='mir.command200.ObjectAnnotation.cm', index=6,
+      number=7, type=14, cpp_type=8, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='det_link_id', full_name='mir.command200.ObjectAnnotation.det_link_id', index=7,
+      number=8, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='class_name', full_name='mir.command200.ObjectAnnotation.class_name', index=8,
+      number=9, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='polygon', full_name='mir.command200.ObjectAnnotation.polygon', index=9,
+      number=10, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[_OBJECTANNOTATION_TAGSENTRY, ],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1783,
+  serialized_end=2142,
+)
+
+
+_RECT = _descriptor.Descriptor(
+  name='Rect',
+  full_name='mir.command200.Rect',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='x', full_name='mir.command200.Rect.x', index=0,
+      number=1, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='y', full_name='mir.command200.Rect.y', index=1,
+      number=2, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='w', full_name='mir.command200.Rect.w', index=2,
+      number=3, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='h', full_name='mir.command200.Rect.h', index=3,
+      number=4, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='rotate_angle', full_name='mir.command200.Rect.rotate_angle', index=4,
+      number=5, type=2, cpp_type=6, label=1,
+      has_default_value=False, default_value=float(0),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=2144,
+  serialized_end=2216,
+)
+
+
+_MIRKEYWORDS_CKIDXENTRY = _descriptor.Descriptor(
+  name='CkIdxEntry',
+  full_name='mir.command200.MirKeywords.CkIdxEntry',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='key', full_name='mir.command200.MirKeywords.CkIdxEntry.key', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='value', full_name='mir.command200.MirKeywords.CkIdxEntry.value', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=b'8\001',
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=2384,
+  serialized_end=2460,
+)
+
+_MIRKEYWORDS = _descriptor.Descriptor(
+  name='MirKeywords',
+  full_name='mir.command200.MirKeywords',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='pred_idx', full_name='mir.command200.MirKeywords.pred_idx', index=0,
+      number=7, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='gt_idx', full_name='mir.command200.MirKeywords.gt_idx', index=1,
+      number=8, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='ck_idx', full_name='mir.command200.MirKeywords.ck_idx', index=2,
+      number=9, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[_MIRKEYWORDS_CKIDXENTRY, ],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=2219,
+  serialized_end=2496,
+)
+
+
+_CITAGTOINDEX_CISENTRY = _descriptor.Descriptor(
+  name='CisEntry',
+  full_name='mir.command200.CiTagToIndex.CisEntry',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='key', full_name='mir.command200.CiTagToIndex.CisEntry.key', index=0,
+      number=1, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='value', full_name='mir.command200.CiTagToIndex.CisEntry.value', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=b'8\001',
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=2621,
+  serialized_end=2701,
+)
+
+_CITAGTOINDEX_TAGSENTRY = _descriptor.Descriptor(
+  name='TagsEntry',
+  full_name='mir.command200.CiTagToIndex.TagsEntry',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='key', full_name='mir.command200.CiTagToIndex.TagsEntry.key', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='value', full_name='mir.command200.CiTagToIndex.TagsEntry.value', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=b'8\001',
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=2703,
+  serialized_end=2778,
+)
+
+_CITAGTOINDEX = _descriptor.Descriptor(
+  name='CiTagToIndex',
+  full_name='mir.command200.CiTagToIndex',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='cis', full_name='mir.command200.CiTagToIndex.cis', index=0,
+      number=1, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='tags', full_name='mir.command200.CiTagToIndex.tags', index=1,
+      number=2, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[_CITAGTOINDEX_CISENTRY, _CITAGTOINDEX_TAGSENTRY, ],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=2499,
+  serialized_end=2778,
+)
+
+
+_STRINGLIST = _descriptor.Descriptor(
+  name='StringList',
+  full_name='mir.command200.StringList',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='asset_ids', full_name='mir.command200.StringList.asset_ids', index=0,
+      number=1, type=9, cpp_type=9, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=2780,
+  serialized_end=2811,
+)
+
+
+_MAPSTRINGTOINT32LIST_KEYIDSENTRY = _descriptor.Descriptor(
+  name='KeyIdsEntry',
+  full_name='mir.command200.MapStringToInt32List.KeyIdsEntry',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='key', full_name='mir.command200.MapStringToInt32List.KeyIdsEntry.key', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='value', full_name='mir.command200.MapStringToInt32List.KeyIdsEntry.value', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=b'8\001',
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=2905,
+  serialized_end=2977,
+)
+
+_MAPSTRINGTOINT32LIST = _descriptor.Descriptor(
+  name='MapStringToInt32List',
+  full_name='mir.command200.MapStringToInt32List',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='key_ids', full_name='mir.command200.MapStringToInt32List.key_ids', index=0,
+      number=1, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[_MAPSTRINGTOINT32LIST_KEYIDSENTRY, ],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=2814,
+  serialized_end=2977,
+)
+
+
+_INT32LIST = _descriptor.Descriptor(
+  name='Int32List',
+  full_name='mir.command200.Int32List',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='ids', full_name='mir.command200.Int32List.ids', index=0,
+      number=1, type=5, cpp_type=1, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=2979,
+  serialized_end=3003,
+)
+
+
+_ASSETANNOINDEX_ASSETANNOSENTRY = _descriptor.Descriptor(
+  name='AssetAnnosEntry',
+  full_name='mir.command200.AssetAnnoIndex.AssetAnnosEntry',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='key', full_name='mir.command200.AssetAnnoIndex.AssetAnnosEntry.key', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='value', full_name='mir.command200.AssetAnnoIndex.AssetAnnosEntry.value', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=b'8\001',
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=3162,
+  serialized_end=3238,
+)
+
+_ASSETANNOINDEX_SUBINDEXESENTRY = _descriptor.Descriptor(
+  name='SubIndexesEntry',
+  full_name='mir.command200.AssetAnnoIndex.SubIndexesEntry',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='key', full_name='mir.command200.AssetAnnoIndex.SubIndexesEntry.key', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='value', full_name='mir.command200.AssetAnnoIndex.SubIndexesEntry.value', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=b'8\001',
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=3240,
+  serialized_end=3327,
+)
+
+_ASSETANNOINDEX = _descriptor.Descriptor(
+  name='AssetAnnoIndex',
+  full_name='mir.command200.AssetAnnoIndex',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='asset_annos', full_name='mir.command200.AssetAnnoIndex.asset_annos', index=0,
+      number=1, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='sub_indexes', full_name='mir.command200.AssetAnnoIndex.sub_indexes', index=1,
+      number=2, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[_ASSETANNOINDEX_ASSETANNOSENTRY, _ASSETANNOINDEX_SUBINDEXESENTRY, ],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=3006,
+  serialized_end=3327,
+)
+
+
+_MIRTASKS_TASKSENTRY = _descriptor.Descriptor(
+  name='TasksEntry',
+  full_name='mir.command200.MirTasks.TasksEntry',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='key', full_name='mir.command200.MirTasks.TasksEntry.key', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='value', full_name='mir.command200.MirTasks.TasksEntry.value', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=b'8\001',
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=3416,
+  serialized_end=3482,
+)
+
+_MIRTASKS = _descriptor.Descriptor(
+  name='MirTasks',
+  full_name='mir.command200.MirTasks',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='tasks', full_name='mir.command200.MirTasks.tasks', index=0,
+      number=1, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='head_task_id', full_name='mir.command200.MirTasks.head_task_id', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[_MIRTASKS_TASKSENTRY, ],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=3330,
+  serialized_end=3482,
+)
+
+
+_TASK_NEWTYPESENTRY = _descriptor.Descriptor(
+  name='NewTypesEntry',
+  full_name='mir.command200.Task.NewTypesEntry',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='key', full_name='mir.command200.Task.NewTypesEntry.key', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='value', full_name='mir.command200.Task.NewTypesEntry.value', index=1,
+      number=2, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=b'8\001',
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=3919,
+  serialized_end=3966,
+)
+
+_TASK = _descriptor.Descriptor(
+  name='Task',
+  full_name='mir.command200.Task',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='type', full_name='mir.command200.Task.type', index=0,
+      number=1, type=14, cpp_type=8, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='name', full_name='mir.command200.Task.name', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='task_id', full_name='mir.command200.Task.task_id', index=2,
+      number=3, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='timestamp', full_name='mir.command200.Task.timestamp', index=3,
+      number=5, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='model', full_name='mir.command200.Task.model', index=4,
+      number=6, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='return_code', full_name='mir.command200.Task.return_code', index=5,
+      number=8, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='return_msg', full_name='mir.command200.Task.return_msg', index=6,
+      number=9, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='evaluation', full_name='mir.command200.Task.evaluation', index=7,
+      number=10, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='new_types', full_name='mir.command200.Task.new_types', index=8,
+      number=11, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='new_types_added', full_name='mir.command200.Task.new_types_added', index=9,
+      number=12, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='serialized_task_parameters', full_name='mir.command200.Task.serialized_task_parameters', index=10,
+      number=102, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='serialized_executor_config', full_name='mir.command200.Task.serialized_executor_config', index=11,
+      number=103, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='src_revs', full_name='mir.command200.Task.src_revs', index=12,
+      number=104, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='dst_rev', full_name='mir.command200.Task.dst_rev', index=13,
+      number=105, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='executor', full_name='mir.command200.Task.executor', index=14,
+      number=106, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[_TASK_NEWTYPESENTRY, ],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=3485,
+  serialized_end=3990,
+)
+
+
+_MODELMETA_STAGESENTRY = _descriptor.Descriptor(
+  name='StagesEntry',
+  full_name='mir.command200.ModelMeta.StagesEntry',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='key', full_name='mir.command200.ModelMeta.StagesEntry.key', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='value', full_name='mir.command200.ModelMeta.StagesEntry.value', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=b'8\001',
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=4176,
+  serialized_end=4249,
+)
+
+_MODELMETA = _descriptor.Descriptor(
+  name='ModelMeta',
+  full_name='mir.command200.ModelMeta',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='model_hash', full_name='mir.command200.ModelMeta.model_hash', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='mean_average_precision', full_name='mir.command200.ModelMeta.mean_average_precision', index=1,
+      number=2, type=2, cpp_type=6, label=1,
+      has_default_value=False, default_value=float(0),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='context', full_name='mir.command200.ModelMeta.context', index=2,
+      number=3, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='stages', full_name='mir.command200.ModelMeta.stages', index=3,
+      number=4, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='best_stage_name', full_name='mir.command200.ModelMeta.best_stage_name', index=4,
+      number=5, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='class_names', full_name='mir.command200.ModelMeta.class_names', index=5,
+      number=6, type=9, cpp_type=9, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[_MODELMETA_STAGESENTRY, ],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=3993,
+  serialized_end=4249,
+)
+
+
+_MODELSTAGE = _descriptor.Descriptor(
+  name='ModelStage',
+  full_name='mir.command200.ModelStage',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='stage_name', full_name='mir.command200.ModelStage.stage_name', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='files', full_name='mir.command200.ModelStage.files', index=1,
+      number=2, type=9, cpp_type=9, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='timestamp', full_name='mir.command200.ModelStage.timestamp', index=2,
+      number=3, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='mAP', full_name='mir.command200.ModelStage.mAP', index=3,
+      number=4, type=2, cpp_type=6, label=1,
+      has_default_value=False, default_value=float(0),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=4251,
+  serialized_end=4330,
+)
+
+
+_EVALUATION_SUBCKSENTRY = _descriptor.Descriptor(
+  name='SubCksEntry',
+  full_name='mir.command200.Evaluation.SubCksEntry',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='key', full_name='mir.command200.Evaluation.SubCksEntry.key', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='value', full_name='mir.command200.Evaluation.SubCksEntry.value', index=1,
+      number=2, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  serialized_options=b'8\001',
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=4627,
+  serialized_end=4713,
+)
+
+_EVALUATION = _descriptor.Descriptor(
+  name='Evaluation',
+  full_name='mir.command200.Evaluation',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='config', full_name='mir.command200.Evaluation.config', index=0,
+      number=1, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='dataset_evaluation', full_name='mir.command200.Evaluation.dataset_evaluation', index=1,
+      number=3, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='main_ck', full_name='mir.command200.Evaluation.main_ck', index=2,
+      number=4, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='sub_cks', full_name='mir.command200.Evaluation.sub_cks', index=3,
+      number=5, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='state', full_name='mir.command200.Evaluation.state', index=4,
+      number=6, type=14, cpp_type=8, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
+  ],
+  nested_types=[_EVALUATION_SUBCKSENTRY, ],
+  enum_types=[
+  ],
+  serialized_options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=4333,
+  serialized_end=4719,
+)
+
+
+_EVALUATECONFIG = _descriptor.Descriptor(
+  name='EvaluateConfig',
+  full_name='mir.command200.EvaluateConfig',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  create_key=_descriptor._internal_create_key,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='conf_thr', full_name='mir.command200.EvaluateConfig.conf_thr', index=0,
+      number=3, type=2, cpp_type=6, label=1,
+      has_default_value=False, default_value=float(0),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='iou_thrs_interval', full_name='mir.command200.EvaluateConfig.iou_thrs_interval', index=1,
+      number=4, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='need_pr_curve', full_name='mir.command200.EvaluateConfig.need_pr_curve', index=2,
+      number=5, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='class_ids', full_name='mir.command200.EvaluateConfig.class_ids', index=3,
+      number=7, type=5, cpp_type=1, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+    _descriptor.FieldDescriptor(
+      name='main_ck', full_name='mir.command200.EvaluateConfig.main_ck', index=4,
+      number=8, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=b"".decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+  ],
+  extensions=[
], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=4722, + serialized_end=4860, +) + + +_SINGLEDATASETEVALUATION_IOUEVALUATIONSENTRY = _descriptor.Descriptor( + name='IouEvaluationsEntry', + full_name='mir.command200.SingleDatasetEvaluation.IouEvaluationsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='mir.command200.SingleDatasetEvaluation.IouEvaluationsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='mir.command200.SingleDatasetEvaluation.IouEvaluationsEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=5064, + serialized_end=5154, +) + +_SINGLEDATASETEVALUATION = _descriptor.Descriptor( + name='SingleDatasetEvaluation', + full_name='mir.command200.SingleDatasetEvaluation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='conf_thr', full_name='mir.command200.SingleDatasetEvaluation.conf_thr', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='iou_evaluations', full_name='mir.command200.SingleDatasetEvaluation.iou_evaluations', index=1, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='iou_averaged_evaluation', full_name='mir.command200.SingleDatasetEvaluation.iou_averaged_evaluation', index=2, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_SINGLEDATASETEVALUATION_IOUEVALUATIONSENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=4863, + serialized_end=5166, +) + + +_SINGLEIOUEVALUATION_CIEVALUATIONSENTRY = _descriptor.Descriptor( + name='CiEvaluationsEntry', + full_name='mir.command200.SingleIouEvaluation.CiEvaluationsEntry', + filename=None, + 
file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='mir.command200.SingleIouEvaluation.CiEvaluationsEntry.key', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='mir.command200.SingleIouEvaluation.CiEvaluationsEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=5345, + serialized_end=5438, +) + +_SINGLEIOUEVALUATION = _descriptor.Descriptor( + name='SingleIouEvaluation', + full_name='mir.command200.SingleIouEvaluation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='ci_evaluations', full_name='mir.command200.SingleIouEvaluation.ci_evaluations', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='ci_averaged_evaluation', full_name='mir.command200.SingleIouEvaluation.ci_averaged_evaluation', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_SINGLEIOUEVALUATION_CIEVALUATIONSENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=5169, + serialized_end=5444, +) + + +_SINGLEEVALUATIONELEMENT = _descriptor.Descriptor( + name='SingleEvaluationElement', + full_name='mir.command200.SingleEvaluationElement', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='ap', full_name='mir.command200.SingleEvaluationElement.ap', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='ar', full_name='mir.command200.SingleEvaluationElement.ar', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + 
_descriptor.FieldDescriptor( + name='tp', full_name='mir.command200.SingleEvaluationElement.tp', index=2, + number=3, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='fp', full_name='mir.command200.SingleEvaluationElement.fp', index=3, + number=4, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='fn', full_name='mir.command200.SingleEvaluationElement.fn', index=4, + number=5, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='pr_curve', full_name='mir.command200.SingleEvaluationElement.pr_curve', index=5, + number=6, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=5447, + serialized_end=5578, +) + + +_INTPOINT = _descriptor.Descriptor( + name='IntPoint', + full_name='mir.command200.IntPoint', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='x', full_name='mir.command200.IntPoint.x', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='y', full_name='mir.command200.IntPoint.y', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='z', full_name='mir.command200.IntPoint.z', index=2, + number=3, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=5580, + serialized_end=5623, +) + + +_FLOATPOINT = _descriptor.Descriptor( + name='FloatPoint', + full_name='mir.command200.FloatPoint', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + 
_descriptor.FieldDescriptor( + name='x', full_name='mir.command200.FloatPoint.x', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='y', full_name='mir.command200.FloatPoint.y', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='z', full_name='mir.command200.FloatPoint.z', index=2, + number=3, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=5625, + serialized_end=5670, +) + + +_MIRCONTEXT_CKSCNTENTRY = _descriptor.Descriptor( + name='CksCntEntry', + full_name='mir.command200.MirContext.CksCntEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='mir.command200.MirContext.CksCntEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='mir.command200.MirContext.CksCntEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=5884, + serialized_end=5961, +) + +_MIRCONTEXT = _descriptor.Descriptor( + name='MirContext', + full_name='mir.command200.MirContext', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='images_cnt', full_name='mir.command200.MirContext.images_cnt', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='cks_cnt', full_name='mir.command200.MirContext.cks_cnt', index=1, + number=6, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='total_asset_mbytes', full_name='mir.command200.MirContext.total_asset_mbytes', index=2, + number=11, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='pred_stats', full_name='mir.command200.MirContext.pred_stats', index=3, + number=100, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='gt_stats', full_name='mir.command200.MirContext.gt_stats', index=4, + number=101, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_MIRCONTEXT_CKSCNTENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=5673, + serialized_end=6015, +) + + +_SINGLEMAPCOUNT_SUBCNTENTRY = _descriptor.Descriptor( + name='SubCntEntry', + full_name='mir.command200.SingleMapCount.SubCntEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='mir.command200.SingleMapCount.SubCntEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='mir.command200.SingleMapCount.SubCntEntry.value', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=6110, + serialized_end=6155, +) + +_SINGLEMAPCOUNT = _descriptor.Descriptor( + name='SingleMapCount', + full_name='mir.command200.SingleMapCount', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='cnt', full_name='mir.command200.SingleMapCount.cnt', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='sub_cnt', full_name='mir.command200.SingleMapCount.sub_cnt', index=1, + number=2, 
type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_SINGLEMAPCOUNT_SUBCNTENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=6018, + serialized_end=6155, +) + + +_ANNOSTATS_TAGSCNTENTRY = _descriptor.Descriptor( + name='TagsCntEntry', + full_name='mir.command200.AnnoStats.TagsCntEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='mir.command200.AnnoStats.TagsCntEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='mir.command200.AnnoStats.TagsCntEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=6395, + serialized_end=6473, +) + +_ANNOSTATS_CLASSIDSCNTENTRY = _descriptor.Descriptor( + name='ClassIdsCntEntry', + full_name='mir.command200.AnnoStats.ClassIdsCntEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='mir.command200.AnnoStats.ClassIdsCntEntry.key', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='mir.command200.AnnoStats.ClassIdsCntEntry.value', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=6475, + serialized_end=6525, +) + +_ANNOSTATS = _descriptor.Descriptor( + name='AnnoStats', + full_name='mir.command200.AnnoStats', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='total_cnt', full_name='mir.command200.AnnoStats.total_cnt', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='positive_asset_cnt', full_name='mir.command200.AnnoStats.positive_asset_cnt', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='negative_asset_cnt', full_name='mir.command200.AnnoStats.negative_asset_cnt', index=2, + number=3, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='tags_cnt', full_name='mir.command200.AnnoStats.tags_cnt', index=3, + number=7, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='class_ids_cnt', full_name='mir.command200.AnnoStats.class_ids_cnt', index=4, + number=8, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='eval_class_ids', full_name='mir.command200.AnnoStats.eval_class_ids', index=5, + number=9, type=5, cpp_type=1, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_ANNOSTATS_TAGSCNTENTRY, _ANNOSTATS_CLASSIDSCNTENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=6158, + serialized_end=6543, +) + + +_EXPORTCONFIG = _descriptor.Descriptor( + name='ExportConfig', + full_name='mir.command200.ExportConfig', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='asset_format', full_name='mir.command200.ExportConfig.asset_format', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='asset_dir', full_name='mir.command200.ExportConfig.asset_dir', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='asset_index_file', full_name='mir.command200.ExportConfig.asset_index_file', index=2, 
+ number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='asset_index_prefix', full_name='mir.command200.ExportConfig.asset_index_prefix', index=3, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='media_location', full_name='mir.command200.ExportConfig.media_location', index=4, + number=5, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='need_sub_folder', full_name='mir.command200.ExportConfig.need_sub_folder', index=5, + number=6, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='anno_format', full_name='mir.command200.ExportConfig.anno_format', index=6, + number=50, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='gt_dir', full_name='mir.command200.ExportConfig.gt_dir', index=7, + number=51, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='gt_index_file', full_name='mir.command200.ExportConfig.gt_index_file', index=8, + number=52, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='gt_index_prefix', full_name='mir.command200.ExportConfig.gt_index_prefix', index=9, + number=53, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='pred_dir', full_name='mir.command200.ExportConfig.pred_dir', index=10, + number=54, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='pred_index_file', full_name='mir.command200.ExportConfig.pred_index_file', index=11, + number=55, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='pred_index_prefix', full_name='mir.command200.ExportConfig.pred_index_prefix', index=12, + number=56, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='tvt_index_dir', full_name='mir.command200.ExportConfig.tvt_index_dir', index=13, + number=57, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=6546, + serialized_end=6939, +) + +_MIRMETADATAS_ATTRIBUTESENTRY.fields_by_name['value'].message_type = _METADATAATTRIBUTES +_MIRMETADATAS_ATTRIBUTESENTRY.containing_type = _MIRMETADATAS +_MIRMETADATAS.fields_by_name['attributes'].message_type = _MIRMETADATAS_ATTRIBUTESENTRY +_METADATAATTRIBUTES.fields_by_name['timestamp'].message_type = _TIMESTAMP +_METADATAATTRIBUTES.fields_by_name['tvt_type'].enum_type = _TVTTYPE +_METADATAATTRIBUTES.fields_by_name['asset_type'].enum_type = _ASSETTYPE +_MIRANNOTATIONS_IMAGECKSENTRY.fields_by_name['value'].message_type = _SINGLEIMAGECKS +_MIRANNOTATIONS_IMAGECKSENTRY.containing_type = _MIRANNOTATIONS +_MIRANNOTATIONS.fields_by_name['ground_truth'].message_type = _SINGLETASKANNOTATIONS +_MIRANNOTATIONS.fields_by_name['prediction'].message_type = _SINGLETASKANNOTATIONS +_MIRANNOTATIONS.fields_by_name['image_cks'].message_type = _MIRANNOTATIONS_IMAGECKSENTRY +_SINGLETASKANNOTATIONS_IMAGEANNOTATIONSENTRY.fields_by_name['value'].message_type = _SINGLEIMAGEANNOTATIONS +_SINGLETASKANNOTATIONS_IMAGEANNOTATIONSENTRY.containing_type = _SINGLETASKANNOTATIONS +_SINGLETASKANNOTATIONS_MAPIDCOLORENTRY.fields_by_name['value'].message_type = _INTPOINT +_SINGLETASKANNOTATIONS_MAPIDCOLORENTRY.containing_type = _SINGLETASKANNOTATIONS +_SINGLETASKANNOTATIONS.fields_by_name['image_annotations'].message_type = _SINGLETASKANNOTATIONS_IMAGEANNOTATIONSENTRY +_SINGLETASKANNOTATIONS.fields_by_name['type'].enum_type = _ANNOTYPE +_SINGLETASKANNOTATIONS.fields_by_name['map_id_color'].message_type = _SINGLETASKANNOTATIONS_MAPIDCOLORENTRY +_SINGLETASKANNOTATIONS.fields_by_name['model'].message_type = _MODELMETA +_SINGLEIMAGEANNOTATIONS.fields_by_name['boxes'].message_type = _OBJECTANNOTATION +_SINGLEIMAGEANNOTATIONS.fields_by_name['polygons'].message_type = _OBJECTANNOTATION +_SINGLEIMAGEANNOTATIONS.fields_by_name['mask'].message_type = _MASKANNOTATION +_SINGLEIMAGECKS_CKSENTRY.containing_type = _SINGLEIMAGECKS +_SINGLEIMAGECKS.fields_by_name['cks'].message_type = _SINGLEIMAGECKS_CKSENTRY 
+_OBJECTANNOTATION_TAGSENTRY.containing_type = _OBJECTANNOTATION +_OBJECTANNOTATION.fields_by_name['box'].message_type = _RECT +_OBJECTANNOTATION.fields_by_name['tags'].message_type = _OBJECTANNOTATION_TAGSENTRY +_OBJECTANNOTATION.fields_by_name['cm'].enum_type = _CONFUSIONMATRIXTYPE +_OBJECTANNOTATION.fields_by_name['polygon'].message_type = _INTPOINT +_MIRKEYWORDS_CKIDXENTRY.fields_by_name['value'].message_type = _ASSETANNOINDEX +_MIRKEYWORDS_CKIDXENTRY.containing_type = _MIRKEYWORDS +_MIRKEYWORDS.fields_by_name['pred_idx'].message_type = _CITAGTOINDEX +_MIRKEYWORDS.fields_by_name['gt_idx'].message_type = _CITAGTOINDEX +_MIRKEYWORDS.fields_by_name['ck_idx'].message_type = _MIRKEYWORDS_CKIDXENTRY +_CITAGTOINDEX_CISENTRY.fields_by_name['value'].message_type = _MAPSTRINGTOINT32LIST +_CITAGTOINDEX_CISENTRY.containing_type = _CITAGTOINDEX +_CITAGTOINDEX_TAGSENTRY.fields_by_name['value'].message_type = _ASSETANNOINDEX +_CITAGTOINDEX_TAGSENTRY.containing_type = _CITAGTOINDEX +_CITAGTOINDEX.fields_by_name['cis'].message_type = _CITAGTOINDEX_CISENTRY +_CITAGTOINDEX.fields_by_name['tags'].message_type = _CITAGTOINDEX_TAGSENTRY +_MAPSTRINGTOINT32LIST_KEYIDSENTRY.fields_by_name['value'].message_type = _INT32LIST +_MAPSTRINGTOINT32LIST_KEYIDSENTRY.containing_type = _MAPSTRINGTOINT32LIST +_MAPSTRINGTOINT32LIST.fields_by_name['key_ids'].message_type = _MAPSTRINGTOINT32LIST_KEYIDSENTRY +_ASSETANNOINDEX_ASSETANNOSENTRY.fields_by_name['value'].message_type = _INT32LIST +_ASSETANNOINDEX_ASSETANNOSENTRY.containing_type = _ASSETANNOINDEX +_ASSETANNOINDEX_SUBINDEXESENTRY.fields_by_name['value'].message_type = _MAPSTRINGTOINT32LIST +_ASSETANNOINDEX_SUBINDEXESENTRY.containing_type = _ASSETANNOINDEX +_ASSETANNOINDEX.fields_by_name['asset_annos'].message_type = _ASSETANNOINDEX_ASSETANNOSENTRY +_ASSETANNOINDEX.fields_by_name['sub_indexes'].message_type = _ASSETANNOINDEX_SUBINDEXESENTRY +_MIRTASKS_TASKSENTRY.fields_by_name['value'].message_type = _TASK +_MIRTASKS_TASKSENTRY.containing_type = _MIRTASKS +_MIRTASKS.fields_by_name['tasks'].message_type = _MIRTASKS_TASKSENTRY +_TASK_NEWTYPESENTRY.containing_type = _TASK +_TASK.fields_by_name['type'].enum_type = _TASKTYPE +_TASK.fields_by_name['model'].message_type = _MODELMETA +_TASK.fields_by_name['evaluation'].message_type = _EVALUATION +_TASK.fields_by_name['new_types'].message_type = _TASK_NEWTYPESENTRY +_MODELMETA_STAGESENTRY.fields_by_name['value'].message_type = _MODELSTAGE +_MODELMETA_STAGESENTRY.containing_type = _MODELMETA +_MODELMETA.fields_by_name['stages'].message_type = _MODELMETA_STAGESENTRY +_EVALUATION_SUBCKSENTRY.fields_by_name['value'].message_type = _SINGLEDATASETEVALUATION +_EVALUATION_SUBCKSENTRY.containing_type = _EVALUATION +_EVALUATION.fields_by_name['config'].message_type = _EVALUATECONFIG +_EVALUATION.fields_by_name['dataset_evaluation'].message_type = _SINGLEDATASETEVALUATION +_EVALUATION.fields_by_name['main_ck'].message_type = _SINGLEDATASETEVALUATION +_EVALUATION.fields_by_name['sub_cks'].message_type = _EVALUATION_SUBCKSENTRY +_EVALUATION.fields_by_name['state'].enum_type = _EVALUATIONSTATE +_SINGLEDATASETEVALUATION_IOUEVALUATIONSENTRY.fields_by_name['value'].message_type = _SINGLEIOUEVALUATION +_SINGLEDATASETEVALUATION_IOUEVALUATIONSENTRY.containing_type = _SINGLEDATASETEVALUATION +_SINGLEDATASETEVALUATION.fields_by_name['iou_evaluations'].message_type = _SINGLEDATASETEVALUATION_IOUEVALUATIONSENTRY +_SINGLEDATASETEVALUATION.fields_by_name['iou_averaged_evaluation'].message_type = _SINGLEIOUEVALUATION 
+_SINGLEIOUEVALUATION_CIEVALUATIONSENTRY.fields_by_name['value'].message_type = _SINGLEEVALUATIONELEMENT +_SINGLEIOUEVALUATION_CIEVALUATIONSENTRY.containing_type = _SINGLEIOUEVALUATION +_SINGLEIOUEVALUATION.fields_by_name['ci_evaluations'].message_type = _SINGLEIOUEVALUATION_CIEVALUATIONSENTRY +_SINGLEIOUEVALUATION.fields_by_name['ci_averaged_evaluation'].message_type = _SINGLEEVALUATIONELEMENT +_SINGLEEVALUATIONELEMENT.fields_by_name['pr_curve'].message_type = _FLOATPOINT +_MIRCONTEXT_CKSCNTENTRY.fields_by_name['value'].message_type = _SINGLEMAPCOUNT +_MIRCONTEXT_CKSCNTENTRY.containing_type = _MIRCONTEXT +_MIRCONTEXT.fields_by_name['cks_cnt'].message_type = _MIRCONTEXT_CKSCNTENTRY +_MIRCONTEXT.fields_by_name['pred_stats'].message_type = _ANNOSTATS +_MIRCONTEXT.fields_by_name['gt_stats'].message_type = _ANNOSTATS +_SINGLEMAPCOUNT_SUBCNTENTRY.containing_type = _SINGLEMAPCOUNT +_SINGLEMAPCOUNT.fields_by_name['sub_cnt'].message_type = _SINGLEMAPCOUNT_SUBCNTENTRY +_ANNOSTATS_TAGSCNTENTRY.fields_by_name['value'].message_type = _SINGLEMAPCOUNT +_ANNOSTATS_TAGSCNTENTRY.containing_type = _ANNOSTATS +_ANNOSTATS_CLASSIDSCNTENTRY.containing_type = _ANNOSTATS +_ANNOSTATS.fields_by_name['tags_cnt'].message_type = _ANNOSTATS_TAGSCNTENTRY +_ANNOSTATS.fields_by_name['class_ids_cnt'].message_type = _ANNOSTATS_CLASSIDSCNTENTRY +_EXPORTCONFIG.fields_by_name['asset_format'].enum_type = _ASSETFORMAT +_EXPORTCONFIG.fields_by_name['anno_format'].enum_type = _ANNOFORMAT +DESCRIPTOR.message_types_by_name['MirMetadatas'] = _MIRMETADATAS +DESCRIPTOR.message_types_by_name['MetadataAttributes'] = _METADATAATTRIBUTES +DESCRIPTOR.message_types_by_name['Timestamp'] = _TIMESTAMP +DESCRIPTOR.message_types_by_name['MirAnnotations'] = _MIRANNOTATIONS +DESCRIPTOR.message_types_by_name['SingleTaskAnnotations'] = _SINGLETASKANNOTATIONS +DESCRIPTOR.message_types_by_name['SingleImageAnnotations'] = _SINGLEIMAGEANNOTATIONS +DESCRIPTOR.message_types_by_name['SingleImageCks'] = _SINGLEIMAGECKS +DESCRIPTOR.message_types_by_name['MaskAnnotation'] = _MASKANNOTATION +DESCRIPTOR.message_types_by_name['ObjectAnnotation'] = _OBJECTANNOTATION +DESCRIPTOR.message_types_by_name['Rect'] = _RECT +DESCRIPTOR.message_types_by_name['MirKeywords'] = _MIRKEYWORDS +DESCRIPTOR.message_types_by_name['CiTagToIndex'] = _CITAGTOINDEX +DESCRIPTOR.message_types_by_name['StringList'] = _STRINGLIST +DESCRIPTOR.message_types_by_name['MapStringToInt32List'] = _MAPSTRINGTOINT32LIST +DESCRIPTOR.message_types_by_name['Int32List'] = _INT32LIST +DESCRIPTOR.message_types_by_name['AssetAnnoIndex'] = _ASSETANNOINDEX +DESCRIPTOR.message_types_by_name['MirTasks'] = _MIRTASKS +DESCRIPTOR.message_types_by_name['Task'] = _TASK +DESCRIPTOR.message_types_by_name['ModelMeta'] = _MODELMETA +DESCRIPTOR.message_types_by_name['ModelStage'] = _MODELSTAGE +DESCRIPTOR.message_types_by_name['Evaluation'] = _EVALUATION +DESCRIPTOR.message_types_by_name['EvaluateConfig'] = _EVALUATECONFIG +DESCRIPTOR.message_types_by_name['SingleDatasetEvaluation'] = _SINGLEDATASETEVALUATION +DESCRIPTOR.message_types_by_name['SingleIouEvaluation'] = _SINGLEIOUEVALUATION +DESCRIPTOR.message_types_by_name['SingleEvaluationElement'] = _SINGLEEVALUATIONELEMENT +DESCRIPTOR.message_types_by_name['IntPoint'] = _INTPOINT +DESCRIPTOR.message_types_by_name['FloatPoint'] = _FLOATPOINT +DESCRIPTOR.message_types_by_name['MirContext'] = _MIRCONTEXT +DESCRIPTOR.message_types_by_name['SingleMapCount'] = _SINGLEMAPCOUNT +DESCRIPTOR.message_types_by_name['AnnoStats'] = _ANNOSTATS 
+DESCRIPTOR.message_types_by_name['ExportConfig'] = _EXPORTCONFIG +DESCRIPTOR.enum_types_by_name['TvtType'] = _TVTTYPE +DESCRIPTOR.enum_types_by_name['AssetType'] = _ASSETTYPE +DESCRIPTOR.enum_types_by_name['TaskType'] = _TASKTYPE +DESCRIPTOR.enum_types_by_name['TaskState'] = _TASKSTATE +DESCRIPTOR.enum_types_by_name['Sha1Type'] = _SHA1TYPE +DESCRIPTOR.enum_types_by_name['MirStorage'] = _MIRSTORAGE +DESCRIPTOR.enum_types_by_name['AnnoFormat'] = _ANNOFORMAT +DESCRIPTOR.enum_types_by_name['AssetFormat'] = _ASSETFORMAT +DESCRIPTOR.enum_types_by_name['AnnoType'] = _ANNOTYPE +DESCRIPTOR.enum_types_by_name['ConfusionMatrixType'] = _CONFUSIONMATRIXTYPE +DESCRIPTOR.enum_types_by_name['EvaluationState'] = _EVALUATIONSTATE +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +MirMetadatas = _reflection.GeneratedProtocolMessageType('MirMetadatas', (_message.Message,), { + + 'AttributesEntry' : _reflection.GeneratedProtocolMessageType('AttributesEntry', (_message.Message,), { + 'DESCRIPTOR' : _MIRMETADATAS_ATTRIBUTESENTRY, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.MirMetadatas.AttributesEntry) + }) + , + 'DESCRIPTOR' : _MIRMETADATAS, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.MirMetadatas) + }) +_sym_db.RegisterMessage(MirMetadatas) +_sym_db.RegisterMessage(MirMetadatas.AttributesEntry) + +MetadataAttributes = _reflection.GeneratedProtocolMessageType('MetadataAttributes', (_message.Message,), { + 'DESCRIPTOR' : _METADATAATTRIBUTES, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.MetadataAttributes) + }) +_sym_db.RegisterMessage(MetadataAttributes) + +Timestamp = _reflection.GeneratedProtocolMessageType('Timestamp', (_message.Message,), { + 'DESCRIPTOR' : _TIMESTAMP, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.Timestamp) + }) +_sym_db.RegisterMessage(Timestamp) + +MirAnnotations = _reflection.GeneratedProtocolMessageType('MirAnnotations', (_message.Message,), { + + 'ImageCksEntry' : _reflection.GeneratedProtocolMessageType('ImageCksEntry', (_message.Message,), { + 'DESCRIPTOR' : _MIRANNOTATIONS_IMAGECKSENTRY, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.MirAnnotations.ImageCksEntry) + }) + , + 'DESCRIPTOR' : _MIRANNOTATIONS, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.MirAnnotations) + }) +_sym_db.RegisterMessage(MirAnnotations) +_sym_db.RegisterMessage(MirAnnotations.ImageCksEntry) + +SingleTaskAnnotations = _reflection.GeneratedProtocolMessageType('SingleTaskAnnotations', (_message.Message,), { + + 'ImageAnnotationsEntry' : _reflection.GeneratedProtocolMessageType('ImageAnnotationsEntry', (_message.Message,), { + 'DESCRIPTOR' : _SINGLETASKANNOTATIONS_IMAGEANNOTATIONSENTRY, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.SingleTaskAnnotations.ImageAnnotationsEntry) + }) + , + + 'MapIdColorEntry' : _reflection.GeneratedProtocolMessageType('MapIdColorEntry', (_message.Message,), { + 'DESCRIPTOR' : _SINGLETASKANNOTATIONS_MAPIDCOLORENTRY, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.SingleTaskAnnotations.MapIdColorEntry) + }) + , + 'DESCRIPTOR' : _SINGLETASKANNOTATIONS, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.SingleTaskAnnotations) + }) 
+_sym_db.RegisterMessage(SingleTaskAnnotations) +_sym_db.RegisterMessage(SingleTaskAnnotations.ImageAnnotationsEntry) +_sym_db.RegisterMessage(SingleTaskAnnotations.MapIdColorEntry) + +SingleImageAnnotations = _reflection.GeneratedProtocolMessageType('SingleImageAnnotations', (_message.Message,), { + 'DESCRIPTOR' : _SINGLEIMAGEANNOTATIONS, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.SingleImageAnnotations) + }) +_sym_db.RegisterMessage(SingleImageAnnotations) + +SingleImageCks = _reflection.GeneratedProtocolMessageType('SingleImageCks', (_message.Message,), { + + 'CksEntry' : _reflection.GeneratedProtocolMessageType('CksEntry', (_message.Message,), { + 'DESCRIPTOR' : _SINGLEIMAGECKS_CKSENTRY, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.SingleImageCks.CksEntry) + }) + , + 'DESCRIPTOR' : _SINGLEIMAGECKS, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.SingleImageCks) + }) +_sym_db.RegisterMessage(SingleImageCks) +_sym_db.RegisterMessage(SingleImageCks.CksEntry) + +MaskAnnotation = _reflection.GeneratedProtocolMessageType('MaskAnnotation', (_message.Message,), { + 'DESCRIPTOR' : _MASKANNOTATION, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.MaskAnnotation) + }) +_sym_db.RegisterMessage(MaskAnnotation) + +ObjectAnnotation = _reflection.GeneratedProtocolMessageType('ObjectAnnotation', (_message.Message,), { + + 'TagsEntry' : _reflection.GeneratedProtocolMessageType('TagsEntry', (_message.Message,), { + 'DESCRIPTOR' : _OBJECTANNOTATION_TAGSENTRY, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.ObjectAnnotation.TagsEntry) + }) + , + 'DESCRIPTOR' : _OBJECTANNOTATION, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.ObjectAnnotation) + }) +_sym_db.RegisterMessage(ObjectAnnotation) +_sym_db.RegisterMessage(ObjectAnnotation.TagsEntry) + +Rect = _reflection.GeneratedProtocolMessageType('Rect', (_message.Message,), { + 'DESCRIPTOR' : _RECT, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.Rect) + }) +_sym_db.RegisterMessage(Rect) + +MirKeywords = _reflection.GeneratedProtocolMessageType('MirKeywords', (_message.Message,), { + + 'CkIdxEntry' : _reflection.GeneratedProtocolMessageType('CkIdxEntry', (_message.Message,), { + 'DESCRIPTOR' : _MIRKEYWORDS_CKIDXENTRY, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.MirKeywords.CkIdxEntry) + }) + , + 'DESCRIPTOR' : _MIRKEYWORDS, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.MirKeywords) + }) +_sym_db.RegisterMessage(MirKeywords) +_sym_db.RegisterMessage(MirKeywords.CkIdxEntry) + +CiTagToIndex = _reflection.GeneratedProtocolMessageType('CiTagToIndex', (_message.Message,), { + + 'CisEntry' : _reflection.GeneratedProtocolMessageType('CisEntry', (_message.Message,), { + 'DESCRIPTOR' : _CITAGTOINDEX_CISENTRY, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.CiTagToIndex.CisEntry) + }) + , + + 'TagsEntry' : _reflection.GeneratedProtocolMessageType('TagsEntry', (_message.Message,), { + 'DESCRIPTOR' : _CITAGTOINDEX_TAGSENTRY, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.CiTagToIndex.TagsEntry) + }) + , + 'DESCRIPTOR' : _CITAGTOINDEX, + '__module__' : 
'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.CiTagToIndex) + }) +_sym_db.RegisterMessage(CiTagToIndex) +_sym_db.RegisterMessage(CiTagToIndex.CisEntry) +_sym_db.RegisterMessage(CiTagToIndex.TagsEntry) + +StringList = _reflection.GeneratedProtocolMessageType('StringList', (_message.Message,), { + 'DESCRIPTOR' : _STRINGLIST, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.StringList) + }) +_sym_db.RegisterMessage(StringList) + +MapStringToInt32List = _reflection.GeneratedProtocolMessageType('MapStringToInt32List', (_message.Message,), { + + 'KeyIdsEntry' : _reflection.GeneratedProtocolMessageType('KeyIdsEntry', (_message.Message,), { + 'DESCRIPTOR' : _MAPSTRINGTOINT32LIST_KEYIDSENTRY, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.MapStringToInt32List.KeyIdsEntry) + }) + , + 'DESCRIPTOR' : _MAPSTRINGTOINT32LIST, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.MapStringToInt32List) + }) +_sym_db.RegisterMessage(MapStringToInt32List) +_sym_db.RegisterMessage(MapStringToInt32List.KeyIdsEntry) + +Int32List = _reflection.GeneratedProtocolMessageType('Int32List', (_message.Message,), { + 'DESCRIPTOR' : _INT32LIST, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.Int32List) + }) +_sym_db.RegisterMessage(Int32List) + +AssetAnnoIndex = _reflection.GeneratedProtocolMessageType('AssetAnnoIndex', (_message.Message,), { + + 'AssetAnnosEntry' : _reflection.GeneratedProtocolMessageType('AssetAnnosEntry', (_message.Message,), { + 'DESCRIPTOR' : _ASSETANNOINDEX_ASSETANNOSENTRY, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.AssetAnnoIndex.AssetAnnosEntry) + }) + , + + 'SubIndexesEntry' : _reflection.GeneratedProtocolMessageType('SubIndexesEntry', (_message.Message,), { + 'DESCRIPTOR' : _ASSETANNOINDEX_SUBINDEXESENTRY, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.AssetAnnoIndex.SubIndexesEntry) + }) + , + 'DESCRIPTOR' : _ASSETANNOINDEX, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.AssetAnnoIndex) + }) +_sym_db.RegisterMessage(AssetAnnoIndex) +_sym_db.RegisterMessage(AssetAnnoIndex.AssetAnnosEntry) +_sym_db.RegisterMessage(AssetAnnoIndex.SubIndexesEntry) + +MirTasks = _reflection.GeneratedProtocolMessageType('MirTasks', (_message.Message,), { + + 'TasksEntry' : _reflection.GeneratedProtocolMessageType('TasksEntry', (_message.Message,), { + 'DESCRIPTOR' : _MIRTASKS_TASKSENTRY, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.MirTasks.TasksEntry) + }) + , + 'DESCRIPTOR' : _MIRTASKS, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.MirTasks) + }) +_sym_db.RegisterMessage(MirTasks) +_sym_db.RegisterMessage(MirTasks.TasksEntry) + +Task = _reflection.GeneratedProtocolMessageType('Task', (_message.Message,), { + + 'NewTypesEntry' : _reflection.GeneratedProtocolMessageType('NewTypesEntry', (_message.Message,), { + 'DESCRIPTOR' : _TASK_NEWTYPESENTRY, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.Task.NewTypesEntry) + }) + , + 'DESCRIPTOR' : _TASK, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.Task) + }) +_sym_db.RegisterMessage(Task) 
+_sym_db.RegisterMessage(Task.NewTypesEntry) + +ModelMeta = _reflection.GeneratedProtocolMessageType('ModelMeta', (_message.Message,), { + + 'StagesEntry' : _reflection.GeneratedProtocolMessageType('StagesEntry', (_message.Message,), { + 'DESCRIPTOR' : _MODELMETA_STAGESENTRY, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.ModelMeta.StagesEntry) + }) + , + 'DESCRIPTOR' : _MODELMETA, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.ModelMeta) + }) +_sym_db.RegisterMessage(ModelMeta) +_sym_db.RegisterMessage(ModelMeta.StagesEntry) + +ModelStage = _reflection.GeneratedProtocolMessageType('ModelStage', (_message.Message,), { + 'DESCRIPTOR' : _MODELSTAGE, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.ModelStage) + }) +_sym_db.RegisterMessage(ModelStage) + +Evaluation = _reflection.GeneratedProtocolMessageType('Evaluation', (_message.Message,), { + + 'SubCksEntry' : _reflection.GeneratedProtocolMessageType('SubCksEntry', (_message.Message,), { + 'DESCRIPTOR' : _EVALUATION_SUBCKSENTRY, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.Evaluation.SubCksEntry) + }) + , + 'DESCRIPTOR' : _EVALUATION, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.Evaluation) + }) +_sym_db.RegisterMessage(Evaluation) +_sym_db.RegisterMessage(Evaluation.SubCksEntry) + +EvaluateConfig = _reflection.GeneratedProtocolMessageType('EvaluateConfig', (_message.Message,), { + 'DESCRIPTOR' : _EVALUATECONFIG, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.EvaluateConfig) + }) +_sym_db.RegisterMessage(EvaluateConfig) + +SingleDatasetEvaluation = _reflection.GeneratedProtocolMessageType('SingleDatasetEvaluation', (_message.Message,), { + + 'IouEvaluationsEntry' : _reflection.GeneratedProtocolMessageType('IouEvaluationsEntry', (_message.Message,), { + 'DESCRIPTOR' : _SINGLEDATASETEVALUATION_IOUEVALUATIONSENTRY, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.SingleDatasetEvaluation.IouEvaluationsEntry) + }) + , + 'DESCRIPTOR' : _SINGLEDATASETEVALUATION, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.SingleDatasetEvaluation) + }) +_sym_db.RegisterMessage(SingleDatasetEvaluation) +_sym_db.RegisterMessage(SingleDatasetEvaluation.IouEvaluationsEntry) + +SingleIouEvaluation = _reflection.GeneratedProtocolMessageType('SingleIouEvaluation', (_message.Message,), { + + 'CiEvaluationsEntry' : _reflection.GeneratedProtocolMessageType('CiEvaluationsEntry', (_message.Message,), { + 'DESCRIPTOR' : _SINGLEIOUEVALUATION_CIEVALUATIONSENTRY, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.SingleIouEvaluation.CiEvaluationsEntry) + }) + , + 'DESCRIPTOR' : _SINGLEIOUEVALUATION, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.SingleIouEvaluation) + }) +_sym_db.RegisterMessage(SingleIouEvaluation) +_sym_db.RegisterMessage(SingleIouEvaluation.CiEvaluationsEntry) + +SingleEvaluationElement = _reflection.GeneratedProtocolMessageType('SingleEvaluationElement', (_message.Message,), { + 'DESCRIPTOR' : _SINGLEEVALUATIONELEMENT, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.SingleEvaluationElement) + }) 
+_sym_db.RegisterMessage(SingleEvaluationElement) + +IntPoint = _reflection.GeneratedProtocolMessageType('IntPoint', (_message.Message,), { + 'DESCRIPTOR' : _INTPOINT, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.IntPoint) + }) +_sym_db.RegisterMessage(IntPoint) + +FloatPoint = _reflection.GeneratedProtocolMessageType('FloatPoint', (_message.Message,), { + 'DESCRIPTOR' : _FLOATPOINT, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.FloatPoint) + }) +_sym_db.RegisterMessage(FloatPoint) + +MirContext = _reflection.GeneratedProtocolMessageType('MirContext', (_message.Message,), { + + 'CksCntEntry' : _reflection.GeneratedProtocolMessageType('CksCntEntry', (_message.Message,), { + 'DESCRIPTOR' : _MIRCONTEXT_CKSCNTENTRY, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.MirContext.CksCntEntry) + }) + , + 'DESCRIPTOR' : _MIRCONTEXT, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.MirContext) + }) +_sym_db.RegisterMessage(MirContext) +_sym_db.RegisterMessage(MirContext.CksCntEntry) + +SingleMapCount = _reflection.GeneratedProtocolMessageType('SingleMapCount', (_message.Message,), { + + 'SubCntEntry' : _reflection.GeneratedProtocolMessageType('SubCntEntry', (_message.Message,), { + 'DESCRIPTOR' : _SINGLEMAPCOUNT_SUBCNTENTRY, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.SingleMapCount.SubCntEntry) + }) + , + 'DESCRIPTOR' : _SINGLEMAPCOUNT, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.SingleMapCount) + }) +_sym_db.RegisterMessage(SingleMapCount) +_sym_db.RegisterMessage(SingleMapCount.SubCntEntry) + +AnnoStats = _reflection.GeneratedProtocolMessageType('AnnoStats', (_message.Message,), { + + 'TagsCntEntry' : _reflection.GeneratedProtocolMessageType('TagsCntEntry', (_message.Message,), { + 'DESCRIPTOR' : _ANNOSTATS_TAGSCNTENTRY, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.AnnoStats.TagsCntEntry) + }) + , + + 'ClassIdsCntEntry' : _reflection.GeneratedProtocolMessageType('ClassIdsCntEntry', (_message.Message,), { + 'DESCRIPTOR' : _ANNOSTATS_CLASSIDSCNTENTRY, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.AnnoStats.ClassIdsCntEntry) + }) + , + 'DESCRIPTOR' : _ANNOSTATS, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.AnnoStats) + }) +_sym_db.RegisterMessage(AnnoStats) +_sym_db.RegisterMessage(AnnoStats.TagsCntEntry) +_sym_db.RegisterMessage(AnnoStats.ClassIdsCntEntry) + +ExportConfig = _reflection.GeneratedProtocolMessageType('ExportConfig', (_message.Message,), { + 'DESCRIPTOR' : _EXPORTCONFIG, + '__module__' : 'mir_command_200_pb2' + # @@protoc_insertion_point(class_scope:mir.command200.ExportConfig) + }) +_sym_db.RegisterMessage(ExportConfig) + + +DESCRIPTOR._options = None +_MIRMETADATAS_ATTRIBUTESENTRY._options = None +_MIRANNOTATIONS_IMAGECKSENTRY._options = None +_SINGLETASKANNOTATIONS_IMAGEANNOTATIONSENTRY._options = None +_SINGLETASKANNOTATIONS_MAPIDCOLORENTRY._options = None +_SINGLEIMAGECKS_CKSENTRY._options = None +_OBJECTANNOTATION_TAGSENTRY._options = None +_MIRKEYWORDS_CKIDXENTRY._options = None +_CITAGTOINDEX_CISENTRY._options = None +_CITAGTOINDEX_TAGSENTRY._options = None +_MAPSTRINGTOINT32LIST_KEYIDSENTRY._options = None 
+_ASSETANNOINDEX_ASSETANNOSENTRY._options = None
+_ASSETANNOINDEX_SUBINDEXESENTRY._options = None
+_MIRTASKS_TASKSENTRY._options = None
+_TASK_NEWTYPESENTRY._options = None
+_MODELMETA_STAGESENTRY._options = None
+_EVALUATION_SUBCKSENTRY._options = None
+_SINGLEDATASETEVALUATION_IOUEVALUATIONSENTRY._options = None
+_SINGLEIOUEVALUATION_CIEVALUATIONSENTRY._options = None
+_MIRCONTEXT_CKSCNTENTRY._options = None
+_SINGLEMAPCOUNT_SUBCNTENTRY._options = None
+_ANNOSTATS_TAGSCNTENTRY._options = None
+_ANNOSTATS_CLASSIDSCNTENTRY._options = None
+# @@protoc_insertion_point(module_scope)
diff --git a/ymir/updater/app/mir/protos/update_proto_py.sh b/ymir/updater/app/mir/protos/update_proto_py.sh
new file mode 100755
index 0000000000..823a814f69
--- /dev/null
+++ b/ymir/updater/app/mir/protos/update_proto_py.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+
+# Python version == 3.8.10
+# protoc version == 3.13.0
+# https://github.com/protocolbuffers/protobuf/releases/download/v3.13.0
+# mv bin/protoc /usr/local/bin
+# mv include/google /usr/local/include/
+
+# pip install protobuf==3.13.0
+# pip install mypy-protobuf==3.0.0
+# protoc-gen-go 1.28.1
+
+
+set -e
+
+INPUT_DIR="./"
+OUTPUT_DIR="./"
+
+# gen protobuf py
+protoc -I "$INPUT_DIR" \
+    --python_out="$OUTPUT_DIR" \
+    --plugin=protoc-gen-mypy=$(which protoc-gen-mypy) --mypy_out="$OUTPUT_DIR" \
+    $INPUT_DIR/mir_command*.proto
+
+touch "$OUTPUT_DIR/__init__.py"
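As a quick sanity check after running the script above, the regenerated module can be exercised with a serialization round trip along these lines (a minimal sketch run from ymir/updater/app; the field values are illustrative only, and it assumes the generated module is importable as mir.protos.mir_command_200_pb2):

    from mir.protos import mir_command_200_pb2 as mirpb

    # Build an EvaluateConfig and round-trip it through serialization;
    # this exercises the regenerated descriptors and message classes.
    config = mirpb.EvaluateConfig()
    config.conf_thr = 0.5
    config.iou_thrs_interval = '0.5:0.95:0.05'
    config.need_pr_curve = True
    config.class_ids[:] = [1, 2, 3]

    parsed = mirpb.EvaluateConfig.FromString(config.SerializeToString())
    assert parsed == config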
'tasks.mir', + mirpb.MirStorage.MIR_CONTEXT: 'context.mir', + } + return MIR_PATH[ms] + + +def get_all_mir_paths() -> List[str]: + return [_mir_path(ms) for ms in get_all_mir_storage()] + + +def get_all_mir_storage() -> List['mirpb.MirStorage.V']: + return [ + mirpb.MirStorage.MIR_METADATAS, + mirpb.MirStorage.MIR_ANNOTATIONS, + mirpb.MirStorage.MIR_KEYWORDS, + mirpb.MirStorage.MIR_TASKS, + mirpb.MirStorage.MIR_CONTEXT, + ] diff --git a/ymir/updater/app/mir/tools/mir_storage_ops_200.py b/ymir/updater/app/mir/tools/mir_storage_ops_200.py new file mode 100644 index 0000000000..d18a446d00 --- /dev/null +++ b/ymir/updater/app/mir/tools/mir_storage_ops_200.py @@ -0,0 +1,390 @@ +from functools import reduce +from math import ceil +import os +import time +from typing import Any, List, Dict, Optional + +import fasteners # type: ignore +from google.protobuf import json_format + +from mir import scm +from mir.commands.checkout import CmdCheckout +from mir.commands.commit import CmdCommit +from mir.tools import exodus +from mir.tools import mir_repo_utils, revs_parser +from mir.tools import settings as mir_settings +from mir.tools.code import MirCode +from mir.tools.errors import MirRuntimeError + +from mir.protos import mir_command_200_pb2 as mirpb + + +def create_evaluate_config(conf_thr: float = mir_settings.DEFAULT_EVALUATE_CONF_THR, + iou_thrs: str = mir_settings.DEFAULT_EVALUATE_IOU_THR, + need_pr_curve: bool = False, + class_ids: List[int] = []) -> mirpb.EvaluateConfig: + evaluate_config = mirpb.EvaluateConfig() + evaluate_config.conf_thr = conf_thr + evaluate_config.iou_thrs_interval = iou_thrs + evaluate_config.need_pr_curve = need_pr_curve + evaluate_config.class_ids[:] = class_ids + return evaluate_config + + +class MirStorageOps(): + # private: save and load + @classmethod + def __build_task_keyword_context(cls, mir_datas: Dict['mirpb.MirStorage.V', Any], task: mirpb.Task, + evaluate_config: mirpb.EvaluateConfig) -> None: + # add default members + mir_metadatas: mirpb.MirMetadatas = mir_datas[mirpb.MirStorage.MIR_METADATAS] + mir_annotations: mirpb.MirAnnotations = mir_datas[mirpb.MirStorage.MIR_ANNOTATIONS] + mir_annotations.prediction.task_id = task.task_id + mir_annotations.ground_truth.task_id = task.task_id + + # build mir_tasks + mir_tasks: mirpb.MirTasks = mirpb.MirTasks() + mir_tasks.head_task_id = task.task_id + mir_tasks.tasks[mir_tasks.head_task_id].CopyFrom(task) + # TODO: evaluation skipped in updater + # evaluation = det_eval_ops.det_evaluate_with_pb( + # prediction=mir_annotations.prediction, + # ground_truth=mir_annotations.ground_truth, + # config=evaluate_config, + # ) + # if evaluation: + # mir_tasks.tasks[mir_tasks.head_task_id].evaluation.CopyFrom(evaluation) + mir_datas[mirpb.MirStorage.MIR_TASKS] = mir_tasks + + # gen mir_keywords + mir_keywords: mirpb.MirKeywords = mirpb.MirKeywords() + cls.__build_mir_keywords_ci_tag(task_annotations=mir_annotations.prediction, + keyword_to_index=mir_keywords.pred_idx) + cls.__build_mir_keywords_ci_tag(task_annotations=mir_annotations.ground_truth, + keyword_to_index=mir_keywords.gt_idx) + # ck to assets + for asset_id, image_cks in mir_annotations.image_cks.items(): + for k, v in image_cks.cks.items(): + mir_keywords.ck_idx[k].asset_annos[asset_id] # empty record to asset id + mir_keywords.ck_idx[k].sub_indexes[v].key_ids[asset_id] # empty record to asset id + mir_datas[mirpb.MirStorage.MIR_KEYWORDS] = mir_keywords + + # gen mir_context + mir_context = mirpb.MirContext() + cls.__build_mir_context(mir_metadatas=mir_metadatas, + 
mir_annotations=mir_annotations, + mir_keywords=mir_keywords, + mir_context=mir_context) + mir_datas[mirpb.MirStorage.MIR_CONTEXT] = mir_context + + @classmethod + def __build_mir_keywords_ci_tag(cls, task_annotations: mirpb.SingleTaskAnnotations, + keyword_to_index: mirpb.CiTagToIndex) -> None: + task_cis = set() + for asset_id, single_image_annotations in task_annotations.image_annotations.items(): + image_cis = set() + for annotation in single_image_annotations.boxes: + image_cis.add(annotation.class_id) + # ci to annos + keyword_to_index.cis[annotation.class_id].key_ids[asset_id].ids.append(annotation.index) + + # tags to annos + for k, v in annotation.tags.items(): + keyword_to_index.tags[k].asset_annos[asset_id].ids.append(annotation.index) + keyword_to_index.tags[k].sub_indexes[v].key_ids[asset_id].ids.append(annotation.index) + + single_image_annotations.img_class_ids[:] = image_cis + task_cis.update(image_cis) + + task_annotations.task_class_ids[:] = task_cis + + @classmethod + def __build_mir_context_stats(cls, anno_stats: mirpb.AnnoStats, mir_metadatas: mirpb.MirMetadatas, + task_annotations: mirpb.SingleTaskAnnotations, + keyword_to_index: mirpb.CiTagToIndex) -> None: + image_annotations = task_annotations.image_annotations + + anno_stats.eval_class_ids[:] = task_annotations.eval_class_ids + + # anno_stats.asset_cnt + anno_stats.positive_asset_cnt = len(image_annotations) + anno_stats.negative_asset_cnt = len(mir_metadatas.attributes) - len(image_annotations) + + anno_stats.total_cnt = sum([len(image_annotation.boxes) for image_annotation in image_annotations.values()]) + + # anno_stats.cis_cnt + for ci, ci_assets in keyword_to_index.cis.items(): + anno_stats.class_ids_cnt[ci] = len(ci_assets.key_ids) + + # anno_stats.tags_cnt + for tag, tag_to_annos in keyword_to_index.tags.items(): + for anno_idxes in tag_to_annos.asset_annos.values(): + anno_stats.tags_cnt[tag].cnt += len(anno_idxes.ids) + + for sub_tag, sub_tag_to_annos in tag_to_annos.sub_indexes.items(): + for anno_idxes in sub_tag_to_annos.key_ids.values(): + anno_stats.tags_cnt[tag].sub_cnt[sub_tag] += len(anno_idxes.ids) + + @classmethod + def __build_mir_context(cls, mir_metadatas: mirpb.MirMetadatas, mir_annotations: mirpb.MirAnnotations, + mir_keywords: mirpb.MirKeywords, mir_context: mirpb.MirContext) -> None: + mir_context.images_cnt = len(mir_metadatas.attributes) + total_asset_bytes = reduce(lambda s, v: s + v.byte_size, mir_metadatas.attributes.values(), 0) + mir_context.total_asset_mbytes = ceil(total_asset_bytes / mir_settings.BYTES_PER_MB) + + # cks cnt + for ck, ck_assets in mir_keywords.ck_idx.items(): + mir_context.cks_cnt[ck].cnt = len(ck_assets.asset_annos) + for sub_ck, sub_ck_to_assets in ck_assets.sub_indexes.items(): + mir_context.cks_cnt[ck].sub_cnt[sub_ck] = len(sub_ck_to_assets.key_ids) + + cls.__build_mir_context_stats(anno_stats=mir_context.pred_stats, + mir_metadatas=mir_metadatas, + task_annotations=mir_annotations.prediction, + keyword_to_index=mir_keywords.pred_idx) + cls.__build_mir_context_stats(anno_stats=mir_context.gt_stats, + mir_metadatas=mir_metadatas, + task_annotations=mir_annotations.ground_truth, + keyword_to_index=mir_keywords.gt_idx) + + @classmethod + def __add_git_tag(cls, mir_root: str, tag: str) -> None: + repo_git = scm.Scm(root_dir=mir_root, scm_executable='git') + repo_git.tag(tag) + + # public: save and load + @classmethod + def save_and_commit(cls, + mir_root: str, + mir_branch: str, + his_branch: Optional[str], + mir_datas: Dict, + task: mirpb.Task, + 
evaluate_config: Optional[mirpb.EvaluateConfig] = None) -> int: + """ + saves and commits all contents in mir_datas to branch `mir_branch`; + the branch is created if it does not exist, and its history follows `his_branch` + + Args: + mir_root (str): path to mir repo + mir_branch (str): branch you wish to save to; created if it does not exist + his_branch (Optional[str]): if `mir_branch` does not exist, the branch to start from + mir_datas (Dict[mirpb.MirStorage.V, pb_message.Message]): data you wish to save; mir_metadatas and + mir_annotations are required, while mir_keywords, mir_context and mir_tasks must not be provided: + they are generated by this method + task (mirpb.Task): task for this commit + evaluate_config (mirpb.EvaluateConfig): evaluate config; a default one is created if not provided + + Raises: + MirRuntimeError + + Returns: + int: result code + """ + if not mir_root: + mir_root = '.' + if not mir_branch: + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message="empty mir branch") + if mirpb.MirStorage.MIR_METADATAS not in mir_datas or mirpb.MirStorage.MIR_ANNOTATIONS not in mir_datas: + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, + error_message='need mir_metadatas and mir_annotations') + if mirpb.MirStorage.MIR_KEYWORDS in mir_datas: + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message='need no mir_keywords') + if mirpb.MirStorage.MIR_CONTEXT in mir_datas: + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message='need no mir_context') + if mirpb.MirStorage.MIR_TASKS in mir_datas: + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message='need no mir_tasks') + if not task.name: + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message="empty commit message") + if not task.task_id: + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message='empty task id') + + if not evaluate_config: + evaluate_config = create_evaluate_config() + + # Build all mir_datas.
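+        # Derive the remaining storages from mir_metadatas and mir_annotations: mir_tasks (head task), mir_keywords (class-id / tag / ck indexes) and mir_context (dataset statistics); callers never pass these in, see the checks above.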
+ cls.__build_task_keyword_context(mir_datas=mir_datas, + task=task, + evaluate_config=evaluate_config) + + branch_exists = mir_repo_utils.mir_check_branch_exists(mir_root=mir_root, branch=mir_branch) + if not branch_exists and not his_branch: + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_BRANCH_OR_TAG, + error_message=f"branch {mir_branch} not exists, and his_branch not provided") + + # checkout to proper branch + # if mir_branch exists, checkout mir_branch + # if not exists, checkout his_branch, and checkout -b mir_branch + if branch_exists: + his_branch = mir_branch + + lock = fasteners.InterProcessLock(os.path.join(mir_root, '.mir_lock')) + with lock: + # checkout to his branch + # cmd checkout also checks whether current branch is clean + return_code = CmdCheckout.run_with_args(mir_root=mir_root, commit_id=str(his_branch), branch_new=False) + if return_code != MirCode.RC_OK: + return return_code + + # create dest_branch if not exists + if not branch_exists: + return_code = CmdCheckout.run_with_args(mir_root=mir_root, commit_id=mir_branch, branch_new=True) + if return_code != MirCode.RC_OK: + return return_code + + # save to file + for ms, mir_data in mir_datas.items(): + mir_file_path = os.path.join(mir_root, _mir_path(ms)) + with open(mir_file_path, "wb") as m_f: + m_f.write(mir_data.SerializeToString()) + + ret_code = CmdCommit.run_with_args(mir_root=mir_root, msg=task.name) + if ret_code != MirCode.RC_OK: + return ret_code + + # also have a tag for this commit + cls.__add_git_tag(mir_root=mir_root, tag=revs_parser.join_rev_tid(mir_branch, task.task_id)) + + return ret_code + + # public: load + @classmethod + def load_single_storage(cls, + mir_root: str, + mir_branch: str, + ms: 'mirpb.MirStorage.V', + mir_task_id: str = '', + as_dict: bool = False) -> Any: + rev = revs_parser.join_rev_tid(mir_branch, mir_task_id) + + mir_storage_data = _mir_type(ms)() + mir_storage_data.ParseFromString(exodus.read_mir(mir_root=mir_root, rev=rev, + file_name=_mir_path(ms))) + + if as_dict: + mir_storage_data = cls.__message_to_dict(mir_storage_data) + + return mir_storage_data + + @classmethod + def load_multiple_storages(cls, + mir_root: str, + mir_branch: str, + ms_list: List['mirpb.MirStorage.V'], + mir_task_id: str = '', + as_dict: bool = False) -> List[Any]: + return [ + cls.load_single_storage( + mir_root=mir_root, + mir_branch=mir_branch, + ms=ms, + mir_task_id=mir_task_id, + as_dict=as_dict, + ) for ms in ms_list + ] + + @classmethod + def __message_to_dict(cls, message: Any) -> Dict: + return json_format.MessageToDict(message, + preserving_proto_field_name=True, + use_integers_for_enums=True, + including_default_value_fields=True) + + +def create_task(task_type: 'mirpb.TaskType.V', + task_id: str, + message: str, + new_types: Dict[str, int] = {}, + new_types_added: bool = False, + return_code: int = 0, + return_msg: str = '', + serialized_task_parameters: str = '', + serialized_executor_config: str = '', + executor: str = '', + model_meta: mirpb.ModelMeta = None, + evaluation: mirpb.Evaluation = None, + src_revs: str = '', + dst_rev: str = '') -> mirpb.Task: + task_dict = { + 'type': task_type, + 'name': message, + 'task_id': task_id, + 'timestamp': int(time.time()), + 'return_code': return_code, + 'return_msg': return_msg, + 'serialized_task_parameters': serialized_task_parameters, + 'serialized_executor_config': serialized_executor_config, + 'new_types': new_types, + 'new_types_added': new_types_added, + 'executor': executor, + 'src_revs': src_revs, + 'dst_rev': dst_rev, + } + 
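+    # json_format.ParseDict checks every key and enum value in task_dict against the mir_command_200 schema and raises on a mismatch, so a stale field name fails fast here instead of yielding an incomplete Task.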
task: mirpb.Task = mirpb.Task() + json_format.ParseDict(task_dict, task) + + if model_meta: + task.model.CopyFrom(model_meta) + + if evaluation: + task.evaluation.CopyFrom(evaluation) + + return task + + +def _mir_type(ms: 'mirpb.MirStorage.V') -> Any: + MIR_TYPE = { + mirpb.MirStorage.MIR_METADATAS: mirpb.MirMetadatas, + mirpb.MirStorage.MIR_ANNOTATIONS: mirpb.MirAnnotations, + mirpb.MirStorage.MIR_KEYWORDS: mirpb.MirKeywords, + mirpb.MirStorage.MIR_TASKS: mirpb.MirTasks, + mirpb.MirStorage.MIR_CONTEXT: mirpb.MirContext, + } + return MIR_TYPE[ms] + + +def _mir_path(ms: 'mirpb.MirStorage.V') -> str: + MIR_PATH = { + mirpb.MirStorage.MIR_METADATAS: 'metadatas.mir', + mirpb.MirStorage.MIR_ANNOTATIONS: 'annotations.mir', + mirpb.MirStorage.MIR_KEYWORDS: 'keywords.mir', + mirpb.MirStorage.MIR_TASKS: 'tasks.mir', + mirpb.MirStorage.MIR_CONTEXT: 'context.mir', + } + return MIR_PATH[ms] + + +def get_all_mir_paths() -> List[str]: + return [_mir_path(ms) for ms in get_all_mir_storage()] + + +def get_all_mir_storage() -> List['mirpb.MirStorage.V']: + return [ + mirpb.MirStorage.MIR_METADATAS, + mirpb.MirStorage.MIR_ANNOTATIONS, + mirpb.MirStorage.MIR_KEYWORDS, + mirpb.MirStorage.MIR_TASKS, + mirpb.MirStorage.MIR_CONTEXT, + ] + + +def locate_asset_path(location: str, hash: str) -> str: + asset_path = get_asset_storage_path(location=location, hash=hash, make_dirs=False, need_sub_folder=True) + if os.path.isfile(asset_path): + return asset_path + + asset_path = get_asset_storage_path(location=location, hash=hash, make_dirs=False, need_sub_folder=False) + if os.path.isfile(asset_path): + return asset_path + + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message=f"cannot locate asset: {hash}") + + +def get_asset_storage_path(location: str, hash: str, make_dirs: bool = True, need_sub_folder: bool = True) -> str: + if not need_sub_folder: + return os.path.join(location, hash) + + sub_dir = os.path.join(location, hash[-2:]) + if make_dirs: + os.makedirs(sub_dir, exist_ok=True) + return os.path.join(sub_dir, hash) diff --git a/ymir/updater/app/requirements.txt b/ymir/updater/app/requirements.txt new file mode 100644 index 0000000000..4e09586d6f --- /dev/null +++ b/ymir/updater/app/requirements.txt @@ -0,0 +1,6 @@ +fasteners==0.16.3 +protobuf==3.18.1 +pyyaml==5.4.1 +pydantic==1.9.0 + +--no-binary=pydantic diff --git a/ymir/updater/app/start.py b/ymir/updater/app/start.py new file mode 100644 index 0000000000..8cb5ec4026 --- /dev/null +++ b/ymir/updater/app/start.py @@ -0,0 +1,31 @@ +import logging +import os +import sys + +from common_utils.sandbox_updater import update +from common_utils.sandbox_util import detect_sandbox_src_versions +from mir.version import YMIR_VERSION + + +def main() -> int: + sandbox_root = os.environ['BACKEND_SANDBOX_ROOT'] + + sandbox_versions = detect_sandbox_src_versions(sandbox_root) + if len(sandbox_versions) != 1: + raise Exception(f"invalid sandbox versions: {sandbox_versions}") + + update(sandbox_root=sandbox_root, + assets_root=os.environ['ASSETS_PATH'], + models_root=os.environ['MODELS_PATH'], + src_ver=sandbox_versions[0], + dst_ver=YMIR_VERSION) + + return 0 + + +if __name__ == '__main__': + logging.basicConfig(stream=sys.stdout, + format='[%(asctime)s]: %(message)s', + datefmt='%Y%m%d-%H:%M:%S', + level=logging.INFO) + sys.exit(main()) diff --git a/ymir/updater/app/tools.py b/ymir/updater/app/tools.py new file mode 100644 index 0000000000..efcdd72853 --- /dev/null +++ b/ymir/updater/app/tools.py @@ -0,0 +1,26 @@ + +import os +import re +from typing 
import List + +from mir.scm.cmd import CmdScm + + +# repo funcs +def get_repo_tags(mir_root: str) -> List[str]: + git_cmd = CmdScm(working_dir=mir_root, scm_executable='git') + tags: str = git_cmd.tag() + return sorted(tags.splitlines()) + + +def remove_old_tag(mir_root: str, tag: str) -> None: + git_cmd = CmdScm(working_dir=mir_root, scm_executable='git') + git_cmd.tag(['-d', tag]) + + +# detect models +def get_model_hashes(models_root: str) -> List[str]: + return [ + h for h in os.listdir(models_root) + if re.match(pattern=r'^.{40}$', string=h) and os.path.isfile(os.path.join(models_root, h)) + ] diff --git a/ymir/backend/src/ymir_viz/src/controllers/__init__.py b/ymir/updater/app/update_1_1_0_to_2_0_0/__init__.py similarity index 100% rename from ymir/backend/src/ymir_viz/src/controllers/__init__.py rename to ymir/updater/app/update_1_1_0_to_2_0_0/__init__.py diff --git a/ymir/updater/app/update_1_1_0_to_2_0_0/step_updater.py b/ymir/updater/app/update_1_1_0_to_2_0_0/step_updater.py new file mode 100644 index 0000000000..27e9b9cbc3 --- /dev/null +++ b/ymir/updater/app/update_1_1_0_to_2_0_0/step_updater.py @@ -0,0 +1,255 @@ +""" +Updater from 1.1.0 to 2.0.0 + +# Update items for mir repos +* `MirMetadatas`: + * add `byte_size` to `MetadataAttributes` +* `MirAnnotations` + * use previous annotations as both `prediction` and `ground_truth` +* `MirTasks`: + * create default_best_stage + * ignore all evaluation result in previous datasets +* `MirKeywords` & `MirContext`: + * regenerated using new data structures + +# Update items for user models +* update structure of all model packages +""" + +import logging +import os +import re +import shutil +import tarfile +import time +from typing import List, Tuple + +from google.protobuf.json_format import MessageToDict, ParseDict +import yaml + +from id_definition.task_id import IDProto +from mir.tools import revs_parser, models +from mir.protos import mir_command_110_pb2 as pb_src, mir_command_200_pb2 as pb_dst +from mir.tools import mir_storage_ops_110 as mso_src, mir_storage_ops_200 as mso_dst +from mir.version import ymir_model_salient_version, DEFAULT_YMIR_SRC_VERSION + +from tools import get_repo_tags, remove_old_tag, get_model_hashes + +_MirDatasSrc = Tuple[pb_src.MirMetadatas, pb_src.MirAnnotations, pb_src.Task] +_MirDatasDst = Tuple[pb_dst.MirMetadatas, pb_dst.MirAnnotations, pb_dst.Task] + +_DEFAULT_STAGE_NAME = 'default_best_stage' +_SRC_YMIR_VER = '1.1.0' +_DST_YMIR_VER = '2.0.0' + + +# update user repo +def update_repo(mir_root: str, assets_root: str, models_root: str) -> None: + logging.info(f"updating repo: {mir_root}, {_SRC_YMIR_VER} -> {_DST_YMIR_VER}") + + for tag in get_repo_tags(mir_root): + if not re.match(f"^.{{{IDProto.ID_LENGTH}}}@.{{{IDProto.ID_LENGTH}}}$", tag): + logging.info(f"  skip: {tag}") + continue + + logging.info(f"  updating: {tag}") + rev_tid = revs_parser.parse_single_arg_rev(src_rev=tag, need_tid=True) + datas_src = _load(mir_root, rev_tid) + datas_dst = _update(datas_src, assets_root, models_root) + _save(mir_root, rev_tid, datas_dst) + + +def _load(mir_root: str, rev_tid: revs_parser.TypRevTid) -> _MirDatasSrc: + m, a, t = mso_src.MirStorageOps.load_multiple_storages( + mir_root=mir_root, + mir_branch=rev_tid.rev, + mir_task_id=rev_tid.tid, + ms_list=[pb_src.MIR_METADATAS, pb_src.MIR_ANNOTATIONS, pb_src.MIR_TASKS]) + return (m, a, t.tasks[t.head_task_id]) + + +def _update(datas_src: _MirDatasSrc, assets_root: str, models_root: str) -> _MirDatasDst: + mir_metadatas_src, mir_annotations_src, task_src = datas_src + 
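+    # The three storages are converted independently: metadatas gain byte_size (stat'ed from the asset files on disk), the old single task annotations become ground_truth, and the task gains a default best stage describing its model package.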
return (_update_metadatas(mir_metadatas_src, assets_root), _update_annotations(mir_annotations_src), + _update_task(task_src, models_root)) + + +def _save(mir_root: str, rev_tid: revs_parser.TypRevTid, datas_dst: _MirDatasDst) -> None: + # remove old tag + remove_old_tag(mir_root=mir_root, tag=rev_tid.rev_tid) + # save + mir_metadatas_dst, mir_annotations_dst, task_dst = datas_dst + mso_dst.MirStorageOps.save_and_commit(mir_root=mir_root, + mir_branch=rev_tid.rev, + his_branch=rev_tid.rev, + mir_datas={ + pb_dst.MirStorage.MIR_METADATAS: mir_metadatas_dst, + pb_dst.MirStorage.MIR_ANNOTATIONS: mir_annotations_dst, + }, + task=task_dst) + + +def _update_metadatas(mir_metadatas_src: pb_src.MirMetadatas, assets_root: str) -> pb_dst.MirMetadatas: + mir_metadatas_dst = pb_dst.MirMetadatas() + for asset_id, attr_src in mir_metadatas_src.attributes.items(): + attr_dst = pb_dst.MetadataAttributes(tvt_type=attr_src.tvt_type, + asset_type=attr_src.asset_type, + width=attr_src.width, + height=attr_src.height, + image_channels=attr_src.image_channels) + + attr_dst.timestamp.start = attr_src.timestamp.start + attr_dst.timestamp.duration = attr_src.timestamp.duration + + asset_path = mso_dst.locate_asset_path(location=assets_root, hash=asset_id) + attr_dst.byte_size = os.stat(asset_path).st_size if asset_path else 0 + + mir_metadatas_dst.attributes[asset_id].CopyFrom(attr_dst) + return mir_metadatas_dst + + +def _update_annotations(mir_annotations_src: pb_src.MirAnnotations) -> pb_dst.MirAnnotations: + task_annotations_src = mir_annotations_src.task_annotations[mir_annotations_src.head_task_id] + + mir_annotations_dst = pb_dst.MirAnnotations() + for asset_id, single_image_annotations_src in task_annotations_src.image_annotations.items(): + single_image_annotations_dst = mir_annotations_dst.ground_truth.image_annotations[asset_id] + for annotation_src in single_image_annotations_src.annotations: + object_annotation_dst = pb_dst.ObjectAnnotation() + ParseDict(MessageToDict(annotation_src, preserving_proto_field_name=True, use_integers_for_enums=True), + object_annotation_dst) + object_annotation_dst.anno_quality = -1 + object_annotation_dst.cm = pb_dst.ConfusionMatrixType.NotSet + object_annotation_dst.det_link_id = -1 + single_image_annotations_dst.boxes.append(object_annotation_dst) + + mir_annotations_dst.ground_truth.task_id = mir_annotations_src.head_task_id + mir_annotations_dst.ground_truth.type = pb_dst.AnnoType.AT_DET_BOX + + return mir_annotations_dst + + +def _update_task(task_src: pb_src.Task, models_root: str) -> pb_dst.Task: + task_dst = pb_dst.Task(type=task_src.type, + name=task_src.name, + task_id=task_src.task_id, + timestamp=task_src.timestamp, + return_code=task_src.return_code, + return_msg=task_src.return_msg, + serialized_task_parameters=task_src.serialized_task_parameters, + serialized_executor_config=task_src.serialized_executor_config, + src_revs=task_src.src_revs, + dst_rev=task_src.dst_rev, + executor=task_src.executor) + for k, v in task_src.unknown_types.items(): + task_dst.new_types[k] = v + task_dst.new_types_added = (len(task_dst.new_types) > 0) + + # model meta + model_src = task_src.model + if model_src.model_hash: + model_dst = pb_dst.ModelMeta(model_hash=model_src.model_hash, + mean_average_precision=model_src.mean_average_precision, + context=model_src.context, + best_stage_name=_DEFAULT_STAGE_NAME) + + stage_dst = pb_dst.ModelStage(stage_name=_DEFAULT_STAGE_NAME, + mAP=model_src.mean_average_precision, + timestamp=task_src.timestamp) + stage_dst.files[:] = 
_get_model_file_names(os.path.join(models_root, model_src.model_hash)) + model_dst.stages[_DEFAULT_STAGE_NAME].CopyFrom(stage_dst) + + model_dst.class_names[:] = _get_model_class_names(task_src.serialized_executor_config) + task_dst.model.CopyFrom(model_dst) + + # evaluation: no need to update + + return task_dst + + +def _get_model_file_names(model_path: str) -> List[str]: + with tarfile.open(model_path, 'r') as f: + file_names = [x.name for x in f.getmembers() if x.name != 'ymir-info.yaml'] + return file_names + + +def _get_model_class_names(serialized_executor_config: str) -> List[str]: + if not serialized_executor_config: + return [] + + executor_config = yaml.safe_load(serialized_executor_config) + return executor_config.get('class_names', []) + + +# update models root +def update_models(models_root: str) -> None: + logging.info(f"updating models: {models_root}") + model_work_dir = os.path.join(models_root, 'work_dir') + + for model_hash in get_model_hashes(models_root): + logging.info(f"model hash: {model_hash}") + + if os.path.isdir(model_work_dir): + shutil.rmtree(model_work_dir) + os.makedirs(model_work_dir, exist_ok=False) + + model_path = os.path.join(models_root, model_hash) + + # extract + with tarfile.open(model_path, 'r') as f: + f.extractall(model_work_dir) + + os.remove(model_path) + with open(os.path.join(model_work_dir, 'ymir-info.yaml'), 'r') as f: + ymir_info_src = yaml.safe_load(f.read()) + _check_model(ymir_info_src) + + # check model producer version + package_version = ymir_info_src.get('package_version', DEFAULT_YMIR_SRC_VERSION) + if ymir_model_salient_version(package_version) == ymir_model_salient_version(_DST_YMIR_VER): + logging.info('  no need to update, skip') + continue + + # update ymir-info.yaml + executor_config_dict = ymir_info_src.get('executor_config', {}) + task_context_dict = ymir_info_src.get('task_context', {}) + models_list = ymir_info_src['models'] + + best_stage_name = 'default_best_stage' + model_stage_dict = { + best_stage_name: { + 'files': models_list, + 'mAP': task_context_dict.get('mAP', 0), + 'stage_name': best_stage_name, + 'timestamp': int(time.time()), + } + } + + ymir_info_dst = { + 'executor_config': executor_config_dict, + 'task_context': task_context_dict, + 'stages': model_stage_dict, + 'best_stage_name': best_stage_name, + 'package_version': ymir_model_salient_version(_DST_YMIR_VER), + } + + # pack again + model_storage = models.ModelStorage.parse_obj(ymir_info_dst) + new_model_hash = models.pack_and_copy_models(model_storage=model_storage, + model_dir_path=model_work_dir, + model_location=model_work_dir)  # avoid hash conflict + shutil.move(os.path.join(model_work_dir, new_model_hash), model_path) + + # cleanup + if os.path.isdir(model_work_dir): + shutil.rmtree(model_work_dir) + + +def _check_model(ymir_info: dict) -> None: + # executor_config: must be dict + # models: must be list + executor_config = ymir_info['executor_config'] + models_list = ymir_info['models'] + if not executor_config or not isinstance(executor_config, dict) or not models_list or not isinstance( + models_list, list): + raise ValueError('Invalid ymir-info.yaml for model version 1.1.0') diff --git a/ymir/updater/readme.md b/ymir/updater/readme.md new file mode 100644 index 0000000000..619e9cb95e --- /dev/null +++ b/ymir/updater/readme.md @@ -0,0 +1,36 @@ +# ymir-updater User's Guide + +## Prepare for upgrade + +1. Make sure the currently running YMIR system version is 1.1.0. ymir-updater does not support upgrades from other versions. +2. 
Make sure all training, mining and inference tasks in YMIR have been stopped, then stop the YMIR system: bash ymir.sh stop +3. It is recommended to back up the directory pointed to by YMIR_PATH in .env; by default this is ymir-workplace under the YMIR code directory. +4. Make sure there is enough hard disk space for the upgrade: everything except ymir-assets is backed up automatically during the upgrade. For example, if ymir-workplace takes 500G of disk space, of which ymir-assets takes 200G, the remaining 300G is backed up, so at least 300G of additional space is needed. +5. Download the target version of YMIR and modify the .env file according to the old version's configuration. +In particular, copy MYSQL_INITIAL_USER and MYSQL_INITIAL_PASSWORD directly from the old version; these old values are required to log in. +6. If you are on an intranet, or on a network that cannot reach dockerhub, first obtain the upgrade image matching your YMIR system; the image name can be found in the image entry of docker-compose.updater.yml. +7. If you use labelfree as the labeling tool, note the correspondence between LabelFree and YMIR versions: the YMIR 2.0.0 system must run with the 2.0.0 LabelFree image. + +## Upgrade operation + +After completing the above preparations, run bash ymir.sh update and wait for the upgrade to finish. + +## FAQ + +### 1. The upgrade script reports an error: sandbox not exists + +Cause: the YMIR_PATH or BACKEND_SANDBOX_ROOT specified in .env is wrong, or its value contains ../ + +Solution: check that YMIR_PATH and BACKEND_SANDBOX_ROOT are correct and that their values do not contain ../ + +### 2. After the upgrade, mongodb fails to start with the error: could not find mysql_initial_user xxxx + +Cause: MYSQL_INITIAL_USER and MYSQL_INITIAL_PASSWORD in the .env file are not set to the old version's values + +Solution: refer to step 5 of the preparation + +### 3. After the upgrade completes, or when starting the system after rolling back from the backup, mysql fails to start and reports: No permission + +Cause: when backing up ymir-workplace in step 3 of the preparation, the permissions of the ymir-workplace/mysql directory and its files may have been changed + +Solution: sudo chown -R 27:sudo ymir-workplace/mysql diff --git a/ymir/updater/readme_zh-CN.md b/ymir/updater/readme_zh-CN.md new file mode 100644 index 0000000000..0ad6c6193c --- /dev/null +++ b/ymir/updater/readme_zh-CN.md @@ -0,0 +1,36 @@ +# ymir-updater 使用指南 + +## 升级前的准备工作 + +1. 确认当前运行的 YMIR 系统版本是 1.1.0 版本,ymir-updater 不支持其他版本的升级 +2. 确认 YMIR 中所有训练,挖掘及推理任务都已经停止,之后再停止 ymir 系统:bash ymir.sh stop +3. 建议备份 .env 的 YMIR_PATH 所指向的目录,默认为 YMIR 代码目录的 ymir-workplace +4. 确认升级所需硬盘空间是否足够:如果 ymir-workplace 占用 500G 硬盘空间,其中 ymir-assets 200G,则其余内容都会在升级过程中自动备份。即至少需要额外 300G 空间进行升级 +5. 下载 YMIR 目标版本,并依据旧版本的配置修改 .env 文件 +特别的,MYSQL_INITIAL_USER 及 MYSQL_INITIAL_PASSWORD 直接将旧版本的值复制过来。需要这些旧值登录 +6. 如果位于内网,或是位于无法连接 dockerhub 的网络环境中,需要先取得与 YMIR 系统对应的升级镜像,镜像名称可以通过 docker-compose.updater.yml 中的 image 配置项得到 +7. 如果使用 labelfree 作为标注工具,请注意 LabelFree 版本与 YMIR 版本的对应关系,YMIR 2.0.0 系统需要搭配 2.0.0 版本的 LabelFree 镜像运行 + +## 升级操作 + +确认上述准备工作做完以后,运行 bash ymir.sh update 进行升级,等待升级完成 + +## 常见问题 + +### 1. 升级脚本报错:sandbox not exists + +原因:.env 中指定的 YMIR_PATH 或 BACKEND_SANDBOX_ROOT 错误,或者出现 ../ 符号 + +解决方案:检查 YMIR_PATH 和 BACKEND_SANDBOX_ROOT 的正确性,检查它们的值是否出现 ../ 符号 + +### 2. 升级完成后,mongodb 启动失败,并报错:could not find mysql_initial_user xxxx + +原因:.env 文件中的 MYSQL_INITIAL_USER 和 MYSQL_INITIAL_PASSWORD 未按旧版本设置 + +解决方案:参考准备工作第 5 条 + +### 3. 
升级完成后,或从备份回滚后启动系统时,mysql 启动失败,并报错:No permission + +原因:准备工作第三步备份 ymir-workplace 时,可能更改了 ymir-workplace/mysql 目录本身及其文件的权限设置 + +解决方案:sudo chown -R 27:sudo ymir-workplace/mysql diff --git a/ymir/web/Dockerfile b/ymir/web/Dockerfile index 11268f55fe..32808f945c 100644 --- a/ymir/web/Dockerfile +++ b/ymir/web/Dockerfile @@ -12,11 +12,10 @@ RUN npm run build:dev FROM nginx:alpine COPY --from=builder /app/ymir /usr/share/nginx/html RUN rm /etc/nginx/conf.d/default.conf -#COPY nginx.conf /etc/nginx/conf.d COPY nginx.conf.template /etc/nginx/conf.d +COPY docs /data/ymir/docs COPY docker-entrypoint.sh / RUN chmod +x /docker-entrypoint.sh ENTRYPOINT ["/docker-entrypoint.sh"] CMD ["nginx", "-g", "daemon off;"] - diff --git a/ymir/web/docker-entrypoint.sh b/ymir/web/docker-entrypoint.sh index 689a372cbe..8135c40ff3 100644 --- a/ymir/web/docker-entrypoint.sh +++ b/ymir/web/docker-entrypoint.sh @@ -2,5 +2,6 @@ set -eu envsubst '${LABEL_TOOL_HOST_URL}' < /etc/nginx/conf.d/nginx.conf.template > /etc/nginx/conf.d/nginx.conf +envsubst '${DEPLOY_MODULE_URL}' < /usr/share/nginx/html/config/config.js.template > /usr/share/nginx/html/config/config.js exec "$@" diff --git a/ymir/backend/src/ymir_viz/src/libs/__init__.py b/ymir/web/docs/.nojekyll similarity index 100% rename from ymir/backend/src/ymir_viz/src/libs/__init__.py rename to ymir/web/docs/.nojekyll diff --git a/ymir/web/docs/README.md b/ymir/web/docs/README.md new file mode 100644 index 0000000000..ae9e0a42b0 --- /dev/null +++ b/ymir/web/docs/README.md @@ -0,0 +1,393 @@ +# 简介 + +Hi,您好,欢迎使用YMIR模型生产平台。YMIR系统为算法人员提供端到端的算法研发工具,围绕AI开发过程中所需要数据处理、模型训练等业务需求提供一站式服务,推动算法技术进步。目前,YMIR系统支持目标检测类模型训练,主要用于检测图中每个物体的位置、类别。适合图中有多个主体要识别、或要识别主体位置及数量的场景。 + + +# 专用名词 + +- 类别/Class:类别一般指用户添加到YMIR系统下的关键词,这类关键词通常用于训练、标注,即用户想要在图像中检测出来的目标物体。 + +- 类别别名/Alias:别名一般和类别的主名对应,当用户为某一个类别添加了别名后,该别名对应的标注框将会在训练中按类别主名来分类训练。 + +- 数据标签/Asset Tag :单个图像数据所带有的标签分类,一般指图像的某个属性,如该图像的来源地点、所属场景等。 + +- 标注框标签/Box Tag:单个标注框所带有的标签分类,一般指标注框的某个属性,如该标注框的质量、分辨率等。 + +- 训练目标/Target:训练目标由用户在类别中选取,即当前要训练的模型想要检测的目标物体。 + +- 镜像/DockerImage:运行训练、挖掘和推理任务的环境,YMIR系统提供一些默认镜像,也支持用户自行开发上传。 + +- 标准值/GroundTruth(GT):数据集中正确的标注值,作为待识别目标的参考标注,一般情况下为人工标注。 + +- 预测标注/Prediction:数据集经过模型推理后产生的标注结果,用于评估模型的识别效果。 + +- 迭代流程/Iteration:YMIR提供标准化的模型迭代流程,并且会在每一步操作中帮助用户默认填入上一次的操作结果,普通用户按照既定步骤操作,即可完成完整的模型迭代流程。迭代的主要目标是为了帮助用户获取更优质的训练数据和效果更好的模型。 + +- 模型部署/Deployment:模型即服务。模型部署是指将模型推理细节打包到模型中,通过一套API实现所有深度学习模型的推理工作。提供标准的http接口,支持用户快速集成与验证。 + +# 使用流程 + +YMIR系统的使用流程一般分为两类,一类是系统的元操作,包括数据的管理、处理、分析、标注以及模型的训练、诊断、部署等功能,全程可视化简易操作。一类是系统提供的迭代流程,将模型训练中的关键步骤进行拆解,辅助用户填入数据,支持用户流程化地优化模型,获得最终的训练结果。了解更多迭代相关的操作,请跳转至[模型迭代](README.md#模型迭代)。 + +接下来我们将详细讲述每个步骤的具体操作,如果有其他问题,请发送邮件到 contact.viesc@gmail.com进行反馈。 + +## 创建项目 + +首先,我们需要在【项目管理】中创建项目,YMIR系统以项目为维度进行数据、模型的管理。 + +![create_a_project_1](https://user-images.githubusercontent.com/90443348/197102009-44240d0d-3954-41a0-9644-cff57f43e2bf.png) + +![create_a_project_2](https://user-images.githubusercontent.com/90443348/197102013-5de37168-d6fc-4cf3-ba96-b21343071715.png) + +请注意,项目的训练目标将会默认设为您在启用【迭代流程】时的训练目标。 + +具体的操作视频如下:[创建项目](https://www.bilibili.com/video/BV1734y1e7SM/?spm_id_from=333.337.search-card.all.click) + +## 添加数据集 + +项目创建完成后,在训练之前需要在项目中添加数据集,导入并标注数据。 + +### 添加类别 + +在上传之前确定想要识别哪几种物体,每个类别对应想要在图片中检测出的一种物体,并将对应的类别添加到【类别管理】中。 + +### 准备数据集 + +基于添加好的类别准备数据集,格式要求如下: + +- 仅支持zip格式压缩包文件上传; + +- 互联网内建议<200MB,局域网内上传压缩包大小<1G, 超过1个G的数据集建议用路径导入; + +- 目前支持图片类型为png、jpg、bmp、jpeg,格式不符的图片将不会导入; + +- 如果需要同步导入标注文件,则标注的文件格式需要为Pascal VOC; + +- 
压缩包文件内图片文件需放入images文件夹内,标准值文件需放入gt文件夹内,预测标注文件需放入pred文件夹内,且pred文件夹内应包含产生该预测结果的模型信息。gt和pred均为可选,如不上传,则需要该文件夹为空,压缩包内文件结构如下。点击下载示例文件:[Sample.zip](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_v2/sample_dataset.zip?raw=true) + +![sample_zip](https://user-images.githubusercontent.com/90443348/197102085-70a17f1f-bcc9-4557-b9b9-9da1ab295f07.png) + +### 上传数据集 + +在完成了添加类别与数据准备后,点击【添加数据集】按钮,进入添加数据集页面。 + +![create_a_dataset_1](https://user-images.githubusercontent.com/90443348/197102336-57980787-6a1c-4294-8ba8-5080830a8117.png) + +可以通过以下方式导入数据: + +①用户本地数据:支持上传压缩包,或通过网络url、路径导入,其中路径导入需要把数据集文件放到 `ymir-workplace/ymir-sharing` 下面,然后在导入页面填上路径地址`ymir-workplace/ymir-sharing/voc2012`。 + +![create_a_dataset_2](https://user-images.githubusercontent.com/90443348/197102339-895615ed-aba1-4577-8506-818f6327d2b1.png) + +②平台已有数据:支持复制该用户下的其他数据集或导入平台已有的公共数据集。 + +![create_a_dataset_3](https://user-images.githubusercontent.com/90443348/197102341-eb976ef5-bea0-4a35-9a6e-a3aebfdaf1a7.png) + +## 数据挖掘 + +YMIR为用户提供成熟的挖掘算法,数据挖掘的主要目的是为了在未标注的数据中找到有利于模型训练的数据。一般挖掘的目标数据集来源于现场数据或相关场景,通过挖掘后可以在其中找出高价值的优质数据并在标注操作中完成标注,从而在扩充训练集的同时降低标注成本。 + +首先选择要挖掘的数据集,点击[挖掘]操作,创建挖掘任务。 + +![mining_1](https://user-images.githubusercontent.com/90443348/197102561-2c845bef-33e5-4641-ade0-99b82f7a5c4e.png) + +![mining_2](https://user-images.githubusercontent.com/90443348/197102564-91446a87-0936-413a-8e13-ea55a9bb3da1.png) + +挖掘模型应选用期望提升效果的模型,如果缺少模型,应前往【模型训练】或【模型导入】获取,其中topk值为挖掘出的数据总量。 + +## 数据标注 + +上传的数据集如果不包含标注文件或用户需要重新标注,即可进入数据标注页面进行标注。 + +Step 1 首先选择要标注的数据集,点击[标注]操作,创建标注任务。 + +![labelling_1](https://user-images.githubusercontent.com/90443348/197102739-2848473f-f5fc-44dd-9573-c86e43ab30c5.png) + +Step 2 填写标注需要的内容,标注目标可在当前用户下的类别列表中选择,支持上传标注标准文档,如果用户尚未注册标注平台的账户,可点击下方链接跳转至标注平台注册账号。 + +![labelling_2](https://user-images.githubusercontent.com/90443348/197102743-d51bb47f-c85f-4e09-9cbe-b51e7b4ac569.png) + +Step 3 标注任务创建完成后,用户可以通过查看标注数据集的详情,跳转至标注平台自行标注。 + +![labelling_3](https://user-images.githubusercontent.com/90443348/197102745-ea0d9321-d509-4e5c-9770-c80de0620404.png) + +## 数据分析 + +您可从【项目管理】下左侧菜单操作列点击【数据集分析】进入该功能页面。 + +数据分析旨在对您数据集中的图像数据进行质量检测,通过提供客观指标,为您对数据集的下一步操作(标注、训练、诊断等)进行参照引导。 + +整体质检报告将包括对标准值、预测标注两个层面的指标进行统计。 + +分析结果分为整体指标和分布指标两类。整体指标包括数据集存储大小、标注框总数、图片平均标注框数量、以及已标注图片占比四类;分布指标包括图像存储大小分布、图像高宽比分布、图像分辨率分布、图像质量分布、标注框分辨率分布以及类别占比分布六类。 + +可以通过切换数据集查看不同数据集的分析报告,支持多选数据集进行比对。 + +![data_analysis_1](https://user-images.githubusercontent.com/90443348/197102803-b5fbef09-bd8c-4a4b-821a-84758927abca.png) + +## 模型训练 + +### 功能页面 + +您可从【项目管理】下左侧菜单操作列点击【模型训练】进入该功能页面。 + +![training_1](https://user-images.githubusercontent.com/90443348/197102856-846c0195-9f7d-4cdc-9321-84e14baf85b8.png) + +如果有指定的数据集作为训练集,也可以在数据集的右侧操作入口中进入训练页面。 + +![training_2](https://user-images.githubusercontent.com/90443348/197102862-8911e447-6cad-403b-bb37-4cfa974084c4.png) + +### 训练配置 + +![training_3](https://user-images.githubusercontent.com/90443348/197102864-d9b8f5e6-f2b0-447f-9150-f2c9a8c39aa5.png) + +Step 1 选择镜像 + +选择此次训练需要的镜像容器,YMIR提供默认的开发镜像,支持yolo v5训练。如需要其他镜像,可以由管理员前往【我的镜像】-【公共镜像】列表页面拉取更多镜像,具体操作参考[镜像管理](README.md#镜像管理)。 + +Step 2 选择训练集 + +选择您想要用于当前模型训练的数据集,数据集可选列表为当前项目下的数据集,无法跨项目选取。注意请确保选中的数据集已经完成数据标注,否则无法启动训练,在数据集标注质量较高的情况下,可能获得的模型效果也会更好。 + +Step 3 选择训练目标 + +训练目标为您本次训练想要识别的物体类别,仅支持在已选中训练集的类别列表中选择,选择完成之后可点击【计算正负样本】按钮计算当前选中类别在训练集中的占比。 + +Step 4 选择验证集 + +AI模型在训练时,每训练一批数据会进行模型效果检验,以验证集的图片作为验证数据,通过结果反馈去调节训练。因此,需要选择一个与训练目标类别一致的数据集作为验证集,用于模型效果的提升。验证集同样需要已标注的数据,否则会影响最终模型的效果。 + +Step 5 预训练模型 + 
+预训练模型:在模型迭代训练时,用户在原训练数据上增加了训练数据,可通过加载原训练数据训练出的模型参数进行模型训练。这样可让模型收敛速度变快,训练时间变短,同时在数据集质量较高的情况下,可能获得的模型效果也会更好。 + +注:仅可选择同一训练镜像下训练出的模型作为预训练模型。 + +Step 6 GPU个数 + +目前YMIR仅支持GPU训练,可以在这里调整用于本次训练的GPU数量,合理分配资源。 + +Step 7 超参数配置 + +超参数配置开关默认关闭,建议对深度学习有一定了解的用户根据实际情况考虑使用,超参数配置在提供镜像内置的参数修改功能外,额外提供「最长边长缩放」配置项。 + +- 最长边长缩放:可以输入的数值调整训练数据的图像尺寸,将图像的最长边设为您所设置的数值,其他边长按比例缩放。 + +### 训练模型 + +点击「开始训练」,训练模型。 + +- 训练时间与数据量大小有关,1000张图片可能需要几个小时训练,请耐心等待。 + +- 模型训练过程中,可以到【模型列表】页面查看模型的训练进度。 + +![training_4](https://user-images.githubusercontent.com/90443348/197102867-06367c2f-5b43-40e3-92b7-d46533fabece.png) + +- 想要查看更多的模型训练过程中的信息,可打开【模型详情】页面,点击【训练过程】按钮,查看训练信息。 + +![training_5](https://user-images.githubusercontent.com/90443348/197102868-c3173e47-9e97-49a2-8fd5-6ed98ccdc317.png) + +![training_6](https://user-images.githubusercontent.com/90443348/197102871-01d8e5fa-b037-4855-a3d9-d8c0f7845a48.png) + +## 模型诊断  + +可通过模型推理或者模型诊断了解模型效果: + +### 模型推理 + +通过模型的【推理】操作,在选中的测试集上生成推理结果,支持同时选中多个数据集或模型进行推理。 + +![inference_1](https://user-images.githubusercontent.com/90443348/197103038-2abdac03-9dac-4ed3-a9dc-35c7f21b28b0.png) + +![inference_2](https://user-images.githubusercontent.com/90443348/197103042-9ca5e75a-ce66-47d3-9663-536b4ca52613.png) + +推理完成后,支持对推理结果进行可视化查看。 + +![inference_3](https://user-images.githubusercontent.com/90443348/197103043-10e7a3ae-1638-4880-bd02-e6be9a2534c7.png) + +![inference_4](https://user-images.githubusercontent.com/90443348/197103047-a2d925b6-2bde-4d07-99b1-074687adf081.png) + +![inference_5](https://user-images.githubusercontent.com/90443348/197103048-11531b6d-8a00-423d-8480-f787d2e20c52.png) + +当前可视化结果支持对推理结果的指标评估,包括FP、FN、TP以及MTP,支持按类别进行筛选查看。 + +FP:False Positive,当前测试图片的标准值不包含正确的检测目标,但模型将其错误地识别为了检测目标。即预测结果中被预测为正类的负样本。 + +FN:False Negative,当前测试图片的标准值为正确的检测目标,但模型未识别到或将其错误地识别为了其他目标。即预测结果中被预测为负类的正样本。 + +TP:True Positive,即在目标预测类别下,和标准值匹配的模型预测结果。 + +MTP:Matched True Positive,即在目标预测类别下,和预测结果匹配的标准值。 + +### 模型诊断 + +在【项目管理】的左侧导航栏中找到【模型诊断】模块,在线评估模型的效果。具体操作为:①选中你要评估的模型,②选择测试集(选中的测试集需要在模型上已完成推理,具体步骤参考[模型推理](README.md#模型推理)),③调整评估参数,点击诊断。 + +- 可以通过切换指标来查看不同参数下的模型诊断结果,诊断结果包括mAP、PR曲线、精确率、召回率。显示结果示例如下: + +![diagnosis_1](https://user-images.githubusercontent.com/90443348/197103137-890695ff-564c-4871-bac5-c7a921744ae0.png) + +![diagnosis_2](https://user-images.githubusercontent.com/90443348/197103151-3e1e2723-b01f-47c3-95a1-c952f5d5be4a.png) + +查看模型诊断结果时,需要思考在当前业务场景,更关注精确率与召回率哪个指标。是更希望减少误识别,还是更希望减少漏识别。前者更需要关注精确率的指标,后者更需要关注召回率的指标。评估指标说明如下: + +mAP: mAP(mean average precision)是目标检测(Object Detection)算法中衡量算法效果的指标。对于目标检测任务,每一类object都可以计算出其精确率(Precision)和召回率(Recall),在不同阈值下多次计算/试验,每个类都可以得到一条P-R曲线,可根据曲线下面积可计算 mAP。 + +精确率: 正确预测的物体数与预测物体总数之比。 + +召回率: 正确预测的物体数与真实物体数之比。 + +## 镜像管理 + +镜像管理为进阶功能,目前仅针对管理员开放,镜像管理中支持用户上传自定义镜像,来实现用户理想的训练、挖掘、推理操作。镜像的具体开发标准请参考[镜像开发文档](https://github.com/IndustryEssentials/ymir/tree/master/docker_executor/sample_executor)。 + +### 新增镜像 + +管理员进入【我的镜像】页面, 点击[新增镜像]按钮,填写镜像名称和地址,完成镜像的添加。 + +![docker_1](https://user-images.githubusercontent.com/90443348/197103227-d471bb6f-3a85-4add-8b10-b3563d9fbdab.png) + +也可通过复制公共镜像的方式进行镜像的添加,进入【公共镜像】页面,点击[复制]按钮,修改名称和描述,完成公共镜像的复制。 + +![docker_2](https://user-images.githubusercontent.com/90443348/197103238-1dad9757-8ad4-49b4-a0e1-62b7b88eb5fc.png) + +### 关联镜像 + +用户自定义的训练、挖掘和推理镜像,一般来说需要具有关联性,才可保证操作流程可串联。也就是说用户A制作的训练镜像所训练出的模型,一般情况下无法适配用户B所制作的挖掘或推理镜像。为了便于用户记忆不同类别间的镜像关系,平台特别设计了镜像关联功能。点击训练镜像的[关联]按钮,选择对应的挖掘镜像: + +![docker_3](https://user-images.githubusercontent.com/90443348/197103242-23d6e053-cac4-4e5f-8ccf-7cf37f44efa9.png) + 
+![docker_4](https://user-images.githubusercontent.com/90443348/197103247-1a6f2fa4-ff72-4a25-82eb-e3757031afab.png) + +注:目前仅支持由训练镜像关联到挖掘镜像。 + +## 模型迭代 + +### 功能概述 + +一个模型很难一次性就训练到最佳的效果,可能需要结合模型推理结果和诊断数据不断扩充数据和调优。 + +为此我们设计了模型迭代功能,下图为一个完整的模型迭代流程,用户通过多次迭代,不断地调整训练数据和算法,多次训练,获得更好的模型效果。 + +![workflow](https://user-images.githubusercontent.com/90443348/197103293-3ad12146-99c3-43ae-adcc-65ec2e90ef23.png) + +开启迭代后,YMIR提供标准化的模型迭代流程,并且会在每一步操作中帮助用户默认填入上一次的操作结果,普通用户按照既定步骤操作,即可完成完整的模型迭代流程。当前操作结果如不符合您的预期,您也可以选择跳过当前操作。 + +### 功能入口 + +创建项目完成后,您可以在【项目概览】页面中点击[系统辅助式模型生产]按钮进入该页面,也可以直接从【项目管理】下左侧菜单操作列点击【项目迭代】进入。 + +![iteration_1](https://user-images.githubusercontent.com/90443348/197103658-51952bc7-1816-4f8e-8de8-cb1bf9e6b936.png) + +### 迭代前准备 + +开启迭代需要用户准备好要使用的数据集以及初始模型,各类数据的作用如下: + +- 训练集:用于训练的初始数据,训练集的类别需要包含当前项目的目标类别。训练集会在每次迭代的过程中通过版本更新的方式不断地扩充。 + +- 挖掘集:这类数据集数量较多且可以尚未标注目标类别,一般来源于现场数据,通过挖掘后可以在其中找出优质数据,用于扩充训练数据。当用户认为挖掘集中已无有价值数据,可以自行替换。 + +- 验证集:用于在模型的训练过程中校验数据,模型迭代过程中使用统一的验 +证集参与训练,可以更好地比对模型训练效果。 + +- 测试集:用于模型训练完成后的效果测试,一般用在模型的推理和诊断环节,便于比对模型在不同的数据环境下的效果。 + +- 初始模型:首次迭代用于挖掘的模型,可以由用户自行导入,也支持由用户通过导入后的训练集自行训练。 + +在迭代准备界面对以上的数据类别分别进行设置,完成后点击[使用迭代功能提升模型效果]按钮,进入迭代流程。 + +![Iteration_2](https://user-images.githubusercontent.com/90443348/197103666-622608e6-56b9-44cf-b936-80899db133b7.png) + +### 迭代流程 + +step 1 挖掘数据准备 + +该操作用于确定待挖掘的数据,在所选挖掘集上进行数据筛选或去重,最终获得的结果就是下一步用于挖掘的数据,此步骤可跳过。 + +![Iteration_pre_1](https://user-images.githubusercontent.com/90443348/197103682-acf60169-d535-4d6d-926a-95ff518e658f.png) + +![Iteration_pre_2](https://user-images.githubusercontent.com/90443348/197103685-fe52fbb7-3db4-4aca-b0a9-05400072062d.png) + +step 2 数据挖掘 + +根据上一步获取到的待挖掘数据, 设置用户想要挖掘的数据量,其他参数均有迭代系统辅助填写,具体操作可参考[数据挖掘](README.md#数据挖掘)。注意,这里用于挖掘的模型是您上次迭代获取到的最终训练模型(如果是第一次迭代,则这里是您设置的初始模型),挖掘任务完成后获取挖掘结果数据,此步骤可跳过。 + +![Iteration_mine_1](https://user-images.githubusercontent.com/90443348/197103678-e9e4a911-6e14-4f08-9736-862f0685696e.png) + +step 3 数据标注 + +挖掘后的结果一般不带有用户想要训练的目标类别标注,这是需要对挖掘结果进行人工标注,在迭代流程中点击[数据标注]按钮,进入标注页面,待标注数据为上一步中的挖掘结果,其他操作参考[数据标注](README.md#数据标注),此步骤可跳过。 + +![Iteration_label](https://user-images.githubusercontent.com/90443348/197103672-6baba1d3-a08a-460f-b2d6-ef2b783dd403.png) + +step 4 更新训练集 + +迭代的主要目的是扩充用户的训练数据,将已经标注好的挖掘结果合并到之前的训练集中,生成新的训练集版本,用于模型训练。 + +![Iteration_merge](https://user-images.githubusercontent.com/90443348/197103675-0b5e1259-4f69-410d-9855-a498cfe3e506.png) + +step 5 模型训练 + +已合并后的训练集需要再次进行训练产生新的模型,注意,这里的验证集是用户在迭代前所设置的验证集,为了保证模型效果的一致性,暂不支持更改,其他操作参考[模型训练](README.md#模型训练)。点击[训练]按钮后获得本次迭代的模型结果。 + +![Iteration_train_1](https://user-images.githubusercontent.com/90443348/197103686-e6eaff89-6b75-4efc-940f-d326cf17ef88.png) + +step 6 下一轮迭代 + +如果效果未达到您的要求,可以点击[开启下一轮迭代]按钮继续进行下一次迭代。每轮迭代的流程一致,可按用户的需求自行跳过某些步骤。 + +step7 查看迭代历史 + +在完成迭代过程后,如果需要查看之前或者当前迭代的信息,可点击[迭代历史]页面,查看历史的迭代信息。 + +![Iteration_history](https://user-images.githubusercontent.com/90443348/197103669-e3aa7c8d-bd6c-4a83-8866-d295df6c9445.png) + +## 模型部署 + +出于性能和速度考虑,算法平台训练产生的模型并不直接使用,而是通过提供一键式模型转换与量化工具,将模型转换至具体硬件平台可使用的模型。 + +### 本地部署 + +step 1 进入【模型列表】页面,点击[发布]按钮,发布完成后请前往【模型部署】模块【我的算法】页面查看发布结果。 + +![release_1](https://user-images.githubusercontent.com/90443348/197104254-ab883862-2bf6-4172-8088-ffd66efdc16e.png) + +![release_2](https://user-images.githubusercontent.com/90443348/197104257-d87414b1-e1c4-4ae1-b2db-d1fcb1d00002.png) + +step 2 进入【我的算法】页面, 对选中的已发布模型点击[部署]按钮,进入【模型部署】页面,选择要部署的设备。设备列表为当前服务器环境下的设备,如需要选择其他设备,请前往【设备列表】页面添加。 + +![deploy_1](https://user-images.githubusercontent.com/90443348/197104304-816db04c-ab77-4a71-947a-1089c3a983dd.png) + 
+![deploy_2](https://user-images.githubusercontent.com/90443348/197104310-7c4082df-0464-4e5b-8a4d-386a2cc5f5af.png) + +![deploy_3](https://user-images.githubusercontent.com/90443348/197104311-b63e5972-e5da-443e-aa61-07e9287a1ae2.png) + +step 3 部署完成后,可前往设备页面查看模型运行情况。前往【设备列表】页面,点击设备名称,进入设备详情页面查看。在设备的【算法中心】页面可设置算法的开启状态。 + +![device_1](https://user-images.githubusercontent.com/90443348/197104347-9fc33fda-50f2-4059-9147-3b34444179ea.png) + +![device_2](https://user-images.githubusercontent.com/90443348/197104344-4d8a3670-9d9b-4d5c-b37f-61a9653206af.png) + +### 发布到公共算法库 + +step 1 进入【我的算法】页面, 对选中的已发布模型点击[发布到公共算法]按钮,填写信息点击[确定]后,算法会交给后台人工审核打包。 + +![public_alg_1](https://user-images.githubusercontent.com/90443348/197187010-a2d5d212-aacb-4e30-8c20-04ad1f910fab.jpg) + +![public_alg_2](https://user-images.githubusercontent.com/90443348/197187014-bd793823-bfb9-4d50-a4a8-1a78ccd60545.jpg) + +![public_alg_3](https://user-images.githubusercontent.com/90443348/197187016-31fc5b8d-1b35-4a19-849e-7d265cff7ce8.jpg) + +step 2 审核通过后即可前往【模型部署】-【公共算法】页面查看对应的模型。 + +![public_alg_4](https://user-images.githubusercontent.com/90443348/197187018-6ffa9aaf-66e3-40de-8f11-24732ae3af4d.jpg) + +![public_alg_5](https://user-images.githubusercontent.com/90443348/197187022-25b4baa8-2857-4a80-a8cc-bf0f47b0d1f2.jpg) + +step 3 点击该模型进入算法详情页,可输入图片URL试用。 + +![public_alg_6](https://user-images.githubusercontent.com/90443348/197187023-793edcaa-fca5-4a8f-a819-b0ab93bcdd11.jpg) + +![public_alg_7](https://user-images.githubusercontent.com/90443348/197187025-61ecde5b-0285-403b-a8c5-e7d641b9d18b.jpg) + +![public_alg_8](https://user-images.githubusercontent.com/90443348/197187028-fe008729-4eca-4791-816e-e657597e941b.jpg) diff --git a/ymir/web/docs/_navbar.md b/ymir/web/docs/_navbar.md new file mode 100644 index 0000000000..bea54c9677 --- /dev/null +++ b/ymir/web/docs/_navbar.md @@ -0,0 +1,2 @@ +* [En](/en-us/) +* [简体中文](/) diff --git a/ymir/web/docs/_sidebar.md b/ymir/web/docs/_sidebar.md new file mode 100644 index 0000000000..4914f199d9 --- /dev/null +++ b/ymir/web/docs/_sidebar.md @@ -0,0 +1 @@ +- [操作说明](README.md) diff --git a/ymir/web/docs/docsify.min.js b/ymir/web/docs/docsify.min.js new file mode 100644 index 0000000000..eef06bd790 --- /dev/null +++ b/ymir/web/docs/docsify.min.js @@ -0,0 +1 @@ +!function(){function s(n){var r=Object.create(null);return function(e){var t=c(e)?e:JSON.stringify(e);return r[t]||(r[t]=n(e))}}var o=s(function(e){return e.replace(/([A-Z])/g,function(e){return"-"+e.toLowerCase()})}),l=Object.prototype.hasOwnProperty,y=Object.assign||function(e){for(var t=arguments,n=1;n/gm),it=Q(/^data-[\-\w.\u00B7-\uFFFF]/),ot=Q(/^aria-[\-\w]+$/),at=Q(/^(?:(?:(?:f|ht)tps?|mailto|tel|callto|cid|xmpp):|[^a-z]|[a-z+.\-]+(?:[^a-z+.\-:]|$))/i),st=Q(/^(?:\w+script|data):/i),lt=Q(/[\u0000-\u0020\u00A0\u1680\u180E\u2000-\u2029\u205F\u3000]/g),ct="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol&&e!==Symbol.prototype?"symbol":typeof e};function ut(e){if(Array.isArray(e)){for(var t=0,n=Array(e.length);t/i,t))xe(o,e);else{W&&(t=De(t,F," "),t=De(t,C," "));var l=e.nodeName.toLowerCase();if(Re(l,s,t))try{a?e.setAttributeNS(a,o,t):e.setAttribute(o,t),Le(c.removed)}catch(e){}}}Te("afterSanitizeAttributes",e,null)}}function $e(e){var t,n=Se(e);for(Te("beforeSanitizeShadowDOM",e,null);t=n.nextNode();)Te("uponSanitizeShadowNode",t,null),Ee(t)||(t.content instanceof u&&$e(t.content),Oe(t));Te("afterSanitizeShadowDOM",e,null)}return 
c.sanitize=function(e,t){var n,r=void 0,i=void 0,o=void 0;if((fe=!e)&&(e="\x3c!--\x3e"),"string"!=typeof e&&!Ae(e)){if("function"!=typeof e.toString)throw He("toString is not a function");if("string"!=typeof(e=e.toString()))throw He("dirty is not a string, aborting")}if(!c.isSupported){if("object"===ct(s.toStaticHTML)||"function"==typeof s.toStaticHTML){if("string"==typeof e)return s.toStaticHTML(e);if(Ae(e))return s.toStaticHTML(e.outerHTML)}return e}if(Y||O(t),c.removed=[],"string"==typeof e&&(re=!1),!re)if(e instanceof p)1===(t=(r=_e("\x3c!----\x3e")).ownerDocument.importNode(e,!0)).nodeType&&"BODY"===t.nodeName||"HTML"===t.nodeName?r=t:r.appendChild(t);else{if(!K&&!W&&!V&&-1===e.indexOf("<"))return k&&ee?k.createHTML(e):e;if(!(r=_e(e)))return K?null:w}r&&X&&we(r.firstChild);for(var a=Se(re?e:r);n=a.nextNode();)3===n.nodeType&&n===i||Ee(n)||(n.content instanceof u&&$e(n.content),Oe(n),i=n);if(i=null,re)return e;if(K){if(Q)for(o=S.call(r.ownerDocument);r.firstChild;)o.appendChild(r.firstChild);else o=r;return J&&(o=T.call(l,o,!0)),o}return e=V?r.outerHTML:r.innerHTML,W&&(e=De(e,F," "),e=De(e,C," ")),k&&ee?k.createHTML(e):e},c.setConfig=function(e){O(e),Y=!0},c.clearConfig=function(){ge=null,Y=!1},c.isValidAttribute=function(e,t,n){return ge||O({}),e=Ne(e),t=Ne(t),Re(e,t,n)},c.addHook=function(e,t){"function"==typeof t&&(R[e]=R[e]||[],ze(R[e],t))},c.removeHook=function(e){R[e]&&Le(R[e])},c.removeHooks=function(e){R[e]&&(R[e]=[])},c.removeAllHooks=function(){R={}},c}();function se(e){var t,n=e.loaded,r=e.total,i=e.step;ie||((e=v("div")).classList.add("progress"),a(g,e),ie=e),t=i?80<(t=parseInt(ie.style.width||0,10)+i)?80:t:Math.floor(n/r*100),ie.style.opacity=1,ie.style.width=95<=t?"100%":t+"%",95<=t&&(clearTimeout(oe),oe=setTimeout(function(e){ie.style.opacity=0,ie.style.width="0%"},200))}var le={};function ce(i,e,t){void 0===e&&(e=!1),void 0===t&&(t={});function o(){a.addEventListener.apply(a,arguments)}var n,a=new XMLHttpRequest,r=le[i];if(r)return{then:function(e){return e(r.content,r.opt)},abort:u};for(n in a.open("GET",i),t)l.call(t,n)&&a.setRequestHeader(n,t[n]);return a.send(),{then:function(t,n){var r;void 0===n&&(n=u),e&&(r=setInterval(function(e){return se({step:Math.floor(5*Math.random()+1)})},500),o("progress",se),o("loadend",function(e){se(e),clearInterval(r)})),o("error",n),o("load",function(e){e=e.target;400<=e.status?n(e):(e=le[i]={content:e.response,opt:{updatedAt:a.getResponseHeader("last-modified")}},t(e.content,e.opt))})},abort:function(e){return 4!==a.readyState&&a.abort()}}}function ue(e,t){e.innerHTML=e.innerHTML.replace(/var\(\s*--theme-color.*?\)/g,t)}var pe=f.title;function he(){var e,t=d("section.cover");t&&(e=t.getBoundingClientRect().height,window.pageYOffset>=e||t.classList.contains("hidden")?S(g,"add","sticky"):S(g,"remove","sticky"))}function de(e,t,r,n){var i=[];null!=(t=d(t))&&(i=k(t,"a"));var o,a=decodeURI(e.toURL(e.getCurrentPath()));return i.sort(function(e,t){return t.href.length-e.href.length}).forEach(function(e){var t=decodeURI(e.getAttribute("href")),n=r?e.parentNode:e;e.title=e.title||e.innerText,0!==a.indexOf(t)||o?S(n,"remove","active"):(o=e,S(n,"add","active"))}),n&&(f.title=o?o.title||o.innerText+" - "+pe:pe),o}function fe(e,t){for(var n=0;nthis.end&&e>=this.next}[this.direction]}},{key:"_defaultEase",value:function(e,t,n,r){return(e/=r/2)<1?n/2*e*e+t:-n/2*(--e*(e-2)-1)+t}}]),be);function be(){var 
[Minified vendored JavaScript omitted here: a docsify v4.12.2 bundle (which also bundles marked and Prism), evidently vendored for the docs site added in this diff. The blob was extraction-garbled (decoded HTML entities, lost line breaks) and is library code, not hand-written in this PR.]
diff --git a/ymir/web/docs/en-us/README.md b/ymir/web/docs/en-us/README.md
new file mode 100644
index 0000000000..d92362c448
--- /dev/null
+++ b/ymir/web/docs/en-us/README.md
@@ -0,0 +1,379 @@
# Introduction

Welcome to the YMIR model production platform. YMIR advances algorithm development by providing end-to-end development tools for algorithm developers and one-stop services for data processing, model training, and the other stages of the AI development workflow. YMIR currently supports training object detection models, which locate and classify each object in an image. It suits scenarios where an image contains multiple subjects to identify, or where the position and number of subjects matter.

# Terminology

- Class: a keyword added by a user to the YMIR system. Classes are used for training and labeling; they name the target objects the user wants to detect in images.

- Class Alias: an alternative name attached to a class's primary name. When a user adds an alias to a class, annotation boxes labeled with the alias are grouped under the class's primary name during training.

- Asset Tag: a tag on a single image, usually describing an attribute of the image, such as where it was collected or the scene it belongs to.
- Box Tag: a tag on a single annotation box, usually describing an attribute of the box, such as its quality or resolution.

- Target: the training target the user selects from the classes, i.e., the objects the model to be trained should detect.

- DockerImage: the environment in which training, mining, and inference tasks run. YMIR provides default docker images and also supports users developing and uploading their own.

- GroundTruth (GT): the reference annotations in a dataset, used as the correct answers for the targets to be recognized; usually produced by manual labeling.

- Prediction: the annotations produced by running model inference on a dataset; used to evaluate how well the model recognizes the targets.

- Iteration: YMIR provides a standardized model iteration process and pre-fills each step with the result of the previous operation. The main goal of iteration is to help users obtain higher-quality training data and better models.

- Deployment: Model as a Service. Model deployment packages the inference details into the model and exposes the inference of any deep learning model through a single set of APIs. A standard HTTP interface is provided so users can integrate and validate quickly.

# Usage Process

Work in YMIR generally falls into two categories. The first is the system's basic operations: data management, processing, analysis, and labeling, plus model training, diagnosis, and deployment; the whole process is visualized and easy to operate. The second is the iteration process provided by the system, which breaks model training into key steps and pre-fills the data at each step, helping users optimize the model and reach the final training result. To learn more about iteration, jump to [Model Iteration](en-us/README.md#model-iteration).

Each step is described in detail below. If you have any other questions, please email us at contact.viesc@gmail.com.

## Create Projects

First, create a project in [Project Management]; YMIR manages data and models at the project level.

![create_a_project_1](https://user-images.githubusercontent.com/90443348/197174653-99973fcf-9368-4741-8b68-447f246a6c18.png)

![create_a_project_2](https://user-images.githubusercontent.com/90443348/197174826-f6eef2bf-a016-4d37-84cb-0336b707a058.png)

Note that the project's training target defaults to the training goal you set when [Iterative Process] was enabled.

## Adding Datasets

After the project is created, add datasets to it, then import and label the data before training.

### Adding Classes

Before uploading, decide which objects you want to identify. Each class corresponds to one kind of object you want to detect in the images; add the corresponding classes in [Class Management].

### Prepare Dataset

Prepare the dataset according to the added classes. The format requirements are as follows:

- Only zip archives are supported for upload.

- Keep uploaded zip files under 200 MB over the Internet and under 1 GB on a LAN; use path import for datasets over 1 GB.

- Supported image types are png, jpg, bmp, and jpeg; images in other formats will not be imported.

- If annotation files are imported at the same time, they must be in Pascal VOC format.

- Place image files in the `images` folder, ground truth files in the `gt` folder, and prediction annotation files in the `pred` folder; the `pred` folder must also contain the information of the model that generated the predictions. `gt` and `pred` are optional; if not uploaded, leave the folder empty. The file structure inside the zip package is shown below (see also the packing sketch after the diagram). Click to download the example file: [Sample.zip](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_v2/sample_dataset.zip?raw=true)

![sample_zip](https://user-images.githubusercontent.com/90443348/197174963-a5d818aa-5d26-4742-99c8-3b23f693be1d.png)
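As a concrete illustration of the layout above, here is a minimal Python sketch that packs one image and its ground truth into a zip with the expected structure. This is not a YMIR tool: the file name `0001.jpg`, the class `cat`, the image size, and the box coordinates are invented for the example, and the VOC file is trimmed to a few essential fields. Use Sample.zip above as the authoritative reference.

```python
import zipfile
from pathlib import Path

# Minimal Pascal VOC annotation for one image containing a single "cat" box.
# File name, image size, and box coordinates are illustrative only.
VOC_XML = """<annotation>
    <filename>0001.jpg</filename>
    <size><width>640</width><height>480</height><depth>3</depth></size>
    <object>
        <name>cat</name>
        <bndbox><xmin>48</xmin><ymin>60</ymin><xmax>320</xmax><ymax>400</ymax></bndbox>
    </object>
</annotation>
"""

def build_sample_dataset(src_image: Path, out_zip: Path) -> None:
    """Pack one image and its ground truth into the layout described above."""
    with zipfile.ZipFile(out_zip, "w") as zf:
        zf.write(src_image, "images/0001.jpg")  # images/ holds the pictures
        zf.writestr("gt/0001.xml", VOC_XML)     # gt/ holds Pascal VOC ground truth
        # pred/ is optional and omitted here; add it only if you also have
        # model predictions plus the generating model's information.

if __name__ == "__main__":
    # Requires an actual 0001.jpg next to the script.
    build_sample_dataset(Path("0001.jpg"), Path("sample_dataset.zip"))
```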
### Uploading the Dataset

After adding classes and preparing the data, click the [Add dataset] button to open the Add Dataset page.

![create_a_dataset_1](https://user-images.githubusercontent.com/90443348/197175120-b7dbe772-83fd-4802-97fc-4d03f2f9e87c.png)

Data can be imported in the following ways:

① User's local data: upload a compressed package locally, or import via a network URL or a path. Path import requires placing the dataset under `ymir-workplace/ymir-sharing` and then entering the path, e.g. `ymir-workplace/ymir-sharing/voc2012`, on the import page.

![create_a_dataset_2](https://user-images.githubusercontent.com/90443348/197175212-392997ae-e25e-4dd3-a4b6-041ea9464a5e.png)

② Existing data on the platform: copy another dataset owned by the current user, or import one of the platform's public datasets.

![create_a_dataset_3](https://user-images.githubusercontent.com/90443348/197175249-9c655cfa-4db3-4a1a-aa16-ddc3d84876bb.png)

## Data Mining

YMIR provides users with sophisticated mining algorithms. The purpose of data mining is to find, within unlabeled data, the data most useful for model training. The set to be mined typically comes from field data or related scenarios; mining surfaces the high-value data within it. This reduces labeling cost and, once labeling is complete, expands the training set.

First select the dataset to mine, click the [Mine] operation, and create a mining task.

![mining_1](https://user-images.githubusercontent.com/90443348/197175639-84ce9a2e-404d-449f-b6ea-dd40295558c7.png)

![mining_2](https://user-images.githubusercontent.com/90443348/197183451-4027791b-3c4f-41b1-9642-e48b729c9749.png)

The topk value is the total amount of data to mine, and the mining model should be the one whose performance you expect to improve; if no model is available yet, obtain one through [Model Training] or [Model Import] first. The sketch below illustrates the topk idea.
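The following sketch illustrates only the topk selection: rank every unlabeled asset by some model-derived score and keep the k highest. The `score` callable is a stand-in for whatever the mining docker image actually computes (e.g. an uncertainty estimate per image); YMIR's mining algorithms are more sophisticated than this.

```python
from typing import Callable, List, Tuple

def mine_topk(unlabeled: List[str],
              score: Callable[[str], float],
              topk: int) -> List[str]:
    """Rank unlabeled assets by a model-derived score and keep the topk."""
    ranked: List[Tuple[float, str]] = sorted(
        ((score(asset), asset) for asset in unlabeled),
        reverse=True,  # highest-value assets first
    )
    return [asset for _, asset in ranked[:topk]]

# Usage: pick the 500 most informative images out of a candidate pool.
# to_label = mine_topk(candidates, score=my_model_uncertainty, topk=500)
```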
## Data Annotation

If an uploaded dataset contains no annotation files, or needs re-annotation, you can annotate it on the data annotation page.

Step 1: Select the dataset to annotate and click [Annotate] to create the annotation task.

![labelling_1](https://user-images.githubusercontent.com/90443348/197179707-f71f19cf-0d4a-4327-8fb2-1719b0c1b9a5.png)

Step 2: Fill in the information the annotation needs. The annotation target can be chosen from the current user's class list, and uploading an annotation guideline document is supported. If you have not yet registered an account on the annotation platform, click the link below the form to jump to the annotation platform and register one.

![labelling_2](https://user-images.githubusercontent.com/90443348/197179797-3d8f1c03-77c1-450b-b8aa-81deb8576f63.png)

Step 3: Once the annotation task is created, you can view the details of the dataset being annotated and jump to the annotation platform to annotate it yourself.

![labelling_3](https://user-images.githubusercontent.com/90443348/197180017-c067375d-6bad-44da-9e4d-3d232c5d817b.png)

## Data Analysis

Open this page from [Dataset Analysis] in the left menu under [Project Management].

Data analysis performs quality checks on the image data in a dataset, providing objective metrics that guide your next operations (annotation, training, diagnosis, and so on).

The overall quality report includes metric statistics for both ground truth and predictions.

The analysis results fall into two categories: overall metrics and distribution metrics. Overall metrics cover four items: storage size, total number of annotation boxes, average number of annotation boxes, and the proportion of annotated images. Distribution metrics cover six items: image storage size, image aspect ratio, image resolution, image quality, annotation box resolution, and class proportion. A sketch of how such a distribution can be computed follows the screenshot below.

You can switch datasets to view each one's analysis report, and select multiple datasets for comparison.

![data_analysis_1](https://user-images.githubusercontent.com/90443348/197180186-9f112a9d-e6d7-4848-afc0-48f895c6b1dd.png)
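As an illustration of what a distribution metric measures, the sketch below computes an image aspect-ratio histogram for a folder of images. It assumes the Pillow package is installed and is not YMIR's implementation; the bin width and file filtering are arbitrary choices for the example.

```python
from collections import Counter
from pathlib import Path

from PIL import Image  # assumes Pillow is installed

def aspect_ratio_distribution(image_dir: str, bin_width: float = 0.25) -> Counter:
    """Histogram of width/height ratios for every image in a folder."""
    counts: Counter = Counter()
    for path in Path(image_dir).glob("*"):
        if path.suffix.lower() not in {".png", ".jpg", ".jpeg", ".bmp"}:
            continue  # same image types the import step accepts
        with Image.open(path) as im:
            ratio = im.width / im.height
        counts[round(ratio / bin_width) * bin_width] += 1  # bucket by bin_width
    return counts

# Usage: print(aspect_ratio_distribution("images"))
```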
## Model Training

### Function Page

Open this page from [Model Training] in the left menu under [Project Management].

![training_1](https://user-images.githubusercontent.com/90443348/197180300-3f6de7e0-db9c-4de4-9e76-26f97c1b9b5b.png)

If you already have a dataset picked as the training set, you can also reach the training page from the operation entry on the right side of that dataset.

![training_2](https://user-images.githubusercontent.com/90443348/197180387-11166212-8bac-4241-b931-1c3984a22130.png)

### Training Configuration

![image](https://user-images.githubusercontent.com/90443348/197182072-320b792b-fb1c-42a1-9437-8f652749213b.png)

Step 1: Select a docker image

YMIR provides a default docker image that supports YOLOv5 training. If you need other images, the administrator can pull more from the [My Images] - [Public Images] list page; see [Docker Management](en-us/README.md#docker-management) for details.

Step 2: Select the training set

Select the dataset to train the model on. The options are the datasets under the current project; datasets cannot be selected across projects. Make sure the selected dataset has been annotated, otherwise training cannot start; the better the annotations, the better the resulting model tends to be.

Step 3: Select the training target

The training target is the set of object classes to recognize in this training run; only classes in the selected training set's class list can be chosen. After selecting, you can click the [Calculate Positive and Negative Samples] button to compute the proportion of the selected classes in the training set.

Step 4: Select the validation set

During training, each batch of training data is checked for model quality: the images in the validation set serve as validation data, and the feedback from their results steers training. Therefore, choose a validation set consistent with the training target. The validation set must also be annotated, otherwise the final model quality will suffer.

Step 5: Pre-training model

In iterative training, when the user adds new training data on top of the original data, the model can be trained starting from the parameters learned on the original data. This makes the model converge faster and shortens training time; with higher-quality data, the results may also improve.

Note: only a model trained with the same training docker image can be selected as the pre-training model.

Step 6: Number of GPUs

YMIR currently supports GPU training only; adjust the number of GPUs used for this training run here to allocate resources sensibly.

Step 7: Hyperparameter configuration

Recommended for users with some deep learning background, to be applied as the actual situation requires. Hyperparameter configuration exposes the docker image's built-in parameters for modification and additionally provides a "longest edge scaling" option.

- Longest edge scaling: enter a value to resize the training images so that each image's longest edge equals that value, with the other edge scaled proportionally. A sketch of this resizing follows.
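For clarity, this is what longest edge scaling does, sketched with Pillow; it illustrates the transform described above and is not YMIR's internal code.

```python
from PIL import Image  # assumes Pillow is installed

def scale_longest_edge(im: Image.Image, longest: int) -> Image.Image:
    """Resize so the longer side equals `longest`, keeping the aspect ratio."""
    scale = longest / max(im.width, im.height)
    new_size = (round(im.width * scale), round(im.height * scale))
    return im.resize(new_size)

# Example: a 1920x1080 image with longest edge 640 becomes 640x360.
```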
### Model Training

Click [Start Training] to train the model.

- Training time scales with the amount of data; 1,000 images may take several hours to train, so please be patient.

- During training, you can check the model's training progress on the [Model List] page.

![training_4](https://user-images.githubusercontent.com/90443348/197182370-19df02ce-786f-444b-8a13-1f26606af568.png)

- For more information about the training process, open the [Model Details] page and click the [Training Process] button.

![training_5](https://user-images.githubusercontent.com/90443348/197182427-4b2b514d-d692-4c9a-be36-8804c447f848.png)

![training_6](https://user-images.githubusercontent.com/90443348/197182469-a083e67b-7107-46ef-9a7b-cdef2facd31e.png)

## Model Diagnostics

Model quality can be assessed through model inference or model diagnosis.

### Model Inference

The model's [Inference] operation generates inference results on the selected test set; multiple datasets or models can be selected for inference at once.

![inference_1](https://user-images.githubusercontent.com/90443348/197182648-5a1c3562-7cb0-47f1-bfcc-2208fc491b65.png)

![inference_2](https://user-images.githubusercontent.com/90443348/197182868-641fef4b-345c-49b2-aed5-2235930cfc5c.png)

Once inference finishes, the results can be visualized.

![inference_3](https://user-images.githubusercontent.com/90443348/197183620-7122fe6a-1706-49fe-b82d-89b3f49b5803.png)

![inference_4](https://user-images.githubusercontent.com/90443348/197183664-7f937c92-ceec-42f9-939a-a4f707d3554c.png)

![inference_5](https://user-images.githubusercontent.com/90443348/197183705-1957172d-8cea-4294-a5aa-9c969ca76972.png)

The visualization supports metric-based evaluation of inference results, including FP, FN, TP, and MTP, and supports filtering the view by class.

FP: False Positive. The ground truth of the test image contains no correct detection target at that location, but the model wrongly identifies one; i.e., a negative sample predicted as a positive class.

FN: False Negative. The ground truth of the test image contains a correct detection target, but the model misses it or identifies it as another target; i.e., a positive sample predicted as a negative class.

TP: True Positive. A model prediction that matches the ground truth for the target prediction class.

MTP: Matched True Positive. A ground truth annotation that is matched by a prediction for the target prediction class.

### Model Diagnosis

Find the [Model Diagnosis] module in the left navigation bar of [Project Management] to evaluate a model online. The steps are: ① select the model to evaluate, ② select the test set (the selected test set must already have inference results for that model; see [Model Inference](en-us/README.md#model-inference) for the steps), ③ adjust the evaluation parameters and click Diagnose.

- You can switch metrics to view the diagnosis results under different parameters. The results include mAP, the PR curve, precision, and recall. Example outputs:

![idiagnosis_1](https://user-images.githubusercontent.com/90443348/197183853-7039ab7c-cbe5-47c9-9e74-b7e7134bb138.png)

![diagnosis_2](https://user-images.githubusercontent.com/90443348/197184003-1b6328b8-33c7-4da5-879f-0854cdae333f.png)

When reading diagnosis results, consider which metric matters more in your business scenario, precision or recall: do you want to reduce false positives or false negatives? The former calls for attention to precision, the latter to recall. The evaluation metrics are defined as follows; a small sketch of the computation follows the definitions.

mAP: mean average precision, a standard metric for measuring the quality of an object detection algorithm. For each object class, precision and recall can be computed at a series of thresholds, yielding a P-R curve per class; mAP is computed from the area under these curves.

Precision: the ratio of correctly predicted objects to all predicted objects.

Recall: the ratio of correctly predicted objects to all real objects.
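To make these definitions concrete, here is a small plain-Python sketch of how a prediction is matched to ground truth by intersection-over-union (IoU) and how precision and recall fall out of the TP/FP/FN counts. The IoU threshold of 0.5 and the box format are common conventions, not necessarily the exact matching rules YMIR's evaluator applies.

```python
def iou(a, b):
    """Intersection-over-union of two boxes given as (xmin, ymin, xmax, ymax)."""
    ix = max(0, min(a[2], b[2]) - max(a[0], b[0]))
    iy = max(0, min(a[3], b[3]) - max(a[1], b[1]))
    inter = ix * iy
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    return inter / (area_a + area_b - inter)

def precision_recall(tp: int, fp: int, fn: int) -> tuple:
    """Precision = TP / (TP + FP); Recall = TP / (TP + FN)."""
    return (tp / (tp + fp) if tp + fp else 0.0,
            tp / (tp + fn) if tp + fn else 0.0)

# A prediction usually counts as TP when its IoU with a same-class ground
# truth box exceeds a threshold (0.5 is a common default):
print(iou((0, 0, 10, 10), (5, 0, 15, 10)))    # ~0.33 -> FP at IoU 0.5
print(precision_recall(tp=80, fp=20, fn=20))  # (0.8, 0.8)
```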
## Docker Management

Docker management is an advanced function, currently open to administrators only. It supports uploading custom docker images so users can run training, mining, and inference exactly the way they want. Please refer to the [docker development documentation](https://github.com/IndustryEssentials/ymir/tree/master/docker_executor/sample_executor) for the development standards docker images must follow.

### Add Docker Images

As an administrator, enter the [My Images] page, click the [New Image] button, and fill in the docker image's name and address to complete the addition.

![docker_1](https://user-images.githubusercontent.com/90443348/197184352-cd4ac31c-913e-4d93-a582-60b524addecd.png)

You can also add a docker image by copying a public one: enter the [Public Images] page, click the [Copy] button, and modify the name and description to finish copying the public docker image.

![docker_2](https://user-images.githubusercontent.com/90443348/197184396-9693de51-e26f-439c-a8f3-20b16502b3cc.png)

### Associate Docker Images

User-defined training, mining, and inference docker images generally need to be associated to guarantee that their operations can be chained: a model trained by the training docker image created by user A generally cannot be fed to a mining or inference docker image created by user B. To help users keep track of the relationships between different kinds of docker images, the platform provides a dedicated image association function. Click the training docker image's [Association] button and select the corresponding mining docker image.

![docker_3](https://user-images.githubusercontent.com/90443348/197184508-3ca35612-0be8-4cae-8205-ddfa6da1812c.png)

![docker_4](https://user-images.githubusercontent.com/90443348/197184565-d6521722-a65e-4d71-bf92-2cd6daefd0c9.png)

Note: currently, only training images can be associated with mining images.

## Model Iteration

### Functional Overview

A model is rarely trained to its best result in one pass; it usually takes continued data expansion and tuning, guided by inference results and diagnosis data.

For this reason, we designed the model iteration function. The figure below shows a complete model iteration process: across multiple iterations, the user keeps adjusting the training data and the algorithm, training repeatedly to obtain better model results.

![workflow](https://user-images.githubusercontent.com/90443348/197184675-4ed1ecdc-e434-4389-a7bf-e55ed55de260.png)

When iteration is enabled, YMIR provides a standardized model iteration process and pre-fills each step with the result of the previous operation. If the result of the current operation does not meet your expectation, you can also skip that operation.

### Function Portal

After creating a project, open iteration by clicking the [Processing models training] button on the [Project Summary] page, or directly via [Project Iteration] in the left menu under [Project Management].

![iteration_1](https://user-images.githubusercontent.com/90443348/197185933-7b66caf4-3871-4afa-a5fc-a60124ff731f.png)

### Prepare for an Iteration

To start an iteration, the user prepares the datasets and the initial model. Each plays the following role:

- Training set: the initial data used for training; its classes must contain the current project's target classes. The training set is continuously expanded during each iteration through version updates.
- Mining set: a large dataset that does not need to be labeled with the target classes yet. When users judge that the mining set holds no more valuable data, they can replace it themselves.

- Validation set: used to validate the model during training. The iteration process uses one fixed validation set throughout, which makes training results comparable across iterations.

- Test set: used to test the model after training; generally used in model inference and diagnosis to compare the model's performance across different data environments.

- Initial model: the model used for mining in the first iteration. It can be imported by the user, or trained by the user on the imported training set.

Set up each of the above in the iteration preparation interface, then click the [Use Iteration Function to Enhance Model Effect] button to enter the iteration process.

![Iteration_3](https://user-images.githubusercontent.com/90443348/197186038-345fd500-c5e5-4d8c-91fd-f631219717cd.png)

### Iteration Process

Step 1: Prepare mining data

This operation determines the data to mine by filtering or de-duplicating the selected mining set; its result is the data to be mined in the next step. This step can be skipped.

![Iteration_pre_1](https://user-images.githubusercontent.com/90443348/197186246-8414cf84-8da4-444b-8bf4-254a7622b541.png)

Step 2: Data mining

On the data to be mined from the previous step, set the amount of data you want to mine; the other parameters are pre-filled by the iteration system. Note that the mining model here is the final model from your previous iteration (or, in the first iteration, the initial model you set). After the mining task completes, you get the mining result data. This step can be skipped.

Step 3: Data annotation

The mining result usually lacks labels for the target classes the user wants to train, so it must be labeled manually. Click the [Data Label] button in the iteration flow to open the labeling page; the data to label is the previous step's mining result. For the other operations, refer to [Data Annotation](en-us/README.md#data-annotation). This step can be skipped.

Step 4: Update the training set

The main purpose of iteration is to expand the user's training data: merge the labeled mining results into the previous training set and generate a new training set version for model training.

Step 5: Model training

The merged training set is trained again to produce a new model. Note that the validation set here is the one the user set before iterating; changing it is not supported for now, to keep model quality comparable. Click the [Training] button to get this iteration's model.

Step 6: Next iteration

If the result does not yet meet your requirements, click the [Start Next Iteration] button to begin the next round. The process, sketched below, is the same for every iteration, and you can skip steps as needed.
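Conceptually, one iteration is a single pass of the loop below. This is a sketch only: `mine`, `label`, `merge`, and `train` are hypothetical stand-ins for the platform operations in Steps 1-5, not YMIR APIs.

```python
from typing import Callable, List

def run_iterations(train_set: List[str], mining_set: List[str], model: object,
                   mine: Callable, label: Callable, merge: Callable,
                   train: Callable, rounds: int = 1) -> object:
    """Each loop pass mirrors one iteration of Steps 1-6 above."""
    for _ in range(rounds):
        candidates = mine(mining_set, model)   # Steps 1-2: prepare + mine
        labeled = label(candidates)            # Step 3: annotate the result
        train_set = merge(train_set, labeled)  # Step 4: update training set
        model = train(train_set)               # Step 5: train a new model
    return model                               # Step 6: repeat if needed
```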
+ +step 7 View Iteration History + +After finishing the iteration process, if you need to review the information of a previous or the current iteration, click the [Iteration History] tab to view the iteration history. + +![Iteration_history](https://user-images.githubusercontent.com/90443348/197186463-b6b7b08b-ba4f-408f-ad4b-b2fb192894ab.png) + +## Model Deployment + +For performance and speed reasons, the models produced by platform training are not used directly on devices; instead, one-click model conversion and quantization tools convert them into models usable by specific hardware platforms. + +### Local Deployment + +step 1 Enter the [Model List] page and click the [Publish] button; after publishing finishes, go to the [My Algorithms] page of the [Model Deployment] module to check the publishing result. + +![release_1](https://user-images.githubusercontent.com/90443348/197187385-f8cf6992-c413-4153-b366-2a31a7fd2d08.png) + +![release_2](https://user-images.githubusercontent.com/90443348/197187464-0202d0e2-2d5c-401c-97aa-604affdaf284.png) + +step 2 Enter the [My Algorithms] page, click the [Deploy] button of the selected released model to enter the [Model Deployment] page, and select the device to deploy to. If you need other devices, add them on the [Device List] page first. + +![deploy_1](https://user-images.githubusercontent.com/90443348/197187529-2c0dc73a-4874-4602-9644-5a3405213fa8.png) + +![deploy_2](https://user-images.githubusercontent.com/90443348/197187588-d02a330d-9ec9-4fb7-a0de-18e1c3a18909.png) + +![deploy_3](https://user-images.githubusercontent.com/90443348/197187612-46df3bde-4dcf-47df-b2e7-b89adda7312a.png) + +step 3 After the deployment finishes, you can check how the model runs on the device: go to the [Device List] page and click the device name to enter the device details page. You can set the status of the algorithm on the device's [Algorithm Center] page. + +![device_1](https://user-images.githubusercontent.com/90443348/197187803-474f491e-0bc3-42b1-aec7-6544cce597df.png) + +### Publish to public algorithm library + +step 1 Enter the [My Algorithms] page, click the [Publish to Public Algorithm] button of the selected published model, fill in the information, and click [OK]; the algorithm is then handed over to the backend for manual review and packaging. + +![EN_public_alg_1](https://user-images.githubusercontent.com/90443348/197187965-1e856657-6ea9-445c-b666-4500853cb928.jpg) + +![EN_public_alg_2](https://user-images.githubusercontent.com/90443348/197187970-9a5181b5-0c61-47bb-89f6-8967be6a1598.jpg) + +![EN_public_alg_3](https://user-images.githubusercontent.com/90443348/197187978-96a68a6b-566e-4724-8e88-d8dcc754faef.jpg) + +step 2 After the review, you can find the corresponding model on the [Model Deployment] - [Public Algorithm] page. + +![EN_public_alg_4](https://user-images.githubusercontent.com/90443348/197187982-15b56144-d935-4da2-8504-7a59cafa8a53.jpg) + +![EN_public_alg_5](https://user-images.githubusercontent.com/90443348/197187987-7355f29c-fcf7-45f6-a1b7-0df348d5540a.jpg) + +step 3 Click the model to enter the algorithm detail page, where you can enter an image URL to try it out.
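Functionally, the try-out page sends the image URL to an inference endpoint of the deployed algorithm and renders the returned detections. A minimal sketch of such a request is shown below; the endpoint path, field names, and response shape are assumptions made for illustration, not the deployment module's documented API.

```python
# Minimal sketch of a try-out style inference request.
# The endpoint path, field names, and response layout are hypothetical.
import json
import urllib.request


def try_algorithm(server: str, algorithm_id: int, image_url: str) -> dict:
    payload = json.dumps({"img_url": image_url}).encode("utf-8")
    req = urllib.request.Request(
        f"{server}/algorithms/{algorithm_id}/inference",  # hypothetical path
        data=payload,
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    with urllib.request.urlopen(req) as resp:
        return json.load(resp)  # e.g. {"boxes": [...], "scores": [...]}


# Hypothetical usage:
# result = try_algorithm("http://deploy-host:port", 1, "https://example.com/street.jpg")
```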
+ +![EN_public_alg_6](https://user-images.githubusercontent.com/90443348/197187990-5d0aa1cc-c21c-45b6-b85a-c89c6e3d270b.jpg) + +![EN_public_alg_7](https://user-images.githubusercontent.com/90443348/197187995-7d268554-5837-4310-b0c3-218bfdf55c64.jpg) + +![EN_public_alg_8](https://user-images.githubusercontent.com/90443348/197188000-83bd16c5-06e6-4015-95c6-ffaafc8b5361.jpg) diff --git a/ymir/web/docs/en-us/_navbar.md b/ymir/web/docs/en-us/_navbar.md new file mode 100644 index 0000000000..bea54c9677 --- /dev/null +++ b/ymir/web/docs/en-us/_navbar.md @@ -0,0 +1,2 @@ +* [En](/en-us/) +* [简体中文](/) diff --git a/ymir/web/docs/en-us/_sidebar.md b/ymir/web/docs/en-us/_sidebar.md new file mode 100644 index 0000000000..6c2fe5217e --- /dev/null +++ b/ymir/web/docs/en-us/_sidebar.md @@ -0,0 +1 @@ +- [Introduction](en-us/README.md) diff --git a/ymir/web/docs/index.html b/ymir/web/docs/index.html new file mode 100644 index 0000000000..04869c5948 --- /dev/null +++ b/ymir/web/docs/index.html @@ -0,0 +1,20 @@ + + + + + + + + + +
    + + + + diff --git a/ymir/web/docs/vue.css b/ymir/web/docs/vue.css new file mode 100644 index 0000000000..892db91619 --- /dev/null +++ b/ymir/web/docs/vue.css @@ -0,0 +1,857 @@ +* { + -webkit-font-smoothing: antialiased; + -webkit-overflow-scrolling: touch; + -webkit-tap-highlight-color: rgba(0,0,0,0); + -webkit-text-size-adjust: none; + -webkit-touch-callout: none; + box-sizing: border-box; +} +body:not(.ready) { + overflow: hidden; +} +body:not(.ready) [data-cloak], +body:not(.ready) .app-nav, +body:not(.ready) > nav { + display: none; +} +div#app { + font-size: 30px; + font-weight: lighter; + margin: 40vh auto; + text-align: center; +} +div#app:empty::before { + content: 'Loading...'; +} +.emoji { + height: 1.2rem; + vertical-align: middle; +} +.progress { + background-color: var(--theme-color, #42b983); + height: 2px; + left: 0px; + position: fixed; + right: 0px; + top: 0px; + transition: width 0.2s, opacity 0.4s; + width: 0%; + z-index: 999999; +} +.search a:hover { + color: var(--theme-color, #42b983); +} +.search .search-keyword { + color: var(--theme-color, #42b983); + font-style: normal; + font-weight: bold; +} +html, +body { + height: 100%; +} +body { + -moz-osx-font-smoothing: grayscale; + -webkit-font-smoothing: antialiased; + color: #34495e; + font-family: 'Source Sans Pro', 'Helvetica Neue', Arial, sans-serif; + font-size: 15px; + letter-spacing: 0; + margin: 0; + overflow-x: hidden; +} +img { + max-width: 100%; +} +a[disabled] { + cursor: not-allowed; + opacity: 0.6; +} +kbd { + border: solid 1px #ccc; + border-radius: 3px; + display: inline-block; + font-size: 12px !important; + line-height: 12px; + margin-bottom: 3px; + padding: 3px 5px; + vertical-align: middle; +} +li input[type='checkbox'] { + margin: 0 0.2em 0.25em 0; + vertical-align: middle; +} +.app-nav { + margin: 25px 60px 0 0; + position: absolute; + right: 0; + text-align: right; + z-index: 10; +/* navbar dropdown */ +} +.app-nav.no-badge { + margin-right: 25px; +} +.app-nav p { + margin: 0; +} +.app-nav > a { + margin: 0 1rem; + padding: 5px 0; +} +.app-nav ul, +.app-nav li { + display: inline-block; + list-style: none; + margin: 0; +} +.app-nav a { + color: inherit; + font-size: 16px; + text-decoration: none; + transition: color 0.3s; +} +.app-nav a:hover { + color: var(--theme-color, #42b983); +} +.app-nav a.active { + border-bottom: 2px solid var(--theme-color, #42b983); + color: var(--theme-color, #42b983); +} +.app-nav li { + display: inline-block; + margin: 0 1rem; + padding: 5px 0; + position: relative; + cursor: pointer; +} +.app-nav li ul { + background-color: #fff; + border: 1px solid #ddd; + border-bottom-color: #ccc; + border-radius: 4px; + box-sizing: border-box; + display: none; + max-height: calc(100vh - 61px); + overflow-y: auto; + padding: 10px 0; + position: absolute; + right: -15px; + text-align: left; + top: 100%; + white-space: nowrap; +} +.app-nav li ul li { + display: block; + font-size: 14px; + line-height: 1rem; + margin: 0; + margin: 8px 14px; + white-space: nowrap; +} +.app-nav li ul a { + display: block; + font-size: inherit; + margin: 0; + padding: 0; +} +.app-nav li ul a.active { + border-bottom: 0; +} +.app-nav li:hover ul { + display: block; +} +.github-corner { + border-bottom: 0; + position: fixed; + right: 0; + text-decoration: none; + top: 0; + z-index: 1; +} +.github-corner:hover .octo-arm { + -webkit-animation: octocat-wave 560ms ease-in-out; + animation: octocat-wave 560ms ease-in-out; +} +.github-corner svg { + color: #fff; + fill: var(--theme-color, #42b983); + 
height: 80px; + width: 80px; +} +main { + display: block; + position: relative; + width: 100vw; + height: 100%; + z-index: 0; +} +main.hidden { + display: none; +} +.anchor { + display: inline-block; + text-decoration: none; + transition: all 0.3s; +} +.anchor span { + color: #34495e; +} +.anchor:hover { + text-decoration: underline; +} +.sidebar { + border-right: 1px solid rgba(0,0,0,0.07); + overflow-y: auto; + padding: 40px 0 0; + position: absolute; + top: 0; + bottom: 0; + left: 0; + transition: transform 250ms ease-out; + width: 300px; + z-index: 20; +} +.sidebar > h1 { + margin: 0 auto 1rem; + font-size: 1.5rem; + font-weight: 300; + text-align: center; +} +.sidebar > h1 a { + color: inherit; + text-decoration: none; +} +.sidebar > h1 .app-nav { + display: block; + position: static; +} +.sidebar .sidebar-nav { + line-height: 2em; + padding-bottom: 40px; +} +.sidebar li.collapse .app-sub-sidebar { + display: none; +} +.sidebar ul { + margin: 0 0 0 15px; + padding: 0; +} +.sidebar li > p { + font-weight: 700; + margin: 0; +} +.sidebar ul, +.sidebar ul li { + list-style: none; +} +.sidebar ul li a { + border-bottom: none; + display: block; +} +.sidebar ul li ul { + padding-left: 20px; +} +.sidebar::-webkit-scrollbar { + width: 4px; +} +.sidebar::-webkit-scrollbar-thumb { + background: transparent; + border-radius: 4px; +} +.sidebar:hover::-webkit-scrollbar-thumb { + background: rgba(136,136,136,0.4); +} +.sidebar:hover::-webkit-scrollbar-track { + background: rgba(136,136,136,0.1); +} +.sidebar-toggle { + background-color: transparent; + background-color: rgba(255,255,255,0.8); + border: 0; + outline: none; + padding: 10px; + position: absolute; + bottom: 0; + left: 0; + text-align: center; + transition: opacity 0.3s; + width: 284px; + z-index: 30; + cursor: pointer; +} +.sidebar-toggle:hover .sidebar-toggle-button { + opacity: 0.4; +} +.sidebar-toggle span { + background-color: var(--theme-color, #42b983); + display: block; + margin-bottom: 4px; + width: 16px; + height: 2px; +} +body.sticky .sidebar, +body.sticky .sidebar-toggle { + position: fixed; +} +.content { + padding-top: 60px; + position: absolute; + top: 0; + right: 0; + bottom: 0; + left: 300px; + transition: left 250ms ease; +} +.markdown-section { + margin: 0 auto; + max-width: 80%; + padding: 30px 15px 40px 15px; + position: relative; +} +.markdown-section > * { + box-sizing: border-box; + font-size: inherit; +} +.markdown-section > :first-child { + margin-top: 0 !important; +} +.markdown-section hr { + border: none; + border-bottom: 1px solid #eee; + margin: 2em 0; +} +.markdown-section iframe { + border: 1px solid #eee; +/* fix horizontal overflow on iOS Safari */ + width: 1px; + min-width: 100%; +} +.markdown-section table { + border-collapse: collapse; + border-spacing: 0; + display: block; + margin-bottom: 1rem; + overflow: auto; + width: 100%; +} +.markdown-section th { + border: 1px solid #ddd; + font-weight: bold; + padding: 6px 13px; +} +.markdown-section td { + border: 1px solid #ddd; + padding: 6px 13px; +} +.markdown-section tr { + border-top: 1px solid #ccc; +} +.markdown-section tr:nth-child(2n) { + background-color: #f8f8f8; +} +.markdown-section p.tip { + background-color: #f8f8f8; + border-bottom-right-radius: 2px; + border-left: 4px solid #f66; + border-top-right-radius: 2px; + margin: 2em 0; + padding: 12px 24px 12px 30px; + position: relative; +} +.markdown-section p.tip:before { + background-color: #f66; + border-radius: 100%; + color: #fff; + content: '!'; + font-family: 'Dosis', 'Source Sans Pro', 
'Helvetica Neue', Arial, sans-serif; + font-size: 14px; + font-weight: bold; + left: -12px; + line-height: 20px; + position: absolute; + height: 20px; + width: 20px; + text-align: center; + top: 14px; +} +.markdown-section p.tip code { + background-color: #efefef; +} +.markdown-section p.tip em { + color: #34495e; +} +.markdown-section p.warn { + background: rgba(66,185,131,0.1); + border-radius: 2px; + padding: 1rem; +} +.markdown-section ul.task-list > li { + list-style-type: none; +} +body.close .sidebar { + transform: translateX(-300px); +} +body.close .sidebar-toggle { + width: auto; +} +body.close .content { + left: 0; +} +@media print { + .github-corner, + .sidebar-toggle, + .sidebar, + .app-nav { + display: none; + } +} +@media screen and (max-width: 768px) { + .github-corner, + .sidebar-toggle, + .sidebar { + position: fixed; + } + .app-nav { + margin-top: 16px; + } + .app-nav li ul { + top: 30px; + } + main { + height: auto; + min-height: 100vh; + overflow-x: hidden; + } + .sidebar { + left: -300px; + transition: transform 250ms ease-out; + } + .content { + left: 0; + max-width: 100vw; + position: static; + padding-top: 20px; + transition: transform 250ms ease; + } + .app-nav, + .github-corner { + transition: transform 250ms ease-out; + } + .sidebar-toggle { + background-color: transparent; + width: auto; + padding: 30px 30px 10px 10px; + } + body.close .sidebar { + transform: translateX(300px); + } + body.close .sidebar-toggle { + background-color: rgba(255,255,255,0.8); + transition: 1s background-color; + width: 284px; + padding: 10px; + } + body.close .content { + transform: translateX(300px); + } + body.close .app-nav, + body.close .github-corner { + display: none; + } + .github-corner:hover .octo-arm { + -webkit-animation: none; + animation: none; + } + .github-corner .octo-arm { + -webkit-animation: octocat-wave 560ms ease-in-out; + animation: octocat-wave 560ms ease-in-out; + } +} +@-webkit-keyframes octocat-wave { + 0%, 100% { + transform: rotate(0); + } + 20%, 60% { + transform: rotate(-25deg); + } + 40%, 80% { + transform: rotate(10deg); + } +} +@keyframes octocat-wave { + 0%, 100% { + transform: rotate(0); + } + 20%, 60% { + transform: rotate(-25deg); + } + 40%, 80% { + transform: rotate(10deg); + } +} +section.cover { + align-items: center; + background-position: center center; + background-repeat: no-repeat; + background-size: cover; + height: 100vh; + width: 100vw; + display: none; +} +section.cover.show { + display: flex; +} +section.cover.has-mask .mask { + background-color: #fff; + opacity: 0.8; + position: absolute; + top: 0; + height: 100%; + width: 100%; +} +section.cover .cover-main { + flex: 1; + margin: -20px 16px 0; + text-align: center; + position: relative; +} +section.cover a { + color: inherit; + text-decoration: none; +} +section.cover a:hover { + text-decoration: none; +} +section.cover p { + line-height: 1.5rem; + margin: 1em 0; +} +section.cover h1 { + color: inherit; + font-size: 2.5rem; + font-weight: 300; + margin: 0.625rem 0 2.5rem; + position: relative; + text-align: center; +} +section.cover h1 a { + display: block; +} +section.cover h1 small { + bottom: -0.4375rem; + font-size: 1rem; + position: absolute; +} +section.cover blockquote { + font-size: 1.5rem; + text-align: center; +} +section.cover ul { + line-height: 1.8; + list-style-type: none; + margin: 1em auto; + max-width: 500px; + padding: 0; +} +section.cover .cover-main > p:last-child a { + border-color: var(--theme-color, #42b983); + border-radius: 2rem; + border-style: solid; + 
border-width: 1px; + box-sizing: border-box; + color: var(--theme-color, #42b983); + display: inline-block; + font-size: 1.05rem; + letter-spacing: 0.1rem; + margin: 0.5rem 1rem; + padding: 0.75em 2rem; + text-decoration: none; + transition: all 0.15s ease; +} +section.cover .cover-main > p:last-child a:last-child { + background-color: var(--theme-color, #42b983); + color: #fff; +} +section.cover .cover-main > p:last-child a:last-child:hover { + color: inherit; + opacity: 0.8; +} +section.cover .cover-main > p:last-child a:hover { + color: inherit; +} +section.cover blockquote > p > a { + border-bottom: 2px solid var(--theme-color, #42b983); + transition: color 0.3s; +} +section.cover blockquote > p > a:hover { + color: var(--theme-color, #42b983); +} +body { + background-color: #fff; +} +/* sidebar */ +.sidebar { + background-color: #fff; + color: #364149; +} +.sidebar li { + margin: 6px 0 6px 0; +} +.sidebar ul li a { + color: #505d6b; + font-size: 14px; + font-weight: normal; + overflow: hidden; + text-decoration: none; + text-overflow: ellipsis; + white-space: nowrap; +} +.sidebar ul li a:hover { + text-decoration: underline; +} +.sidebar ul li ul { + padding: 0; +} +.sidebar ul li.active > a { + border-right: 2px solid; + color: var(--theme-color, #42b983); + font-weight: 600; +} +.app-sub-sidebar li::before { + content: '-'; + padding-right: 4px; + float: left; +} +/* markdown content found on pages */ +.markdown-section h1, +.markdown-section h2, +.markdown-section h3, +.markdown-section h4, +.markdown-section strong { + color: #2c3e50; + font-weight: 600; +} +.markdown-section a { + color: var(--theme-color, #42b983); + font-weight: 600; +} +.markdown-section h1 { + font-size: 2rem; + margin: 0 0 1rem; +} +.markdown-section h2 { + font-size: 1.75rem; + margin: 45px 0 0.8rem; +} +.markdown-section h3 { + font-size: 1.5rem; + margin: 40px 0 0.6rem; +} +.markdown-section h4 { + font-size: 1.25rem; +} +.markdown-section h5 { + font-size: 1rem; +} +.markdown-section h6 { + color: #777; + font-size: 1rem; +} +.markdown-section figure, +.markdown-section p { + margin: 1.2em 0; +} +.markdown-section p, +.markdown-section ul, +.markdown-section ol { + line-height: 1.6rem; + word-spacing: 0.05rem; +} +.markdown-section ul, +.markdown-section ol { + padding-left: 1.5rem; +} +.markdown-section blockquote { + border-left: 4px solid var(--theme-color, #42b983); + color: #858585; + margin: 2em 0; + padding-left: 20px; +} +.markdown-section blockquote p { + font-weight: 600; + margin-left: 0; +} +.markdown-section iframe { + margin: 1em 0; +} +.markdown-section em { + color: #7f8c8d; +} +.markdown-section code, +.markdown-section pre, +.markdown-section output::after { + font-family: 'Roboto Mono', Monaco, courier, monospace; +} +.markdown-section code, +.markdown-section pre { + background-color: #f8f8f8; +} +.markdown-section pre, +.markdown-section output { + margin: 1.2em 0; + position: relative; +} +.markdown-section pre > code, +.markdown-section output { + border-radius: 2px; + display: block; +} +.markdown-section pre > code, +.markdown-section output::after { + -moz-osx-font-smoothing: initial; + -webkit-font-smoothing: initial; +} +.markdown-section code { + border-radius: 2px; + color: #e96900; + margin: 0 2px; + padding: 3px 5px; + white-space: pre-wrap; +} +.markdown-section > :not(h1):not(h2):not(h3):not(h4):not(h5):not(h6) code { + font-size: 0.8rem; +} +.markdown-section pre { + padding: 0 1.4rem; + line-height: 1.5rem; + overflow: auto; + word-wrap: normal; +} +.markdown-section 
pre > code { + color: #525252; + font-size: 0.8rem; + padding: 2.2em 5px; + line-height: inherit; + margin: 0 2px; + max-width: inherit; + overflow: inherit; + white-space: inherit; +} +.markdown-section output { + padding: 1.7rem 1.4rem; + border: 1px dotted #ccc; +} +.markdown-section output > :first-child { + margin-top: 0; +} +.markdown-section output > :last-child { + margin-bottom: 0; +} +.markdown-section code::after, +.markdown-section code::before, +.markdown-section output::after, +.markdown-section output::before { + letter-spacing: 0.05rem; +} +.markdown-section pre::after, +.markdown-section output::after { + color: #ccc; + font-size: 0.6rem; + font-weight: 600; + height: 15px; + line-height: 15px; + padding: 5px 10px 0; + position: absolute; + right: 0; + text-align: right; + top: 0; +} +.markdown-section pre::after, +.markdown-section output::after { + content: attr(data-lang); +} +/* code highlight */ +.token.comment, +.token.prolog, +.token.doctype, +.token.cdata { + color: #8e908c; +} +.token.namespace { + opacity: 0.7; +} +.token.boolean, +.token.number { + color: #c76b29; +} +.token.punctuation { + color: #525252; +} +.token.property { + color: #c08b30; +} +.token.tag { + color: #2973b7; +} +.token.string { + color: var(--theme-color, #42b983); +} +.token.selector { + color: #6679cc; +} +.token.attr-name { + color: #2973b7; +} +.token.entity, +.token.url, +.language-css .token.string, +.style .token.string { + color: #22a2c9; +} +.token.attr-value, +.token.control, +.token.directive, +.token.unit { + color: var(--theme-color, #42b983); +} +.token.keyword, +.token.function { + color: #e96900; +} +.token.statement, +.token.regex, +.token.atrule { + color: #22a2c9; +} +.token.placeholder, +.token.variable { + color: #3d8fd1; +} +.token.deleted { + text-decoration: line-through; +} +.token.inserted { + border-bottom: 1px dotted #202746; + text-decoration: none; +} +.token.italic { + font-style: italic; +} +.token.important, +.token.bold { + font-weight: bold; +} +.token.important { + color: #c94922; +} +.token.entity { + cursor: help; +} +code .token { + -moz-osx-font-smoothing: initial; + -webkit-font-smoothing: initial; + min-height: 1.5rem; + position: relative; + left: auto; +} diff --git a/ymir/web/jsconfig.json b/ymir/web/jsconfig.json new file mode 100644 index 0000000000..d951a99634 --- /dev/null +++ b/ymir/web/jsconfig.json @@ -0,0 +1,18 @@ +{ + "compilerOptions": { + "target": "ES6", + "module": "CommonJS", + "moduleResolution": "Node", + "importHelpers": true, + "esModuleInterop": true, + "baseUrl": "./", + "strict": true, + "paths": { + "@/*": ["src/*"], + }, + "allowSyntheticDefaultImports": true + }, + "include": [ + "src/**/*" + ], +} \ No newline at end of file diff --git a/ymir/web/mock/dataset.js b/ymir/web/mock/dataset.js index fc02018c98..db086b17f5 100644 --- a/ymir/web/mock/dataset.js +++ b/ymir/web/mock/dataset.js @@ -1,14 +1,14 @@ -import mockjs from 'mockjs' +import mockjs, { Random } from 'mockjs' import { random } from './keyword' import baseApi from './api.js' const item = { "name": "@title(1,4)", "hash": "@string(32)", - "type|1": [1,2,3,4,5], - "state|1": [2,3], - 'version|1': [1,2,3,4,5,6,7,8], + "type|1": [1, 2, 3, 4, 5], + "state|1": [2, 3], + 'version|1': [1, 2, 3, 4, 5, 6, 7, 8], "asset_count": '@integer(2,9999)', "keyword_count": '@integer(1,30)', "task_id": '@integer(1000, 9999)', @@ -18,7 +18,7 @@ const item = { "project_id": 30001, "keywords": '@keywords(2, 5)', "progress": '@integer(0,100)', - "task_state|1": [1,2,3,4], + 
"task_state|1": [1, 2, 3, 4], "task_progress": '@integer(0,100)' } @@ -36,6 +36,47 @@ const groups = mockjs.mock({ total: 34, }) +const xyz = { + x: '@float(0, 1, 0, 4)', + y: '@float(0, 1, 0, 4)', + z: '@float(0, 1, 0, 4)', +} +const metadata = { + "ap": '@float(0, 1, 0, 4)', + "ar": '@float(0, 1, 0, 4)', + "tp": '@float(0, 1, 0, 4)', + "fp": '@float(0, 1, 0, 4)', + "fn": '@float(0, 1, 0, 4)', + "pr_curve|1-20": [xyz], +} + +const ci = kws => kws.reduce((prev, kw) => ({ + ...prev, + [kw]: metadata, +}), {}) +const ck = cks => cks.reduce((prev, { value, sub }) => ({ + ...prev, + [value]: { + total: metadata, + sub: sub.reduce((prev, kw) => ({ ...prev, [kw]: metadata }), {}) + } +}), {}) +const dsData = () => ({ + conf_thr: '@float(0, 1, 0, 4)', + iou_evaluations: { + [Random.float(0, 1)]: { + ci_evaluations: ci(['person', 'cat']), + ck_evaluations: ck([{ value: 'day', sub: ['rainy', 'sunny'] }, { value: 'color', sub: ['black', 'white'] }]), + }, + }, +}) + +const datasets = [80, 81] +const evaluation = mockjs.mock(datasets.reduce((prev, id) => ({ + ...prev, + [id]: dsData(), +}), {})) + export default baseApi([ { url: 'dataset_groups/', @@ -61,4 +102,11 @@ export default baseApi([ result: mockjs.mock(item), } }, + { + method: 'post', + url: 'datasets/evaluation', + data: { + result: evaluation, + } + } ]) diff --git a/ymir/web/nginx.conf.template b/ymir/web/nginx.conf.template index a828e274dd..65d90f5a9e 100644 --- a/ymir/web/nginx.conf.template +++ b/ymir/web/nginx.conf.template @@ -19,12 +19,22 @@ server { # use 'listen 80 deferred;' for Linux # use 'listen 80 accept_filter=httpready;' for FreeBSD client_max_body_size 4G; + + gzip on; + gzip_min_length 1k; + gzip_comp_level 5; + gzip_types text/plain application/javascript application/x-javascript text/javascript text/xml text/css; # set the correct host(s) for your site server_name localhost; keepalive_timeout 5; + # Ymir docs + location /docs/ { + root /data/ymir/; + } + location /api/ { proxy_pass http://backend_api/api/; add_header 'Access-Control-Allow-Origin' '*'; @@ -35,7 +45,7 @@ server { } # backend's Swagger UI - location /docs { + location /api_docs { proxy_pass http://backend_api/docs; add_header 'Access-Control-Allow-Origin' '*'; proxy_set_header Host $host; @@ -88,10 +98,10 @@ server { } # static files - location /ymir-assets/ { - rewrite ^(.*)a000000000(.*)$ $1$2 break; + location ~ /ymir-assets/(?.+)/(?.+)$ { types { } default_type "image/jpeg;"; - root /data/ymir/; + root /data/ymir/ymir-assets; + try_files /$prefix/$filename /$filename =404; } location /ymir-models/ { diff --git a/ymir/web/package-lock.json b/ymir/web/package-lock.json index 1250f3a0be..eacab829c4 100644 --- a/ymir/web/package-lock.json +++ b/ymir/web/package-lock.json @@ -1,12 +1,12 @@ { "name": "ymir-web", - "version": "1.1.0.0517", + "version": "1.2.1.0804", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "ymir-web", - "version": "1.1.0.0517", + "version": "1.2.1.0804", "hasInstallScript": true, "dependencies": { "@ant-design/pro-layout": "^6.32.6", @@ -23,6 +23,7 @@ "echarts": "^5.2.2", "react": "17.x", "react-dom": "17.x", + "react-json-view": "^1.21.3", "socket.io-client": "^4.4.1", "umi": "^3.5.20" }, @@ -3534,6 +3535,11 @@ "node": ">=0.10.0" } }, + "node_modules/base16": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/base16/-/base16-1.0.0.tgz", + "integrity": "sha512-pNdYkNPiJUnEhnfXV56+sQy8+AaPcG3POZAUnwr4EeqCUZFz4u2PePbo3e5Gj4ziYPCWGUZT9RHisvJKnwFuBQ==" + }, "node_modules/base64-js": { "version": 
"1.5.1", "resolved": "https://registry.npmmirror.com/base64-js/download/base64-js-1.5.1.tgz", @@ -4401,6 +4407,52 @@ "yarn": ">=1" } }, + "node_modules/cross-fetch": { + "version": "3.1.5", + "resolved": "https://registry.npmmirror.com/cross-fetch/-/cross-fetch-3.1.5.tgz", + "integrity": "sha512-lvb1SBsI0Z7GDwmuid+mU3kWVBwTVUbe7S0H52yaaAdQOXq2YktTCZdlAcNKFzE6QtRz0snpw9bNiPeOIkkQvw==", + "dependencies": { + "node-fetch": "2.6.7" + } + }, + "node_modules/cross-fetch/node_modules/node-fetch": { + "version": "2.6.7", + "resolved": "https://registry.npmmirror.com/node-fetch/-/node-fetch-2.6.7.tgz", + "integrity": "sha512-ZjMPFEfVx5j+y2yF35Kzx5sF7kDzxuDj6ziH4FFbOp87zKDZNx8yExJIb05OGF4Nlt9IHFIMBkRl41VdvcNdbQ==", + "dependencies": { + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, + "node_modules/cross-fetch/node_modules/tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmmirror.com/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" + }, + "node_modules/cross-fetch/node_modules/webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" + }, + "node_modules/cross-fetch/node_modules/whatwg-url": { + "version": "5.0.0", + "resolved": "https://registry.npmmirror.com/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "dependencies": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + }, "node_modules/cross-spawn": { "version": "7.0.3", "resolved": "https://registry.nlark.com/cross-spawn/download/cross-spawn-7.0.3.tgz", @@ -5516,6 +5568,28 @@ "bser": "2.1.1" } }, + "node_modules/fbemitter": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/fbemitter/-/fbemitter-3.0.0.tgz", + "integrity": "sha512-KWKaceCwKQU0+HPoop6gn4eOHk50bBv/VxjJtGMfwmJt3D29JpN4H4eisCtIPA+a8GVBam+ldMMpMjJUvpDyHw==", + "dependencies": { + "fbjs": "^3.0.0" + } + }, + "node_modules/fbemitter/node_modules/fbjs": { + "version": "3.0.4", + "resolved": "https://registry.npmmirror.com/fbjs/-/fbjs-3.0.4.tgz", + "integrity": "sha512-ucV0tDODnGV3JCnnkmoszb5lf4bNpzjv80K41wd4k798Etq+UYD0y0TIfalLjZoKgjive6/adkRnszwapiDgBQ==", + "dependencies": { + "cross-fetch": "^3.1.5", + "fbjs-css-vars": "^1.0.0", + "loose-envify": "^1.0.0", + "object-assign": "^4.1.0", + "promise": "^7.1.1", + "setimmediate": "^1.0.5", + "ua-parser-js": "^0.7.30" + } + }, "node_modules/fbjs": { "version": "0.8.18", "resolved": "https://registry.npmmirror.com/fbjs/download/fbjs-0.8.18.tgz", @@ -5531,6 +5605,11 @@ "ua-parser-js": "^0.7.30" } }, + "node_modules/fbjs-css-vars": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/fbjs-css-vars/-/fbjs-css-vars-1.0.2.tgz", + "integrity": "sha512-b2XGFAFdWZWg0phtAWLHCk836A1Xann+I+Dgd3Gk64MHKZO44FfoD1KxyvbSh0qZsIoXQGGlVztIY+oitJPpRQ==" + }, "node_modules/fbjs/node_modules/core-js": { "version": "1.2.7", "resolved": "https://registry.npmmirror.com/core-js/download/core-js-1.2.7.tgz", @@ -5585,6 +5664,32 @@ "integrity": "sha512-dVsPA/UwQ8+2uoFe5GHtiBMu48dWLTdsuEd7CKGlZlD78r1TTWBvDuFaFGKCo/ZfEr95Uk56vZoX86OsHkUeIg== 
sha1-wSg6yfJ7Noq8HjbR/3sEUBowNWs=", "deprecated": "flatten is deprecated in favor of utility frameworks such as lodash." }, + "node_modules/flux": { + "version": "4.0.3", + "resolved": "https://registry.npmmirror.com/flux/-/flux-4.0.3.tgz", + "integrity": "sha512-yKAbrp7JhZhj6uiT1FTuVMlIAT1J4jqEyBpFApi1kxpGZCvacMVc/t1pMQyotqHhAgvoE3bNvAykhCo2CLjnYw==", + "dependencies": { + "fbemitter": "^3.0.0", + "fbjs": "^3.0.1" + }, + "peerDependencies": { + "react": "^15.0.2 || ^16.0.0 || ^17.0.0" + } + }, + "node_modules/flux/node_modules/fbjs": { + "version": "3.0.4", + "resolved": "https://registry.npmmirror.com/fbjs/-/fbjs-3.0.4.tgz", + "integrity": "sha512-ucV0tDODnGV3JCnnkmoszb5lf4bNpzjv80K41wd4k798Etq+UYD0y0TIfalLjZoKgjive6/adkRnszwapiDgBQ==", + "dependencies": { + "cross-fetch": "^3.1.5", + "fbjs-css-vars": "^1.0.0", + "loose-envify": "^1.0.0", + "object-assign": "^4.1.0", + "promise": "^7.1.1", + "setimmediate": "^1.0.5", + "ua-parser-js": "^0.7.30" + } + }, "node_modules/follow-redirects": { "version": "1.14.7", "resolved": "https://registry.npmmirror.com/follow-redirects/download/follow-redirects-1.14.7.tgz", @@ -9464,11 +9569,21 @@ "resolved": "https://registry.npmmirror.com/lodash.clonedeep/download/lodash.clonedeep-4.5.0.tgz", "integrity": "sha1-4j8/nE+Pvd6HJSnBBxhXoIblzO8= sha512-H5ZhCF25riFd9uB5UCkVKo61m3S/xZk1x4wA6yp/L3RFP6Z/eHH1ymQcGLo7J3GMPfm0V/7m1tryHuGVxpqEBQ==" }, + "node_modules/lodash.curry": { + "version": "4.1.1", + "resolved": "https://registry.npmmirror.com/lodash.curry/-/lodash.curry-4.1.1.tgz", + "integrity": "sha512-/u14pXGviLaweY5JI0IUzgzF2J6Ne8INyzAZjImcryjgkZ+ebruBxy2/JaOOkTqScddcYtakjhSaeemV8lR0tA==" + }, "node_modules/lodash.debounce": { "version": "4.0.8", "resolved": "https://registry.nlark.com/lodash.debounce/download/lodash.debounce-4.0.8.tgz?cache=0&sync_timestamp=1622605323058&other_urls=https%3A%2F%2Fregistry.nlark.com%2Flodash.debounce%2Fdownload%2Flodash.debounce-4.0.8.tgz", "integrity": "sha1-gteb/zCmfEAF/9XiUVMArZyk168=" }, + "node_modules/lodash.flow": { + "version": "3.5.0", + "resolved": "https://registry.npmmirror.com/lodash.flow/-/lodash.flow-3.5.0.tgz", + "integrity": "sha512-ff3BX/tSioo+XojX4MOsOMhJw0nZoUEF011LX8g8d3gvjVbxd89cCio4BCXronjxcTUIJUoqKEUA+n4CqvvRPw==" + }, "node_modules/lodash.isequal": { "version": "4.5.0", "resolved": "https://registry.npmmirror.com/lodash.isequal/download/lodash.isequal-4.5.0.tgz", @@ -11231,6 +11346,11 @@ "node": ">=6" } }, + "node_modules/pure-color": { + "version": "1.3.0", + "resolved": "https://registry.npmmirror.com/pure-color/-/pure-color-1.3.0.tgz", + "integrity": "sha512-QFADYnsVoBMw1srW7OVKEYjG+MbIa49s54w1MA1EDY6r2r/sTcKKYqRX1f4GYvnXP7eN/Pe9HFcX+hwzmrXRHA==" + }, "node_modules/qs": { "version": "6.10.3", "resolved": "https://registry.npmmirror.com/qs/download/qs-6.10.3.tgz", @@ -11948,6 +12068,17 @@ "node": ">=0.10.0" } }, + "node_modules/react-base16-styling": { + "version": "0.6.0", + "resolved": "https://registry.npmmirror.com/react-base16-styling/-/react-base16-styling-0.6.0.tgz", + "integrity": "sha512-yvh/7CArceR/jNATXOKDlvTnPKPmGZz7zsenQ3jUwLzHkNUR0CvY3yGYJbWJ/nnxsL8Sgmt5cO3/SILVuPO6TQ==", + "dependencies": { + "base16": "^1.0.0", + "lodash.curry": "^4.0.1", + "lodash.flow": "^3.3.0", + "pure-color": "^1.2.0" + } + }, "node_modules/react-dom": { "version": "17.0.2", "resolved": "https://registry.npmmirror.com/react-dom/download/react-dom-17.0.2.tgz", @@ -12020,6 +12151,21 @@ "resolved": "https://registry.npmmirror.com/react-is/download/react-is-17.0.2.tgz", "integrity": 
"sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w== sha1-5pHUqOnHiTZWVVOas3J2Kw77VPA=" }, + "node_modules/react-json-view": { + "version": "1.21.3", + "resolved": "https://registry.npmmirror.com/react-json-view/-/react-json-view-1.21.3.tgz", + "integrity": "sha512-13p8IREj9/x/Ye4WI/JpjhoIwuzEgUAtgJZNBJckfzJt1qyh24BdTm6UQNGnyTq9dapQdrqvquZTo3dz1X6Cjw==", + "dependencies": { + "flux": "^4.0.1", + "react-base16-styling": "^0.6.0", + "react-lifecycles-compat": "^3.0.4", + "react-textarea-autosize": "^8.3.2" + }, + "peerDependencies": { + "react": "^17.0.0 || ^16.3.0 || ^15.5.4", + "react-dom": "^17.0.0 || ^16.3.0 || ^15.5.4" + } + }, "node_modules/react-lifecycles-compat": { "version": "3.0.4", "resolved": "https://registry.npmmirror.com/react-lifecycles-compat/download/react-lifecycles-compat-3.0.4.tgz", @@ -12186,6 +12332,22 @@ "react": "17.0.2" } }, + "node_modules/react-textarea-autosize": { + "version": "8.3.4", + "resolved": "https://registry.npmmirror.com/react-textarea-autosize/-/react-textarea-autosize-8.3.4.tgz", + "integrity": "sha512-CdtmP8Dc19xL8/R6sWvtknD/eCXkQr30dtvC4VmGInhRsfF8X/ihXCq6+9l9qbxmKRiq407/7z5fxE7cVWQNgQ==", + "dependencies": { + "@babel/runtime": "^7.10.2", + "use-composed-ref": "^1.3.0", + "use-latest": "^1.2.1" + }, + "engines": { + "node": ">=10" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + } + }, "node_modules/react-tween-state": { "version": "0.1.5", "resolved": "https://registry.nlark.com/react-tween-state/download/react-tween-state-0.1.5.tgz", @@ -14420,6 +14582,27 @@ "node": ">=0.10.0" } }, + "node_modules/use-composed-ref": { + "version": "1.3.0", + "resolved": "https://registry.npmmirror.com/use-composed-ref/-/use-composed-ref-1.3.0.tgz", + "integrity": "sha512-GLMG0Jc/jiKov/3Ulid1wbv3r54K9HlMW29IWcDFPEqFkSO2nS0MuefWgMJpeHQ9YJeXDL3ZUF+P3jdXlZX/cQ==", + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + } + }, + "node_modules/use-isomorphic-layout-effect": { + "version": "1.1.2", + "resolved": "https://registry.npmmirror.com/use-isomorphic-layout-effect/-/use-isomorphic-layout-effect-1.1.2.tgz", + "integrity": "sha512-49L8yCO3iGT/ZF9QttjwLF/ZD9Iwto5LnH5LmEdk/6cFmXddqi2ulF0edxTwjj+7mqvpVVGQWvbXZdn32wRSHA==", + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, "node_modules/use-json-comparison": { "version": "1.0.6", "resolved": "https://registry.npmmirror.com/use-json-comparison/download/use-json-comparison-1.0.6.tgz", @@ -14428,6 +14611,22 @@ "react": ">=16.9.0" } }, + "node_modules/use-latest": { + "version": "1.2.1", + "resolved": "https://registry.npmmirror.com/use-latest/-/use-latest-1.2.1.tgz", + "integrity": "sha512-xA+AVm/Wlg3e2P/JiItTziwS7FK92LWrDB0p+hgXloIMuVCeJJ8v6f0eeHyPZaJrM+usM1FkFfbNCrJGs8A/zw==", + "dependencies": { + "use-isomorphic-layout-effect": "^1.1.1" + }, + "peerDependencies": { + "react": "^16.8.0 || ^17.0.0 || ^18.0.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, "node_modules/use-media-antd-query": { "version": "1.1.0", "resolved": "https://registry.nlark.com/use-media-antd-query/download/use-media-antd-query-1.1.0.tgz", @@ -18019,6 +18218,11 @@ } } }, + "base16": { + "version": "1.0.0", + "resolved": "https://registry.npmmirror.com/base16/-/base16-1.0.0.tgz", + "integrity": "sha512-pNdYkNPiJUnEhnfXV56+sQy8+AaPcG3POZAUnwr4EeqCUZFz4u2PePbo3e5Gj4ziYPCWGUZT9RHisvJKnwFuBQ==" + }, "base64-js": { "version": 
"1.5.1", "resolved": "https://registry.npmmirror.com/base64-js/download/base64-js-1.5.1.tgz", @@ -18759,6 +18963,43 @@ "cross-spawn": "^7.0.1" } }, + "cross-fetch": { + "version": "3.1.5", + "resolved": "https://registry.npmmirror.com/cross-fetch/-/cross-fetch-3.1.5.tgz", + "integrity": "sha512-lvb1SBsI0Z7GDwmuid+mU3kWVBwTVUbe7S0H52yaaAdQOXq2YktTCZdlAcNKFzE6QtRz0snpw9bNiPeOIkkQvw==", + "requires": { + "node-fetch": "2.6.7" + }, + "dependencies": { + "node-fetch": { + "version": "2.6.7", + "resolved": "https://registry.npmmirror.com/node-fetch/-/node-fetch-2.6.7.tgz", + "integrity": "sha512-ZjMPFEfVx5j+y2yF35Kzx5sF7kDzxuDj6ziH4FFbOp87zKDZNx8yExJIb05OGF4Nlt9IHFIMBkRl41VdvcNdbQ==", + "requires": { + "whatwg-url": "^5.0.0" + } + }, + "tr46": { + "version": "0.0.3", + "resolved": "https://registry.npmmirror.com/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==" + }, + "webidl-conversions": { + "version": "3.0.1", + "resolved": "https://registry.npmmirror.com/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==" + }, + "whatwg-url": { + "version": "5.0.0", + "resolved": "https://registry.npmmirror.com/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "requires": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + } + } + }, "cross-spawn": { "version": "7.0.3", "resolved": "https://registry.nlark.com/cross-spawn/download/cross-spawn-7.0.3.tgz", @@ -19658,6 +19899,30 @@ "bser": "2.1.1" } }, + "fbemitter": { + "version": "3.0.0", + "resolved": "https://registry.npmmirror.com/fbemitter/-/fbemitter-3.0.0.tgz", + "integrity": "sha512-KWKaceCwKQU0+HPoop6gn4eOHk50bBv/VxjJtGMfwmJt3D29JpN4H4eisCtIPA+a8GVBam+ldMMpMjJUvpDyHw==", + "requires": { + "fbjs": "^3.0.0" + }, + "dependencies": { + "fbjs": { + "version": "3.0.4", + "resolved": "https://registry.npmmirror.com/fbjs/-/fbjs-3.0.4.tgz", + "integrity": "sha512-ucV0tDODnGV3JCnnkmoszb5lf4bNpzjv80K41wd4k798Etq+UYD0y0TIfalLjZoKgjive6/adkRnszwapiDgBQ==", + "requires": { + "cross-fetch": "^3.1.5", + "fbjs-css-vars": "^1.0.0", + "loose-envify": "^1.0.0", + "object-assign": "^4.1.0", + "promise": "^7.1.1", + "setimmediate": "^1.0.5", + "ua-parser-js": "^0.7.30" + } + } + } + }, "fbjs": { "version": "0.8.18", "resolved": "https://registry.npmmirror.com/fbjs/download/fbjs-0.8.18.tgz", @@ -19679,6 +19944,11 @@ } } }, + "fbjs-css-vars": { + "version": "1.0.2", + "resolved": "https://registry.npmmirror.com/fbjs-css-vars/-/fbjs-css-vars-1.0.2.tgz", + "integrity": "sha512-b2XGFAFdWZWg0phtAWLHCk836A1Xann+I+Dgd3Gk64MHKZO44FfoD1KxyvbSh0qZsIoXQGGlVztIY+oitJPpRQ==" + }, "fecha": { "version": "4.2.1", "resolved": "https://registry.npmmirror.com/fecha/download/fecha-4.2.1.tgz", @@ -19717,6 +19987,31 @@ "resolved": "https://registry.npmmirror.com/flatten/download/flatten-1.0.3.tgz", "integrity": "sha512-dVsPA/UwQ8+2uoFe5GHtiBMu48dWLTdsuEd7CKGlZlD78r1TTWBvDuFaFGKCo/ZfEr95Uk56vZoX86OsHkUeIg== sha1-wSg6yfJ7Noq8HjbR/3sEUBowNWs=" }, + "flux": { + "version": "4.0.3", + "resolved": "https://registry.npmmirror.com/flux/-/flux-4.0.3.tgz", + "integrity": "sha512-yKAbrp7JhZhj6uiT1FTuVMlIAT1J4jqEyBpFApi1kxpGZCvacMVc/t1pMQyotqHhAgvoE3bNvAykhCo2CLjnYw==", + "requires": { + "fbemitter": "^3.0.0", + "fbjs": "^3.0.1" + }, + "dependencies": { + "fbjs": { + "version": "3.0.4", + "resolved": 
"https://registry.npmmirror.com/fbjs/-/fbjs-3.0.4.tgz", + "integrity": "sha512-ucV0tDODnGV3JCnnkmoszb5lf4bNpzjv80K41wd4k798Etq+UYD0y0TIfalLjZoKgjive6/adkRnszwapiDgBQ==", + "requires": { + "cross-fetch": "^3.1.5", + "fbjs-css-vars": "^1.0.0", + "loose-envify": "^1.0.0", + "object-assign": "^4.1.0", + "promise": "^7.1.1", + "setimmediate": "^1.0.5", + "ua-parser-js": "^0.7.30" + } + } + } + }, "follow-redirects": { "version": "1.14.7", "resolved": "https://registry.npmmirror.com/follow-redirects/download/follow-redirects-1.14.7.tgz", @@ -22765,11 +23060,21 @@ "resolved": "https://registry.npmmirror.com/lodash.clonedeep/download/lodash.clonedeep-4.5.0.tgz", "integrity": "sha1-4j8/nE+Pvd6HJSnBBxhXoIblzO8= sha512-H5ZhCF25riFd9uB5UCkVKo61m3S/xZk1x4wA6yp/L3RFP6Z/eHH1ymQcGLo7J3GMPfm0V/7m1tryHuGVxpqEBQ==" }, + "lodash.curry": { + "version": "4.1.1", + "resolved": "https://registry.npmmirror.com/lodash.curry/-/lodash.curry-4.1.1.tgz", + "integrity": "sha512-/u14pXGviLaweY5JI0IUzgzF2J6Ne8INyzAZjImcryjgkZ+ebruBxy2/JaOOkTqScddcYtakjhSaeemV8lR0tA==" + }, "lodash.debounce": { "version": "4.0.8", "resolved": "https://registry.nlark.com/lodash.debounce/download/lodash.debounce-4.0.8.tgz?cache=0&sync_timestamp=1622605323058&other_urls=https%3A%2F%2Fregistry.nlark.com%2Flodash.debounce%2Fdownload%2Flodash.debounce-4.0.8.tgz", "integrity": "sha1-gteb/zCmfEAF/9XiUVMArZyk168=" }, + "lodash.flow": { + "version": "3.5.0", + "resolved": "https://registry.npmmirror.com/lodash.flow/-/lodash.flow-3.5.0.tgz", + "integrity": "sha512-ff3BX/tSioo+XojX4MOsOMhJw0nZoUEF011LX8g8d3gvjVbxd89cCio4BCXronjxcTUIJUoqKEUA+n4CqvvRPw==" + }, "lodash.isequal": { "version": "4.5.0", "resolved": "https://registry.npmmirror.com/lodash.isequal/download/lodash.isequal-4.5.0.tgz", @@ -24211,6 +24516,11 @@ "resolved": "https://registry.nlark.com/punycode/download/punycode-2.1.1.tgz?cache=0&sync_timestamp=1622604519710&other_urls=https%3A%2F%2Fregistry.nlark.com%2Fpunycode%2Fdownload%2Fpunycode-2.1.1.tgz", "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A== sha1-tYsBCsQMIsVldhbI0sLALHv0eew=" }, + "pure-color": { + "version": "1.3.0", + "resolved": "https://registry.npmmirror.com/pure-color/-/pure-color-1.3.0.tgz", + "integrity": "sha512-QFADYnsVoBMw1srW7OVKEYjG+MbIa49s54w1MA1EDY6r2r/sTcKKYqRX1f4GYvnXP7eN/Pe9HFcX+hwzmrXRHA==" + }, "qs": { "version": "6.10.3", "resolved": "https://registry.npmmirror.com/qs/download/qs-6.10.3.tgz", @@ -24736,6 +25046,17 @@ "object-assign": "^4.1.1" } }, + "react-base16-styling": { + "version": "0.6.0", + "resolved": "https://registry.npmmirror.com/react-base16-styling/-/react-base16-styling-0.6.0.tgz", + "integrity": "sha512-yvh/7CArceR/jNATXOKDlvTnPKPmGZz7zsenQ3jUwLzHkNUR0CvY3yGYJbWJ/nnxsL8Sgmt5cO3/SILVuPO6TQ==", + "requires": { + "base16": "^1.0.0", + "lodash.curry": "^4.0.1", + "lodash.flow": "^3.3.0", + "pure-color": "^1.2.0" + } + }, "react-dom": { "version": "17.0.2", "resolved": "https://registry.npmmirror.com/react-dom/download/react-dom-17.0.2.tgz", @@ -24792,6 +25113,17 @@ "resolved": "https://registry.npmmirror.com/react-is/download/react-is-17.0.2.tgz", "integrity": "sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w== sha1-5pHUqOnHiTZWVVOas3J2Kw77VPA=" }, + "react-json-view": { + "version": "1.21.3", + "resolved": "https://registry.npmmirror.com/react-json-view/-/react-json-view-1.21.3.tgz", + "integrity": 
"sha512-13p8IREj9/x/Ye4WI/JpjhoIwuzEgUAtgJZNBJckfzJt1qyh24BdTm6UQNGnyTq9dapQdrqvquZTo3dz1X6Cjw==", + "requires": { + "flux": "^4.0.1", + "react-base16-styling": "^0.6.0", + "react-lifecycles-compat": "^3.0.4", + "react-textarea-autosize": "^8.3.2" + } + }, "react-lifecycles-compat": { "version": "3.0.4", "resolved": "https://registry.npmmirror.com/react-lifecycles-compat/download/react-lifecycles-compat-3.0.4.tgz", @@ -24922,6 +25254,16 @@ "scheduler": "^0.20.2" } }, + "react-textarea-autosize": { + "version": "8.3.4", + "resolved": "https://registry.npmmirror.com/react-textarea-autosize/-/react-textarea-autosize-8.3.4.tgz", + "integrity": "sha512-CdtmP8Dc19xL8/R6sWvtknD/eCXkQr30dtvC4VmGInhRsfF8X/ihXCq6+9l9qbxmKRiq407/7z5fxE7cVWQNgQ==", + "requires": { + "@babel/runtime": "^7.10.2", + "use-composed-ref": "^1.3.0", + "use-latest": "^1.2.1" + } + }, "react-tween-state": { "version": "0.1.5", "resolved": "https://registry.nlark.com/react-tween-state/download/react-tween-state-0.1.5.tgz", @@ -26795,12 +27137,32 @@ "resolved": "https://registry.nlark.com/use/download/use-3.1.1.tgz", "integrity": "sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ== sha1-1QyMrHmhn7wg8pEfVuuXP04QBw8=" }, + "use-composed-ref": { + "version": "1.3.0", + "resolved": "https://registry.npmmirror.com/use-composed-ref/-/use-composed-ref-1.3.0.tgz", + "integrity": "sha512-GLMG0Jc/jiKov/3Ulid1wbv3r54K9HlMW29IWcDFPEqFkSO2nS0MuefWgMJpeHQ9YJeXDL3ZUF+P3jdXlZX/cQ==", + "requires": {} + }, + "use-isomorphic-layout-effect": { + "version": "1.1.2", + "resolved": "https://registry.npmmirror.com/use-isomorphic-layout-effect/-/use-isomorphic-layout-effect-1.1.2.tgz", + "integrity": "sha512-49L8yCO3iGT/ZF9QttjwLF/ZD9Iwto5LnH5LmEdk/6cFmXddqi2ulF0edxTwjj+7mqvpVVGQWvbXZdn32wRSHA==", + "requires": {} + }, "use-json-comparison": { "version": "1.0.6", "resolved": "https://registry.npmmirror.com/use-json-comparison/download/use-json-comparison-1.0.6.tgz", "integrity": "sha512-xPadt5yMRbEmVfOSGFSMqjjICrq7nLbfSH3rYIXsrtcuFX7PmbYDN/ku8ObBn3v8o/yZelO1OxUS5+5TI3+fUw== sha1-oBK7wljOdF2x9WdF3GU/V1ImyyE=", "requires": {} }, + "use-latest": { + "version": "1.2.1", + "resolved": "https://registry.npmmirror.com/use-latest/-/use-latest-1.2.1.tgz", + "integrity": "sha512-xA+AVm/Wlg3e2P/JiItTziwS7FK92LWrDB0p+hgXloIMuVCeJJ8v6f0eeHyPZaJrM+usM1FkFfbNCrJGs8A/zw==", + "requires": { + "use-isomorphic-layout-effect": "^1.1.1" + } + }, "use-media-antd-query": { "version": "1.1.0", "resolved": "https://registry.nlark.com/use-media-antd-query/download/use-media-antd-query-1.1.0.tgz", diff --git a/ymir/web/package.json b/ymir/web/package.json index cfe6b14684..b64cdc9eaa 100644 --- a/ymir/web/package.json +++ b/ymir/web/package.json @@ -1,12 +1,12 @@ { "name": "ymir-web", - "version": "1.1.0.0517", + "version": "2.0.0.1021", "scripts": { "local": "APP_ROOT=../pages/demo1 umi dev", "analyze": "cross-env UMI_ENV=dev ANALYZE=1 umi dev", "start": "cross-env MOCK=none UMI_ENV=dev umi dev", "start:local": "cross-env MOCK=none UMI_ENV=local umi dev", - "start:mock": "cross-env UMI_ENV=dev umi dev", + "start:mock": "cross-env UMI_ENV=local umi dev", "build": "cross-env UMI_ENV=dev umi build", "build:dev": "cross-env UMI_ENV=dev umi build", "build:prod": "cross-env UMI_ENV=prod umi build", @@ -66,6 +66,8 @@ "echarts": "^5.2.2", "react": "17.x", "react-dom": "17.x", + "react-json-view": "^1.21.3", + "react-xml-viewer": "^1.3.0", "socket.io-client": "^4.4.1", "umi": "^3.5.20" }, diff --git 
a/ymir/web/public/683f4fa14d1baa733a87d9644bb0457cbed5aba8 b/ymir/web/public/683f4fa14d1baa733a87d9644bb0457cbed5aba8 index 4d94ee865a..92ee79acf4 100644 Binary files a/ymir/web/public/683f4fa14d1baa733a87d9644bb0457cbed5aba8 and b/ymir/web/public/683f4fa14d1baa733a87d9644bb0457cbed5aba8 differ diff --git a/ymir/web/public/config/config.js b/ymir/web/public/config/config.js deleted file mode 100644 index 913de9a6e9..0000000000 --- a/ymir/web/public/config/config.js +++ /dev/null @@ -1,3 +0,0 @@ -window.baseConfig = { - "APIURL": "/api/v1/" -} \ No newline at end of file diff --git a/ymir/web/public/config/config.js.template b/ymir/web/public/config/config.js.template new file mode 100644 index 0000000000..c7bdfcb43f --- /dev/null +++ b/ymir/web/public/config/config.js.template @@ -0,0 +1,4 @@ +window.baseConfig = { + "DEPLOY_MODULE_URL": "${DEPLOY_MODULE_URL}", + "APIURL": "/api/v1/" +} \ No newline at end of file diff --git a/ymir/web/public/mining.zip b/ymir/web/public/mining.zip index 8097fb593e..f0ae1fa33e 100644 Binary files a/ymir/web/public/mining.zip and b/ymir/web/public/mining.zip differ diff --git a/ymir/web/public/sample_dataset.zip b/ymir/web/public/sample_dataset.zip index 22dcd44cb1..4cfbae713c 100644 Binary files a/ymir/web/public/sample_dataset.zip and b/ymir/web/public/sample_dataset.zip differ diff --git a/ymir/web/public/val.zip b/ymir/web/public/val.zip index 9038bfa893..cc0c74b1db 100644 Binary files a/ymir/web/public/val.zip and b/ymir/web/public/val.zip differ diff --git a/ymir/web/src/assets/icons/iconfont.css b/ymir/web/src/assets/icons/iconfont.css index 8428d4192e..e26a4c18a5 100644 --- a/ymir/web/src/assets/icons/iconfont.css +++ b/ymir/web/src/assets/icons/iconfont.css @@ -1,6 +1,6 @@ @font-face { font-family: "iconfont"; /* Project id */ - src: url('iconfont.ttf?t=1651198552473') format('truetype'); + src: url('iconfont.ttf?t=1656927622762') format('truetype'); } .iconfont { @@ -423,3 +423,39 @@ content: "\e6fb"; } +.icon-start:before { + content: "\e6fc"; +} + +.icon-stop1:before { + content: "\e6fd"; +} + +.icon-bell:before { + content: "\e6fe"; +} + +.icon-wait:before { + content: "\e6ff"; +} + +.icon-tipts:before { + content: "\e700"; +} + +.icon-diagnosis2:before { + content: "\e701"; +} + +.icon-government-line:before { + content: "\e702"; +} + +.icon-diagnosis:before { + content: "\e703"; +} + +.icon-scene:before { + content: "\e704"; +} + diff --git a/ymir/web/src/assets/icons/iconfont.js b/ymir/web/src/assets/icons/iconfont.js index 36527f033b..3d6fcabea5 100644 --- a/ymir/web/src/assets/icons/iconfont.js +++ b/ymir/web/src/assets/icons/iconfont.js @@ -1 +1 @@ -!function(a){var h,l,v,o,z,t='',m=(m=document.getElementsByTagName("script"))[m.length-1].getAttribute("data-injectcss"),i=function(a,h){h.parentNode.insertBefore(a,h)};if(m&&!a.__iconfont__svg__cssinject__){a.__iconfont__svg__cssinject__=!0;try{document.write("")}catch(a){console&&console.log(a)}}function c(){z||(z=!0,v())}function p(){try{o.documentElement.doScroll("left")}catch(a){return void setTimeout(p,50)}c()}h=function(){var 
a,h=document.createElement("div");h.innerHTML=t,t=null,(h=h.getElementsByTagName("svg")[0])&&(h.setAttribute("aria-hidden","true"),h.style.position="absolute",h.style.width=0,h.style.height=0,h.style.overflow="hidden",h=h,(a=document.body).firstChild?i(h,a.firstChild):a.appendChild(h))},document.addEventListener?~["complete","loaded","interactive"].indexOf(document.readyState)?setTimeout(h,0):(l=function(){document.removeEventListener("DOMContentLoaded",l,!1),h()},document.addEventListener("DOMContentLoaded",l,!1)):document.attachEvent&&(v=h,o=a.document,z=!1,p(),o.onreadystatechange=function(){"complete"==o.readyState&&(o.onreadystatechange=null,c())})}(window); \ No newline at end of file +!function(a){var h,l,v,o,z,t='',m=(m=document.getElementsByTagName("script"))[m.length-1].getAttribute("data-injectcss"),i=function(a,h){h.parentNode.insertBefore(a,h)};if(m&&!a.__iconfont__svg__cssinject__){a.__iconfont__svg__cssinject__=!0;try{document.write("")}catch(a){console&&console.log(a)}}function c(){z||(z=!0,v())}function p(){try{o.documentElement.doScroll("left")}catch(a){return void setTimeout(p,50)}c()}h=function(){var a,h=document.createElement("div");h.innerHTML=t,t=null,(h=h.getElementsByTagName("svg")[0])&&(h.setAttribute("aria-hidden","true"),h.style.position="absolute",h.style.width=0,h.style.height=0,h.style.overflow="hidden",h=h,(a=document.body).firstChild?i(h,a.firstChild):a.appendChild(h))},document.addEventListener?~["complete","loaded","interactive"].indexOf(document.readyState)?setTimeout(h,0):(l=function(){document.removeEventListener("DOMContentLoaded",l,!1),h()},document.addEventListener("DOMContentLoaded",l,!1)):document.attachEvent&&(v=h,o=a.document,z=!1,p(),o.onreadystatechange=function(){"complete"==o.readyState&&(o.onreadystatechange=null,c())})}(window); \ No newline at end of file diff --git a/ymir/web/src/assets/icons/iconfont.json b/ymir/web/src/assets/icons/iconfont.json index 575a464ffb..b780932adb 100644 --- a/ymir/web/src/assets/icons/iconfont.json +++ b/ymir/web/src/assets/icons/iconfont.json @@ -725,6 +725,69 @@ "font_class": "pretreatment", "unicode": "e6fb", "unicode_decimal": 59131 + }, + { + "icon_id": "100896692", + "name": "start", + "font_class": "start", + "unicode": "e6fc", + "unicode_decimal": 59132 + }, + { + "icon_id": "100896693", + "name": "stop", + "font_class": "stop1", + "unicode": "e6fd", + "unicode_decimal": 59133 + }, + { + "icon_id": "100896694", + "name": "bell", + "font_class": "bell", + "unicode": "e6fe", + "unicode_decimal": 59134 + }, + { + "icon_id": "100896695", + "name": "wait", + "font_class": "wait", + "unicode": "e6ff", + "unicode_decimal": 59135 + }, + { + "icon_id": "100896696", + "name": "tipts", + "font_class": "tipts", + "unicode": "e700", + "unicode_decimal": 59136 + }, + { + "icon_id": "102565537", + "name": "diagnosis2", + "font_class": "diagnosis2", + "unicode": "e701", + "unicode_decimal": 59137 + }, + { + "icon_id": "102565538", + "name": "government-line", + "font_class": "government-line", + "unicode": "e702", + "unicode_decimal": 59138 + }, + { + "icon_id": "102565539", + "name": "diagnosis", + "font_class": "diagnosis", + "unicode": "e703", + "unicode_decimal": 59139 + }, + { + "icon_id": "102565540", + "name": "scene", + "font_class": "scene", + "unicode": "e704", + "unicode_decimal": 59140 } ] } diff --git a/ymir/web/src/assets/icons/iconfont.ttf b/ymir/web/src/assets/icons/iconfont.ttf index 4844f7ae3a..4057d198bf 100644 Binary files a/ymir/web/src/assets/icons/iconfont.ttf and 
b/ymir/web/src/assets/icons/iconfont.ttf differ diff --git a/ymir/web/src/assets/sample.png b/ymir/web/src/assets/sample.png index 6e80e8bd62..1be32c63ad 100644 Binary files a/ymir/web/src/assets/sample.png and b/ymir/web/src/assets/sample.png differ diff --git a/ymir/web/src/components/chart/bar.js b/ymir/web/src/components/chart/bar.js index 514f958c58..b7ef454eb0 100644 --- a/ymir/web/src/components/chart/bar.js +++ b/ymir/web/src/components/chart/bar.js @@ -22,7 +22,6 @@ const Chart = ({ option = {}, height = 300 }) => { useEffect(() => { let barChart = null - // console.log('option: ', option, chartRef) if (chartRef.current) { const chart = chartRef.current barChart = echarts.init(chart) diff --git a/ymir/web/src/components/chart/line.js b/ymir/web/src/components/chart/line.js new file mode 100644 index 0000000000..6f16005e56 --- /dev/null +++ b/ymir/web/src/components/chart/line.js @@ -0,0 +1,61 @@ +import { useEffect, useRef, useState } from "react" +import * as echarts from 'echarts/core'; +import { + TooltipComponent, + GridComponent, + LegendComponent, + MarkLineComponent +} from 'echarts/components' +import { + LineChart, +} from 'echarts/charts' +import { + CanvasRenderer +} from 'echarts/renderers' + +echarts.use( + [TooltipComponent, GridComponent, LegendComponent, MarkLineComponent, LineChart, CanvasRenderer] +); + +const Chart = ({ option = {}, height = 300, style = {}, ...rest }) => { + const chartRef = useRef(null) + + useEffect(() => { + let lineChart = null + if (chartRef.current) { + setTimeout(() => { + lineChart = echarts.init(chartRef.current) + lineChart.setOption(option) + }, 50) + } + return () => { + lineChart && lineChart.dispose() + } + }, [option]) + + window.addEventListener('resize', () => { + if (!chartRef.current) { + return + } + const lineChart = echarts.getInstanceByDom(chartRef.current) + if (lineChart && lineChart.resize) { + const container = chartRef.current.parentElement + var wi = getStyle(container, 'width') + chartRef.current.style.width = wi + lineChart.resize() + } + }) + + function getStyle(el, name) { + if (window.getComputedStyle) { + return window.getComputedStyle(el, null)[name] + } else { + return el.currentStyle[name] + } + } + + + return
     
    +} + +export default Chart diff --git a/ymir/web/src/components/common/CheckProjectDirty.js b/ymir/web/src/components/common/CheckProjectDirty.js index 6f3c01cad9..48c185a005 100644 --- a/ymir/web/src/components/common/CheckProjectDirty.js +++ b/ymir/web/src/components/common/CheckProjectDirty.js @@ -1,31 +1,48 @@ -import { Button, Space, Tag } from "antd" -import useProjectStatus from "@/hooks/useProjectStatus" +import { Button, Row, Col } from "antd" +import useFetch from "@/hooks/useFetch" import { useEffect, useState } from "react" +import { useSelector } from 'umi' import t from '@/utils/t' +import s from './common.less' +import { FailIcon, SuccessIcon } from "./icons" -const CheckProjectDirty = ({ pid, initialCheck, callback = () => {}, ...props }) => { - const { checkDirty } = useProjectStatus(pid) - const [isDirty, setDirty] = useState(null) +const CheckProjectDirty = ({ pid, initialCheck, callback = () => { }, ...props }) => { + const effect = 'project/checkStatus' + const [{ is_dirty: isDirty }, check] = useFetch(effect, {}, true) const [checked, setChecked] = useState(false) + const loading = useSelector(({ loading }) => loading.effects[effect]) + useEffect(() => { initialCheck && checkStatus() }, []) - async function checkStatus() { - const dirty = await checkDirty() - setDirty(dirty) + useEffect(() => { + checked && callback(isDirty) + }, [checked]) + + function checkStatus() { + check(pid) setChecked(true) - callback(dirty) } - return + return {checked ? - isDirty ? t('project.workspace.status.dirty', { dirtyLabel: Dirty }) - : t('project.workspace.status.clean', { cleanLabel: Clean }) + + {isDirty ? + <>{t('project.workspace.status.dirty', { + dirtyLabel: Dirty + })} : + <> {t('project.workspace.status.clean', { + cleanLabel: Clean + })} + } + : null} - - + + + + } export default CheckProjectDirty diff --git a/ymir/web/src/components/common/breadcrumb.js b/ymir/web/src/components/common/breadcrumb.js index baf133c993..c2cf55b7fc 100644 --- a/ymir/web/src/components/common/breadcrumb.js +++ b/ymir/web/src/components/common/breadcrumb.js @@ -1,7 +1,9 @@ +import { useEffect } from "react" import { Breadcrumb } from "antd" -import { Link, useHistory, useParams, useRouteMatch, } from "umi" +import { Link, useHistory, useParams, useRouteMatch, useSelector } from "umi" import { homeRoutes } from '@/config/routes' import t from '@/utils/t' +import useFetch from '@/hooks/useFetch' import s from './common.less' const { Item } = Breadcrumb @@ -32,8 +34,21 @@ function loop(id = 1, crumbs) { function Breadcrumbs({ suffix = '', titles = {} }) { const { path } = useRouteMatch() const params = useParams() || {} + const [project, getProject] = useFetch('project/getProject', {}) const crumbs = getCrumbs() const crumbItems = getCrumbItems(path, crumbs) + + useEffect(() => { + setTimeout(() => { + if (crumbItems.some(crumb => crumb.id === 25) && params.id) { + getProject({ id: params.id }) + } + }, 500) + }, [params.id]) + + const getLabel = (crumb, customTitle) => { + return (crumb.id === 25 ? project.name : customTitle) || t(crumb.label) + } return
    {crumbItems.map((crumb, index) => { @@ -41,7 +56,7 @@ function Breadcrumbs({ suffix = '', titles = {} }) { const link = crumb.path.replace(/:([^\/]+)/g, (str, key) => { return params[key] ? params[key] : '' }) - const label = titles[index] ? titles[index] : t(crumb.label) + const label = getLabel(crumb, titles[index]) return {last ? {label} {suffix} : {label}} diff --git a/ymir/web/src/components/common/common.less b/ymir/web/src/components/common/common.less index 52a8e16259..7b53c00115 100644 --- a/ymir/web/src/components/common/common.less +++ b/ymir/web/src/components/common/common.less @@ -39,8 +39,7 @@ z-index: 10000; } /** component: icons **/ -.cicon { - // margin-left: 6px; +:global(.ant-menu-item) .cicon, :global(.ant-menu-submenu-title) .cicon, .cicon { font-size: 20px; line-height: 20px; vertical-align: middle; @@ -91,3 +90,38 @@ } } +/** check project dirty **/ +.checkPanel { + @error: rgb(242, 99, 123); + @success: @primary-color; + @btnColor: #10BC5E; + color: rgba(0, 0, 0, 0.65); +.checkerError, .checkerSuccess { + border-radius: 2px; + line-height: 30px; + padding: 0 10px; +} +.checkerSuccess { + background: fade(@success, 10); + border: 1px solid @success; +} +.checkerError { + background: fade(@error, 10); + border: 1px solid @error; +} + .checkBtn { + background-color: @btnColor; + color: #fff; + border-color:@btnColor; + &:hover { + background-color: lighten(@btnColor, 10%); + border-color: lighten(@btnColor, 10%); + } + } + .success { + color: @success; + } + .error { + color: @error; + } +} diff --git a/ymir/web/src/components/common/descPop.js b/ymir/web/src/components/common/descPop.js new file mode 100644 index 0000000000..cdb5a928a2 --- /dev/null +++ b/ymir/web/src/components/common/descPop.js @@ -0,0 +1,4 @@ +export const DescPop = ({ description = '', ...rest }) => { + const text = description.split(/\n/) + return
<div {...rest}>
    {text.map((txt, i) => (
      <div key={i}>{txt}</div>
    ))}
  </div>
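Note: DescPop splits a description on newlines and renders one element per line, so multi-line descriptions survive inside antd popovers. A usage sketch (the dataset list later in this diff uses exactly this pattern; the wrapper component here is illustrative, not part of descPop.js):

    import { Popover } from 'antd'
    import { DescPop } from '@/components/common/descPop'

    // Hover target that reveals the full multi-line description.
    const NameWithDesc = ({ name, description }) =>
      description
        ? <Popover content={<DescPop description={description} />}>{name}</Popover>
        : name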
    +} \ No newline at end of file diff --git a/ymir/web/src/components/common/icons.js b/ymir/web/src/components/common/icons.js index 75fa2b7e77..bad5528a92 100644 --- a/ymir/web/src/components/common/icons.js +++ b/ymir/web/src/components/common/icons.js @@ -2,6 +2,13 @@ import { createFromIconfontCN } from '@ant-design/icons' import iconUrl from '@/assets/icons/iconfont' import s from './common.less' +import Icon from './icons/Icon' + +import DeviceListSVG from './icons/DeviceListSVG' +import DeviceSupportedSVG from './icons/DeviceSupportedSVG' +import MyAlgoSVG from './icons/MyAlgoSVG' +import StoreSVG from './icons/StoreSVG' + const IconFont = createFromIconfontCN({ scriptUrl: iconUrl, }) @@ -10,6 +17,11 @@ const iconFont = type => props => + ) +} diff --git a/ymir/web/src/components/common/icons/DeviceSupportedSVG.tsx b/ymir/web/src/components/common/icons/DeviceSupportedSVG.tsx new file mode 100644 index 0000000000..04feedae5f --- /dev/null +++ b/ymir/web/src/components/common/icons/DeviceSupportedSVG.tsx @@ -0,0 +1,5 @@ +export default function DeviceSupportedSVG() { + return ( + + ) +} diff --git a/ymir/web/src/components/common/icons/Icon.tsx b/ymir/web/src/components/common/icons/Icon.tsx new file mode 100644 index 0000000000..6169baf800 --- /dev/null +++ b/ymir/web/src/components/common/icons/Icon.tsx @@ -0,0 +1,41 @@ +import React, { useEffect, useRef } from 'react' +import { IconProps } from './IconProps' +import styles from './style.css' +import s from '../common.less' + +// icon hoc +export default function Icon(SvgContent: React.FC) { + function generateIcon(props: IconProps) { + const root = useRef(null) + const { size = '1em', width, height, spin, rtl, color, fill, stroke, className, ...rest } = props; + const _width = width || size; + const _height = height || size; + const _stroke = stroke || color; + const _fill = fill || color; + useEffect(() => { + if (!_fill) { + (root.current as SVGSVGElement)?.querySelectorAll('[data-follow-fill]').forEach(item => { + item.setAttribute('fill', item.getAttribute('data-follow-fill') || '') + }) + } + if (!_stroke) { + (root.current as SVGSVGElement)?.querySelectorAll('[data-follow-stroke]').forEach(item => { + item.setAttribute('stroke', item.getAttribute('data-follow-stroke') || '') + }) + } + }, [stroke, color, fill]) + + return + + + } + return generateIcon +} diff --git a/ymir/web/src/components/common/icons/IconProps.ts b/ymir/web/src/components/common/icons/IconProps.ts new file mode 100644 index 0000000000..6bbe089e94 --- /dev/null +++ b/ymir/web/src/components/common/icons/IconProps.ts @@ -0,0 +1,10 @@ +export interface IconProps extends React.SVGProps { + size?: string | number; + width?: string | number; + height?: string | number; + spin?: boolean; + rtl?: boolean; + color?: string; + fill?: string; + stroke?: string; +} \ No newline at end of file diff --git a/ymir/web/src/components/common/icons/MyAlgoSVG.tsx b/ymir/web/src/components/common/icons/MyAlgoSVG.tsx new file mode 100644 index 0000000000..a4ce2add8e --- /dev/null +++ b/ymir/web/src/components/common/icons/MyAlgoSVG.tsx @@ -0,0 +1,3 @@ +export default function MyAlgoSVG() { + return () +} diff --git a/ymir/web/src/components/common/icons/StoreSVG.tsx b/ymir/web/src/components/common/icons/StoreSVG.tsx new file mode 100644 index 0000000000..47b11b75df --- /dev/null +++ b/ymir/web/src/components/common/icons/StoreSVG.tsx @@ -0,0 +1,5 @@ +export default function StoreSVG() { + return ( + + ) +} diff --git a/ymir/web/src/components/common/icons/style.css 
b/ymir/web/src/components/common/icons/style.css new file mode 100644 index 0000000000..2da8ab39c6 --- /dev/null +++ b/ymir/web/src/components/common/icons/style.css @@ -0,0 +1,9 @@ +.spin,.spin svg {animation: iconpark-spin 1s infinite linear;} +.rtl,.rtl svg {transform: scaleX(-1);} +.spin.rtl,.spin.rtl svg {animation: iconpark-spin-rtl 1s infinite linear;} +@keyframes iconpark-spin { + 0% { -webkit-transform: rotate(0); transform: rotate(0);} 100% {-webkit-transform: rotate(360deg); transform: rotate(360deg);} +} +@keyframes iconpark-spin-rtl { + 0% {-webkit-transform: scaleX(-1) rotate(0); transform: scaleX(-1) rotate(0);} 100% {-webkit-transform: scaleX(-1) rotate(360deg); transform: scaleX(-1) rotate(360deg);} +} \ No newline at end of file diff --git a/ymir/web/src/components/common/ignoreKeywords.js b/ymir/web/src/components/common/ignoreKeywords.js deleted file mode 100644 index 0f88637bc2..0000000000 --- a/ymir/web/src/components/common/ignoreKeywords.js +++ /dev/null @@ -1,21 +0,0 @@ - -import { Col, Row, Tag } from "antd" - -import AddKeywordsBtn from "../keyword/addKeywordsBtn" - -function IgnoreKeywords({ keywords = [] }) { - return ( - - - {keywords.map((keyword) => ( - {keyword} - ))} - - - {keywords.length ? : null } - - - ) -} - -export default IgnoreKeywords diff --git a/ymir/web/src/components/common/leftMenu.js b/ymir/web/src/components/common/leftMenu.js new file mode 100644 index 0000000000..9cd4ac787f --- /dev/null +++ b/ymir/web/src/components/common/leftMenu.js @@ -0,0 +1,108 @@ +import { useEffect, useState } from "react" +import { Menu, Layout } from "antd" +import { useHistory, useLocation, withRouter, useSelector } from "umi" +import t from '@/utils/t' +import { getDeployUrl } from '@/constants/common' +import { isSuperAdmin } from '@/constants/user' +import { + BarchartIcon, FlagIcon, GithubIcon, FileHistoryIcon, MymodelIcon, + NavDatasetIcon, UserIcon, UserSettingsIcon, DiagnosisIcon, EditIcon, EyeOffIcon, TrainIcon, + DeviceListIcon, DeviceSupportedIcon, MyAlgoIcon, StoreIcon, + BarChart2LineIcon, ProjectIcon, VectorIcon, BookIcon, +} from '@/components/common/icons' +import IterationIcon from '@/components/icon/Xiangmudiedai' + +const { Sider } = Layout + +const projectModule = /^.*\/project\/(\d+).*$/ + +const getItem = (label, key, Icon, children, type = '') => ({ + key, icon: Icon ? : null, children, label, type, +}) + +const getGroupItem = (label, key, children) => getItem(label, key, undefined, children, 'group') + +function LeftMenu() { + const role = useSelector(state => state.user.role) + const projects = useSelector(state => state.project.projects) + const history = useHistory() + const { pathname } = useLocation() + const [defaultKeys, setDefaultKeys] = useState(null) + const [items, setItems] = useState([]) + const [id, setId] = useState(null) + const [project, setProject] = useState({}) + + useEffect(() => { + setDefaultKeys(pathname) + const id = pathname.replace(projectModule, '$1') + setId(id) + }, [pathname]) + + useEffect(() => { + id && projects && setProject(projects[id] || {}) + }, [id, projects]) + + useEffect(() => { + const showLeftMenu = projectModule.test(pathname) + setItems([ + getGroupItem(t('breadcrumbs.projects'), 'project', [ + getItem(t('projects.title'), `/home/project`, ProjectIcon,), + showLeftMenu ? 
getItem(project.name, `project.summary`, VectorIcon, [ + getItem(t('project.summary'), `/home/project/${id}/detail`, BarchartIcon,), + getItem(t('project.iterations.title'), `/home/project/${id}/iterations`, IterationIcon,), + getItem(t('dataset.list'), `/home/project/${id}/dataset`, NavDatasetIcon,), + getItem(t('breadcrumbs.dataset.analysis'), `/home/project/${id}/dataset/analysis`, BarChart2LineIcon), + getItem(t('model.management'), `/home/project/${id}/model`, MymodelIcon,), + getItem(t('model.diagnose'), `/home/project/${id}/diagnose`, DiagnosisIcon), + getItem(t('breadcrumbs.task.training'), `/home/project/${id}/train`, TrainIcon), + getItem(t('common.hidden.list'), `/home/project/${id}/hidden`, EyeOffIcon,), + getItem(t('project.settings.title'), `/home/project/${id}/add`, EditIcon,), + ]) : null, + ]), + getGroupItem(t('breadcrumbs.keyword'), 'keyword', [ + getItem(t('breadcrumbs.keyword'), '/home/keyword', FlagIcon,), + ]), + getDeployUrl() ? getGroupItem(t('algo.label'), 'algo', [ + getItem(t('algo.public.label'), '/home/algo', StoreIcon,), + getItem(t('algo.mine.label'), '/home/algo/mine', MyAlgoIcon,), + getItem(t('algo.device.label'), '/home/algo/device', DeviceListIcon,), + getItem(t('algo.support.label'), '/home/algo/support', DeviceSupportedIcon,), + ]) : null, + getGroupItem(t('common.top.menu.configure'), 'settings', [ + getItem(t('common.top.menu.image'), '/home/image', FileHistoryIcon,), + isSuperAdmin(role) ? getItem(t('common.top.menu.permission'), '/home/permission', UserSettingsIcon,) : null, + ]), + { type: 'divider' }, + getItem(
    + + {t('common.menu.docs')} + , 'outer/docs'), + getItem(t('user.settings'), '/home/user', UserIcon,), + getItem( + + {t('common.top.menu.community')} + , 'outer/github'), + ]) + }, [id, project, role]) + + const clickHandle = ({ key }) => { + const outer = /^outer\//.test(key) + if (!outer) { + setDefaultKeys([key]) + history.push(key) + } + } + + return items.length ? ( + + + + ) : null +} +export default withRouter(LeftMenu) diff --git a/ymir/web/src/components/common/loading.js b/ymir/web/src/components/common/loading.js index 54631c957a..0e4532e921 100644 --- a/ymir/web/src/components/common/loading.js +++ b/ymir/web/src/components/common/loading.js @@ -1,19 +1,15 @@ import { Spin } from "antd" -import { connect } from "dva" +import { useSelector } from 'umi' import styles from "./common.less" -function Loading({ loading }) { +function Loading() { + const globalLoading = useSelector(({ loading }) => loading.global && !loading.models.Verify) + const commonLoading = useSelector(({ common }) => common.loading) return ( -
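Note: getItem/getGroupItem (above) build entries for antd Menu's items API rather than JSX children. For example, the keyword group reduces to a plain object tree (names from the hunk above):

    // getItem(label, key, Icon, children, type = '')
    const keywordGroup = getGroupItem(t('breadcrumbs.keyword'), 'keyword', [
      getItem(t('breadcrumbs.keyword'), '/home/keyword', FlagIcon),
    ])
    // -> { key: 'keyword', icon: null, type: 'group', label: ...,
    //      children: [{ key: '/home/keyword', icon: <FlagIcon />, label: ..., type: '' }] }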
    +
    ) } -const mapStateToProps = (state) => { - return { - loading: state.loading.global && !state.loading.models.Verify, - } -} - -export default connect(mapStateToProps, null)(Loading) +export default Loading diff --git a/ymir/web/src/components/common/progress.js b/ymir/web/src/components/common/progress.js index 915b90f14b..a99731a9be 100644 --- a/ymir/web/src/components/common/progress.js +++ b/ymir/web/src/components/common/progress.js @@ -1,22 +1,23 @@ import { Col, Progress, Row } from "antd" import { getLocale, Link } from "umi" -import { states } from "@/constants/dataset" +import { ResultStates } from "@/constants/common" import t from "@/utils/t" import StateTag from "../task/stateTag" import { calTimeLeft } from "@/utils/date" import { InprogressIcon } from "./icons" function RenderProgress(state, { id, progress, createTime, taskState, task = {} }, simple = false) { - if (states.READY === state && task?.is_terminated) { + if (ResultStates.READY === state && task?.is_terminated) { return t('task.state.terminating') } if (!taskState) { return } - const percent = Math.floor(progress * 100) + const fixedProgress = ResultStates.VALID !== state && progress === 1 ? 0.99 : progress + const percent = Math.floor(fixedProgress * 100) const stateTag = - return state === states.READY ? ( + return state === ResultStates.READY ? ( {stateTag} diff --git a/ymir/web/src/components/common/recommendKeywords.js b/ymir/web/src/components/common/recommendKeywords.js index 5b9aa8bc52..89ef29db88 100644 --- a/ymir/web/src/components/common/recommendKeywords.js +++ b/ymir/web/src/components/common/recommendKeywords.js @@ -3,22 +3,21 @@ import { connect } from 'dva' import { useEffect, useState } from 'react' import t from "@/utils/t" +import useFetch from '@/hooks/useFetch' import s from './common.less' -const RecommendKeywords = ({ global = false, sets, limit = 5, onSelect = () => { }, getRecommendKeywords }) => { - const [keywords, setKeywords] = useState([]) +const RecommendKeywords = ({ global = false, sets, limit = 5, onSelect = () => { } }) => { + const [keywords, getKeywords] = useFetch('keyword/getRecommendKeywords', []) useEffect(() => { - if (global || sets?.length) { + if (global || sets) { fetchKeywords() } }, [sets]) - async function fetchKeywords() { - const result = await getRecommendKeywords({ global, dataset_ids: sets, limit }) - if (result) { - setKeywords(result) - } + function fetchKeywords() { + const ids = Array.isArray(sets) ? 
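Note on the progress fix above: a task that reports progress === 1 while its result is not yet VALID would previously render as 100%; the new fixedProgress pins it at 99% until the state actually flips. The rule, restated as a standalone helper (names from the progress.js hunk):

    import { ResultStates } from '@/constants/common'

    // Unfinished results are capped at 99% even if the raw progress reports 1.
    const displayPercent = (state, progress) =>
      Math.floor((ResultStates.VALID !== state && progress === 1 ? 0.99 : progress) * 100)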
sets : [sets] + getKeywords({ global, dataset_ids: ids, limit }) } return ( @@ -29,22 +28,4 @@ const RecommendKeywords = ({ global = false, sets, limit = 5, onSelect = () => { ) } -const props = (state) => { - return { - - } -} - -const actions = (dispatch) => { - return { - getRecommendKeywords(payload) { - return dispatch({ - type: 'keyword/getRecommendKeywords', - payload, - }) - } - } -} - - -export default connect(props, actions)(RecommendKeywords) +export default RecommendKeywords diff --git a/ymir/web/src/components/dataset/asset_annotation.js b/ymir/web/src/components/dataset/assetAnnotation.js similarity index 60% rename from ymir/web/src/components/dataset/asset_annotation.js rename to ymir/web/src/components/dataset/assetAnnotation.js index 7256ec9521..cc11fc3256 100644 --- a/ymir/web/src/components/dataset/asset_annotation.js +++ b/ymir/web/src/components/dataset/assetAnnotation.js @@ -1,12 +1,15 @@ -import styles from "./common.less" import { useEffect, useState, useRef } from "react" -import { percent } from "../../utils/number" +import { Col, Popover, Row, Space } from "antd" + +import { percent } from "@/utils/number" +import t from '@/utils/t' +import { evaluationLabel } from '@/constants/dataset' + +import styles from "./common.less" function AssetAnnotation({ url, data = [], - colors = ["green", "red", "cyan", "blue", "yellow", "purple", "magenta", "orange", "gold"], - keywords = [], }) { const [annotations, setAnnotations] = useState([]) const imgContainer = useRef() @@ -21,12 +24,10 @@ function AssetAnnotation({ const transAnnotations = (items) => { setAnnotations(() => { - let index = 0 - return items.map(({ keyword, box, score, color = '#000' }) => { + return items.map(({ box, score, ...item }) => { return { - keyword, + ...item, score: score ? percent(score) : null, - color, ...box, } }) @@ -34,12 +35,24 @@ function AssetAnnotation({ } const renderAnnotations = () => { - // console.log('annotations: ', annotations) return annotations.map((annotation, index) => { - return ( + const evaluatedLabel = evaluationLabel(annotation.cm) + const emptyTags = Object.keys(annotation.tags).length === 0 + const popContent = <> + {t('keyword.column.name')}{annotation.keyword} + {evaluatedLabel ? Evaluation{evaluationLabel(annotation.cm)} : null} + {annotation.score ? {t('model.verify.confidence')}{annotation.score} : null} + {!emptyTags ? {t('dataset.assets.keyword.selector.types.tags')} + {Object.keys(annotation.tags).map(tag => + {tag}: {annotation.tags[tag]} + )} + + : null} + + return
    - {annotation.keyword} - {annotation.score ? <> {annotation.score} : null} + {annotation.keyword} + {annotation.score ? <> {annotation.score} : null}
    - ) +
    }) } diff --git a/ymir/web/src/components/dataset/common.less b/ymir/web/src/components/dataset/common.less index f1fc82c72a..114e8571b2 100644 --- a/ymir/web/src/components/dataset/common.less +++ b/ymir/web/src/components/dataset/common.less @@ -15,36 +15,50 @@ top: 0; margin-left: 50%; height: 100%; - .annotation { +} +.annotation { + position: absolute; + top: 0; + left: 0; + width: 100px; + height: 100px; + border: 1px solid rgba(255, 24, 24, 0.6); + background-color: rgba(255, 255, 255, 0.3); + border-radius: 4px; + &.gt { + border-style: dashed; + } + .annotationTitle, .confidence { position: absolute; - top: 0; - left: 0; - width: 100px; - height: 100px; - border: 1px solid rgba(255, 24, 24, 0.6); - background-color: rgba(255, 255, 255, 0.3); - border-radius: 4px; - .annotationTitle, .confidence { - position: absolute; - bottom: 0; - display: block; - width: 100%; - // height: 28px; - line-height: 22px; - text-align: center; - text-overflow: ellipsis; - overflow: hidden; - background-color: rgba(0, 0, 0, 0.4); - opacity: 0.6; - color: #fff; - text-shadow: #333 0px 1px; - font-size: 14px; - font-weight: normal; - white-space: nowrap; - } + bottom: 0; + display: block; + width: 100%; + // height: 28px; + line-height: 22px; + text-align: center; + text-overflow: ellipsis; + overflow: hidden; + background-color: rgba(0, 0, 0, 0.4); + opacity: 0.6; + color: #fff; + text-shadow: #333 0px 1px; + font-size: 14px; + font-weight: normal; + white-space: nowrap; + } +} +.ic_container { + position: relative; + display: flex; + align-items: center; + justify-content: center; + height: 100%; + width: 100%; + img { + width: 100%; + height: 100%; } } - .tableWrapper { display: flex; justify-content: space-between; diff --git a/ymir/web/src/components/dataset/detail.js b/ymir/web/src/components/dataset/detail.js index 53ceea655d..e2adb9ce40 100644 --- a/ymir/web/src/components/dataset/detail.js +++ b/ymir/web/src/components/dataset/detail.js @@ -1,19 +1,36 @@ -import React from "react" +import React, { useEffect } from "react" import { useHistory } from "umi" import { Button, Col, Descriptions, Row, Tag } from "antd" import t from "@/utils/t" -import { states } from '@/constants/common' +import { ResultStates } from '@/constants/common' import styles from "./detail.less" import { SearchIcon } from "@/components/common/icons" +import { DescPop } from "../common/descPop" const { Item } = Descriptions +const labelStyle = { width: '15%', paddingRight: '20px', justifyContent: 'flex-end' } function DatasetDetail({ dataset = {} }) { const history = useHistory() + const { cks = {}, tags = {}, inferClass } = dataset - const labelStyle = { width: '15%', paddingRight: '20px', justifyContent: 'flex-end' } + const renderKeywords = (anno, label = t('annotation.gt')) => { + if (!anno) { + return + } + const { keywords = [], count = {} } = anno + return

    + {label}: {keywords.map(keyword => {keyword}({count[keyword]}))} +

    + } + const renderCk = (label = 'ck', keywords = []) => keywords.length ? + {keywords.map(({ keyword, children }) => + {keyword} + {children.map(({ keyword, count }) => {keyword}({count}))} + )} + : null return (
    {dataset.name} {dataset.versionName} - +
    ) diff --git a/ymir/web/src/components/dataset/imageAnnotation.js b/ymir/web/src/components/dataset/imageAnnotation.js new file mode 100644 index 0000000000..cc203aae48 --- /dev/null +++ b/ymir/web/src/components/dataset/imageAnnotation.js @@ -0,0 +1,96 @@ +import styles from "./common.less" +import { useEffect, useState, useRef } from "react" + +function ImageAnnotation({ + url, + data = [], + filters = a => a, +}) { + const [annotations, setAnnotations] = useState([]) + const img = useRef() + const [box, setBox] = useState({ + width: 0, + height: 0, + }) + const [ratio, setRatio] = useState(1) + + useEffect(() => { + transAnnotations(data) + }, [data]) + + const transAnnotations = (items) => { + setAnnotations(() => { + return items.map(({ box, score, ...item }) => { + return { + ...item, + ...box, + } + }) + }) + } + + const renderAnnotations = () => filters(annotations).map((annotation, index) =>
    ) + + function calImgWidth(target) { + const im = target || img.current + if (!im) { + return + } + const cw = im.clientWidth + const iw = im.naturalWidth || 0 + setBox({ + width: cw, + height: im.clientHeight, + }) + setRatio(cw / iw) + } + let calculating = false + window.addEventListener('resize', () => { + if (calculating) return + calculating = true + window.setTimeout(() => { + if (img.current) { + calImgWidth() + } + calculating = false + }, 2000) + }) + + return ( +
    + calImgWidth()} + /> +
    + {renderAnnotations()} +
    +
    + ) +} + +export default ImageAnnotation diff --git a/ymir/web/src/components/dataset/keywordRates.js b/ymir/web/src/components/dataset/keywordRates.js index c62969bf6f..15e52c2acb 100644 --- a/ymir/web/src/components/dataset/keywordRates.js +++ b/ymir/web/src/components/dataset/keywordRates.js @@ -1,6 +1,4 @@ import { useState, useEffect } from "react" -import { Row, Col, Progress, } from "antd" -import { connect } from 'dva' import t from "@/utils/t" import s from "./keywordRates.less" @@ -10,95 +8,64 @@ function randomColor() { return "#" + Math.random().toString(16).slice(-6) } -function KeywordRates({ dataset, progressWidth = 0.5, getKeywordRates }) { - const [data, setData] = useState({}) +const getWidth = ({ count = 0, max }, progressWidth) => percent(count * progressWidth / max) + +// type stats = { count = {}, keywords, negative = 0, total } +function KeywordRates({ title = '', stats, progressWidth = 0.5 }) { const [list, setList] = useState([]) + const [keywords, setKws] = useState([]) + const [colors, setColors] = useState({}) useEffect(() => { - if(dataset?.id) { - setData(dataset) - } else if(dataset) { - fetchRates(dataset) - } else { - setData({}) - setList([]) - } - }, [dataset]) + const kws = stats?.keywords || [] + setKws(kws) + }, [stats]) useEffect(() => { - if (data) { - const klist = prepareList(data) - setList(klist) - } - }, [data]) + const keywordColors = (keywords || []).reduce((prev, keyword) => (colors[keyword] ? prev : { + ...prev, + [keyword]: randomColor(), + }), { + 0: 'gray' + }) + setColors({ ...colors, ...keywordColors }) + }, [keywords]) - async function fetchRates(id) { - const result = await getKeywordRates(id) - if (result) { - setData(result) - } - } + useEffect(() => { + setList(keywords.length && stats ? generateList(stats, colors) : []) + }, [stats, colors]) - function prepareList(dataset = {}) { - if (!dataset?.id) { - return [] - } - const { assetCount, keywordsCount, nagetiveCount, projectNagetiveCount, project: { keywords = [] } } = dataset - const filter = keywords.length ? keywords : Object.keys(keywordsCount) - const neg = keywords.length ? projectNagetiveCount : nagetiveCount - const kwList = getKeywordList(keywordsCount, filter, neg) - const widthRate = assetCount / Math.max(...(kwList.map(item => item.count))) - const getWidth = (count) => percent(count * progressWidth * widthRate / assetCount) - return kwList.map(item => ({ - ...item, - width: getWidth(item.count), - percent: percent(item.count / assetCount), - total: assetCount, - color: randomColor(), - })) - } - function getKeywordList(keywords = {}, filterKeywords, negative) { - const klist = filterKeywords.map(keyword => { - const count = keywords[keyword] || 0 - return { - keyword, count - } - }) - return [ - ...klist, - { - keyword: t('dataset.samples.negative'), - count: negative, - } + function generateList({ count = {}, keywords = [], negative = 0, total }, colors) { + const klist = [ + ...(keywords.map(kw => ({ + key: kw, + label: kw, + count: count[kw], + total: total ? total : count[kw + '_total'], + }))), ] + const max = Math.max(...(klist.map(item => item.count || 0)), progressWidth) + return klist.map(item => ({ + ...item, + max, + color: colors[item.key], + })) } - function format({ percent = 0, keyword = '', count = 0, total }) { - return `${keyword} ${count}/${total} ${percent}` - } - - return list.length ? ( -
-      {list.map(item => (
-        <div key={item.keyword} className={s.rate}>
-          <span className={s.bar} style={{ width: item.width, backgroundColor: item.color }}>&nbsp;</span>
-          <span>{format(item)}</span>
-        </div>
-      ))}
    - ) : null -} - -const actions = (dispatch) => { - return { - getKeywordRates(id, force) { - return dispatch({ - type: 'dataset/getDataset', - payload: { id, force }, - }) - } + function label({ count = 0, label = '', total }) { + const countLabel = total ? count / total : 0 + return `${label} ${count}/${total} ${percent(countLabel)}` } + return list.length ?
+    <div className={s.rates}>
+      <div className={s.title}>{title}</div>
+      {list.map(item => (
+        <div key={item.key} className={s.rate}>
+          <span className={s.bar} style={{ width: getWidth(item, progressWidth), backgroundColor: item.color }}>&nbsp;</span>
+          <span>{label(item)}</span>
+        </div>
+      ))}
+    </div>
    : null } -export default connect(null, actions)(KeywordRates) +export default KeywordRates diff --git a/ymir/web/src/components/dataset/keywordRates.less b/ymir/web/src/components/dataset/keywordRates.less index 357f262b30..13bf22305e 100644 --- a/ymir/web/src/components/dataset/keywordRates.less +++ b/ymir/web/src/components/dataset/keywordRates.less @@ -1,5 +1,5 @@ .rate { - margin: 5px 0; + margin: 2px 0; height: 20px; line-height: 20px; span { @@ -8,7 +8,14 @@ .bar { display: inline-block; margin-right: 10px; - min-width: 10px; + min-width: 2px; height: 10px; } +} +.rates { + margin-bottom: 12px; +} +.title { + // margin-bottom: 8px; + font-weight: bold; } \ No newline at end of file diff --git a/ymir/web/src/components/dataset/list.js b/ymir/web/src/components/dataset/list.js index 226b36b09c..d770ef2179 100644 --- a/ymir/web/src/components/dataset/list.js +++ b/ymir/web/src/components/dataset/list.js @@ -2,14 +2,16 @@ import React, { useEffect, useRef, useState } from "react" import { connect } from 'dva' import styles from "./list.less" import { Link, useHistory, useLocation } from "umi" -import { Form, Button, Input, Table, Space, Modal, Row, Col, Tooltip, Pagination, message, } from "antd" +import { Form, Button, Input, Table, Space, Modal, Row, Col, Tooltip, Pagination, message, Popover, } from "antd" import t from "@/utils/t" import { humanize } from "@/utils/number" import { diffTime } from '@/utils/date' -import { getTaskTypeLabel, TASKSTATES } from '@/constants/task' -import { states } from '@/constants/dataset' +import { getTaskTypeLabel, TASKSTATES, TASKTYPES } from '@/constants/task' +import { ResultStates } from '@/constants/common' +import { canHide } from '@/constants/dataset' +import CheckProjectDirty from "@/components/common/CheckProjectDirty" import StateTag from "@/components/task/stateTag" import EditBox from "@/components/form/editBox" import Terminate from "@/components/task/terminate" @@ -21,12 +23,16 @@ import Actions from "@/components/table/actions" import { ImportIcon, ScreenIcon, TaggingIcon, TrainIcon, VectorIcon, WajueIcon, SearchIcon, EditIcon, EyeOffIcon, CopyIcon, StopIcon, ArrowDownIcon, ArrowRightIcon, CompareIcon, + CompareListIcon, } from "@/components/common/icons" +import { DescPop } from "../common/descPop" +import { RefreshIcon } from "../common/icons" +import useRerunAction from "../../hooks/useRerunAction" const { confirm } = Modal const { useForm } = Form -function Datasets({ pid, project = {}, iterations, group, datasetList, query, versions, ...func }) { +function Datasets({ pid, project = {}, iterations, groups, datasetList, query, versions, ...func }) { const location = useLocation() const { name } = location.query const history = useHistory() @@ -35,11 +41,13 @@ function Datasets({ pid, project = {}, iterations, group, datasetList, query, ve const [total, setTotal] = useState(1) const [form] = useForm() const [current, setCurrent] = useState({}) - const [visibles, setVisibles] = useState(group ? 
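Note: KeywordRates is now purely presentational; it no longer fetches by dataset id. Callers pass a stats object of the shape noted in the hunk ({ count, keywords, negative, total }), and bar widths are scaled against the largest count via getWidth. A usage sketch with made-up numbers:

    const stats = {
      keywords: ['cat', 'dog'],
      count: { cat: 120, dog: 80 },
      negative: 40,
      total: 240,
    }
    const rates = <KeywordRates title={t('annotation.gt')} stats={stats} />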
{ [group]: true } : {}) - const [selectedVersions, setSelectedVersions] = useState({}) + const [visibles, setVisibles] = useState({}) + const [selectedVersions, setSelectedVersions] = useState({ selected: [], versions: {} }) const hideRef = useRef(null) let [lock, setLock] = useState(true) const terminateRef = useRef(null) + const [testingSetIds, setTestingSetIds] = useState([]) + const generateRerun = useRerunAction() /** use effect must put on the top */ useEffect(() => { @@ -49,10 +57,16 @@ function Datasets({ pid, project = {}, iterations, group, datasetList, query, ve setLock(false) }, [history.location]) + useEffect(() => { + const initVisibles = groups.reduce((prev, group) => ({ ...prev, [group]: true }), {}) + setVisibles(initVisibles) + }, [groups]) + useEffect(() => { const list = setGroupLabelsByProject(datasetList.items, project) setDatasets(list) setTotal(datasetList.total) + setTestingSetIds(project?.testingSets || []) }, [datasetList, project]) useEffect(() => { @@ -126,13 +140,19 @@ function Datasets({ pid, project = {}, iterations, group, datasetList, query, ve key: "name", dataIndex: "versionName", className: styles[`column_name`], - render: (name, { id, state, projectLabel, iterationLabel }) => - {name} - - {projectLabel ?
    {projectLabel}
    : null} - {iterationLabel ?
    {iterationLabel}
    : null} - -
    , + render: (name, { id, description, projectLabel, iterationLabel }) => { + const popContent = + const content = + {name} + + {projectLabel ?
    {projectLabel}
    : null} + {iterationLabel ?
    {iterationLabel}
    : null} + +
    + return description ? + {content} + : content + }, filters: getRoundFilter(gid), onFilter: (round, { iterationRound }) => round === iterationRound, ellipsis: true, @@ -156,12 +176,19 @@ function Datasets({ pid, project = {}, iterations, group, datasetList, query, ve { title: showTitle("dataset.column.keyword"), dataIndex: "keywords", - render: (keywords) => { - const label = t('dataset.column.keyword.label', { keywords: keywords.join(', '), total: keywords.length }) - return { + const renderLine = (keywords, label = 'gt') =>
    +
    {t(`annotation.${label}`)}:
    + {t('dataset.column.keyword.label', { + keywords: keywords.join(', '), + total: keywords.length + })} +
    + const label = <>{renderLine(gt.keywords)}{renderLine(pred.keywords, 'pred')} + return isValidDataset(state) ?
    {label}
    + >
    {label}
    : null }, ellipsis: { showTitle: false, @@ -170,7 +197,7 @@ function Datasets({ pid, project = {}, iterations, group, datasetList, query, ve { title: showTitle('dataset.column.state'), dataIndex: 'state', - render: (state, record) => RenderProgress(state, record, true), + render: (state, record) => RenderProgress(state, record), // width: 60, }, { @@ -194,55 +221,56 @@ function Datasets({ pid, project = {}, iterations, group, datasetList, query, ve } const actionMenus = (record) => { - const { id, groupId, state, taskState, task, isProtected } = record + const { id, groupId, state, taskState, task, assetCount } = record + const invalidDataset = ({ state, assetCount }) => !isValidDataset(state) || assetCount === 0 const menus = [ { - key: "fusion", - label: t("dataset.action.fusion"), - hidden: () => !isValidDataset(state), - onclick: () => history.push(`/home/task/fusion/${pid}?did=${id}`), - icon: , + key: "label", + label: t("dataset.action.label"), + hidden: () => invalidDataset(record), + onclick: () => history.push(`/home/project/${pid}/label?did=${id}`), + icon: , }, { key: "train", label: t("dataset.action.train"), - hidden: () => !isValidDataset(state), - onclick: () => history.push(`/home/task/train/${pid}?did=${id}`), + hidden: () => invalidDataset(record) || isTestingDataset(id), + onclick: () => history.push(`/home/project/${pid}/train?did=${id}`), icon: , }, { key: "mining", label: t("dataset.action.mining"), - hidden: () => !isValidDataset(state), - onclick: () => history.push(`/home/task/mining/${pid}?did=${id}`), + hidden: () => invalidDataset(record), + onclick: () => history.push(`/home/project/${pid}/mining?did=${id}`), icon: , }, { - key: "inference", - label: t("dataset.action.inference"), + key: "merge", + label: t("common.action.merge"), hidden: () => !isValidDataset(state), - onclick: () => history.push(`/home/task/inference/${pid}?did=${id}`), - icon: , + onclick: () => history.push(`/home/project/${pid}/merge?did=${id}`), + icon: , }, { - key: "label", - label: t("dataset.action.label"), - hidden: () => !isValidDataset(state), - onclick: () => history.push(`/home/task/label/${pid}?did=${id}`), - icon: , + key: "filter", + label: t("common.action.filter"), + hidden: () => invalidDataset(record), + onclick: () => history.push(`/home/project/${pid}/filter?did=${id}`), + icon: , }, { - key: "compare", - label: t("common.action.compare"), - hidden: () => !isValidDataset(state), - onclick: () => history.push(`/home/project/${pid}/dataset/${groupId}/compare/${id}`), - icon: , + key: "inference", + label: t("dataset.action.inference"), + hidden: () => invalidDataset(record), + onclick: () => history.push(`/home/project/${pid}/inference?did=${id}`), + icon: , }, { key: "copy", label: t("task.action.copy"), - hidden: () => !isValidDataset(state), - onclick: () => history.push(`/home/task/copy/${pid}?did=${id}`), + hidden: () => invalidDataset(record), + onclick: () => history.push(`/home/project/${pid}/copy?did=${id}`), icon: , }, { @@ -252,11 +280,12 @@ function Datasets({ pid, project = {}, iterations, group, datasetList, query, ve hidden: () => taskState === TASKSTATES.PENDING || !isRunning(state) || task.is_terminated, icon: , }, + generateRerun(record), { key: "hide", label: t("common.action.hide"), onclick: () => hide(record), - hidden: () => hideHidden(record), + hidden: () => !canHide(record, project), icon: , }, ] @@ -266,8 +295,6 @@ function Datasets({ pid, project = {}, iterations, group, datasetList, query, ve const tableChange = ({ current, pageSize }, 
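Note: every entry consumed by the Actions component above follows one contract: key/label/icon plus an onclick and a hidden() predicate evaluated against the current record. One entry spelled out, with record fields and helpers as bound inside actionMenus above:

    const trainAction = {
      key: 'train',
      label: t('dataset.action.train'),
      // Hidden for failed/empty datasets and for the project's testing sets.
      hidden: () => invalidDataset(record) || isTestingDataset(id),
      onclick: () => history.push(`/home/project/${pid}/train?did=${id}`),
      icon: <TrainIcon />,
    }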
filters, sorters = {}) => { } - const hideHidden = ({ state, id }) => isRunning(state) || project?.hiddenDatasets?.includes(id) - const getTypeFilter = gid => { return getFilters(gid, 'taskType', (type) => t(getTaskTypeLabel(type))) } @@ -286,7 +313,7 @@ function Datasets({ pid, project = {}, iterations, group, datasetList, query, ve const listChange = (current, pageSize) => { const limit = pageSize const offset = (current - 1) * pageSize - func.updateQuery({ ...query, limit, offset }) + func.updateQuery({ ...query, current, limit, offset }) } function showTitle(str) { @@ -307,6 +334,7 @@ function Datasets({ pid, project = {}, iterations, group, datasetList, query, ve function setGroupLabelsByProject(datasets, project) { return datasets.map(item => { + delete item.projectLabel item = setLabelByProject(project?.trainSet?.id, 'isTrainSet', item) item = setLabelByProject(project?.testSet?.groupId, 'isTestSet', item, project?.testSet?.versionName) item = setLabelByProject(project?.miningSet?.groupId, 'isMiningSet', item, project?.miningSet?.versionName) @@ -318,8 +346,10 @@ function Datasets({ pid, project = {}, iterations, group, datasetList, query, ve Object.keys(versions).forEach(gid => { const list = versions[gid] const updatedList = list.map(item => { - item = setLabelByProject(project?.testSet?.id, 'isTestSet', item) - item = setLabelByProject(project?.miningSet?.id, 'isMiningSet', item) + delete item.projectLabel + const field = item.id === project?.testSet?.id ? 'isTestSet' : + (item.id === project?.miningSet?.id ? 'isMiningSet' : (isTestingDataset(item.id) ? 'isTestingSet' : '')) + field && (item = setLabelByProject(item.id, field, item)) return { ...item } }) versions[gid] = updatedList @@ -331,6 +361,7 @@ function Datasets({ pid, project = {}, iterations, group, datasetList, query, ve Object.keys(versions).forEach(gid => { const list = versions[gid] const updatedList = list.map(item => { + delete item.iterationLabel item = setLabelByIterations(item, iterations) return { ...item } }) @@ -344,6 +375,7 @@ function Datasets({ pid, project = {}, iterations, group, datasetList, query, ve isTrainSet: 'project.tag.train', isTestSet: 'project.tag.test', isMiningSet: 'project.tag.mining', + isTestingSet: 'project.tag.testing', } item[label] = id && item.id === id item.projectLabel = item.projectLabel || (item[label] ? 
t(maps[label], { version }) : '') @@ -366,7 +398,13 @@ function Datasets({ pid, project = {}, iterations, group, datasetList, query, ve } function rowSelectChange(gid, rowKeys) { - setSelectedVersions(old => ({ ...old, [gid]: rowKeys })) + setSelectedVersions(({ versions }) => { + versions[gid] = rowKeys + return { + selected: Object.values(versions).flat(), + versions: { ...versions }, + } + }) } const stop = (dataset) => { @@ -397,7 +435,7 @@ function Datasets({ pid, project = {}, iterations, group, datasetList, query, ve } const add = () => { - history.push(`/home/dataset/add/${pid}`) + history.push(`/home/project/${pid}/dataset/add`) } @@ -414,26 +452,8 @@ function Datasets({ pid, project = {}, iterations, group, datasetList, query, ve } } - const multipleCompare = () => { - const ids = Object.values(selectedVersions).flat() - const vss = Object.values(versions).flat().filter(({ id }) => ids.includes(id)) - const groups = [...new Set(vss.map(item => item.groupId))] - const diffGroup = groups.length > 1 - if (diffGroup) { - // diff group - return message.error(t('dataset.compare.error.diff_group')) - } - - const diffAssets = [...new Set(vss.map(item => item.assetCount))].length > 1 - if (diffAssets) { - // diff assets count - return message.error(t('dataset.compare.error.diff_assets')) - } - history.push(`/home/project/${pid}/dataset/${groups[0]}/compare/${ids}`) - } - const multipleHide = () => { - const ids = Object.values(selectedVersions).flat() + const ids = selectedVersions.selected const allVss = Object.values(versions).flat() const vss = allVss.filter(({ id }) => ids.includes(id)) hideRef.current.hide(vss, project.hiddenDatasets) @@ -449,15 +469,35 @@ function Datasets({ pid, project = {}, iterations, group, datasetList, query, ve const hideOk = (result) => { result.forEach(item => fetchVersions(item.dataset_group_id, true)) fetchDatasets(true) - setSelectedVersions({}) + setSelectedVersions({ selected: [], versions: {} }) + } + + const multipleInfer = () => { + const ids = selectedVersions.selected.join('|') + history.push(`/home/project/${pid}/inference?did=${ids}`) + } + + const batchMerge = () => { + const ids = selectedVersions.selected.join(',') + history.push(`/home/project/${pid}/merge?mid=${ids}`) + } + + const getDisabledStatus = (filter = () => { }) => { + const allVss = Object.values(versions).flat() + const { selected } = selectedVersions + return !selected.length || allVss.filter(({ id }) => selected.includes(id)).some(version => filter(version)) } function isValidDataset(state) { - return states.VALID === state + return ResultStates.VALID === state } function isRunning(state) { - return state === states.READY + return state === ResultStates.READY + } + + function isTestingDataset(id) { + return testingSetIds?.includes(id) } const addBtn = ( @@ -466,16 +506,17 @@ function Datasets({ pid, project = {}, iterations, group, datasetList, query, ve ) - const renderMultipleActions = Object.values(selectedVersions).flat().length ? ( - <> - - - - ) : null + const renderMultipleActions = <> + + + + const renderGroups = (<>
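Note: selectedVersions changed from a plain per-group map to { selected, versions }, so the batch actions (hide, merge, inference) read the flattened selected list directly instead of recomputing Object.values(...).flat() each time. The updater in rowSelectChange mutates versions before spreading; an equivalent non-mutating form, for clarity:

    setSelectedVersions(({ versions }) => {
      const next = { ...versions, [gid]: rowKeys }
      return { selected: Object.values(next).flat(), versions: next }
    })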
    @@ -496,9 +537,8 @@ function Datasets({ pid, project = {}, iterations, group, datasetList, query, ve onChange={tableChange} rowKey={(record) => record.id} rowSelection={{ - selectedRowKeys: selectedVersions[group.id], + selectedRowKeys: selectedVersions.versions[group.id], onChange: (keys) => rowSelectChange(group.id, keys), - getCheckboxProps: (record) => ({ disabled: isRunning(record.state), }), }} rowClassName={(record, index) => index % 2 === 0 ? '' : 'oddRow'} columns={columns(group.id)} @@ -507,17 +547,23 @@ function Datasets({ pid, project = {}, iterations, group, datasetList, query, ve
    )}
    - + ) return (
    -
    - - {addBtn} - {renderMultipleActions} - -
    + + + + {addBtn} + {renderMultipleActions} + + + + + +
    { + iid && getMiningStats({ pid, id: iid }) + }, [iid]) + + return
    + + +
    +} + +export default MiningSampleRates diff --git a/ymir/web/src/components/dataset/sampleRates.js b/ymir/web/src/components/dataset/sampleRates.js new file mode 100644 index 0000000000..b828c4cee1 --- /dev/null +++ b/ymir/web/src/components/dataset/sampleRates.js @@ -0,0 +1,66 @@ +import { useState, useEffect } from "react" +import { useParams, useSelector } from 'umi' +import { Button } from "antd" + +import t from "@/utils/t" +import useFetch from '@/hooks/useFetch' +import KeywordRates from "./keywordRates" + +function SampleRates({ keywords, dataset, negative, label, progressWidth = 0.5 }) { + const { id: pid } = useParams() + const [did, setDid] = useState(null) + const effect = 'dataset/getNegativeKeywords' + const [stats, getNegativeKeywords, setStats] = useFetch(effect, {}, true) + const loading = useSelector(({ loading }) => loading.effects[effect]) + + useEffect(() => { + dataset?.id && setDid(dataset.id) + }, [dataset]) + + useEffect(() => { + setStats({}) + }, [did, keywords]) + + const addNegativeInfo = (stat = {}) => { + if (!stat.keywords?.length) { + return {} + } + const key = t('dataset.samples.negative') + return { + ...stat, + keywords: [...stat.keywords, key], + count: { + ...stat.count, + [key]: stat.negative, + } + } + } + + useEffect(() => { + const synced = keywords?.length && did === dataset?.id + if (!negative && did && synced) { + fetchKeywords(pid, keywords, did) + } + }, [did, keywords]) + + function fetchKeywords(projectId, keywords = [], dataset) { + keywords.length && getNegativeKeywords({ projectId, keywords, dataset }) + } + + return
    + {label ?

    {label}

    : null} + {negative && !stats.gt ?
    + +
    : null} + + +
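Note: addNegativeInfo (above) folds the negative-sample count into the keyword stats as one extra pseudo-keyword, so KeywordRates renders it as just another bar. With made-up numbers:

    // key is the localized t('dataset.samples.negative') label.
    addNegativeInfo({ keywords: ['cat'], count: { cat: 12 }, negative: 3 })
    // -> { keywords: ['cat', key], count: { cat: 12, [key]: 3 }, negative: 3 }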
    +} + +export default SampleRates diff --git a/ymir/web/src/components/empty/dataset.js b/ymir/web/src/components/empty/dataset.js index cfeb2ee1d6..3be10889f4 100644 --- a/ymir/web/src/components/empty/dataset.js +++ b/ymir/web/src/components/empty/dataset.js @@ -4,13 +4,18 @@ import { Button, Space } from 'antd' import t from '@/utils/t' import styles from './empty.less' import { NoSjjIcon, ImportIcon, } from '@/components/common/icons' +import { useHistory, useParams } from 'umi' -export default ({ style = {}, add = () => {} }) => ( - - -

    {t("dataset.empty.label")}

    - -
    -) \ No newline at end of file +export default ({ style = {} }) => { + const { id } = useParams() + const history = useHistory() + return ( + + +

    {t("dataset.empty.label")}

    + +
    + ) +} diff --git a/ymir/web/src/components/empty/default.js b/ymir/web/src/components/empty/default.js index 77f1fba76f..dc6204dda3 100644 --- a/ymir/web/src/components/empty/default.js +++ b/ymir/web/src/components/empty/default.js @@ -2,6 +2,6 @@ import { Empty } from "antd" import nodataImg from '@/assets/nodata.png' -export default () => ( - +export default (props) => ( + ) \ No newline at end of file diff --git a/ymir/web/src/components/empty/model.js b/ymir/web/src/components/empty/model.js index 32f3493e1c..17a52b9b8e 100644 --- a/ymir/web/src/components/empty/model.js +++ b/ymir/web/src/components/empty/model.js @@ -1,20 +1,26 @@ import { Button, Space } from 'antd' -import { useHistory } from 'umi' +import { useHistory, useParams } from 'umi' import t from '@/utils/t' import styles from './empty.less' -import { NoXlmxIcon, TrainIcon, } from '@/components/common/icons' +import { NoXlmxIcon, TrainIcon, ImportIcon } from '@/components/common/icons' -export default ({ id, style={} }) => { +export default ({ style = {} }) => { + const { id } = useParams() const history = useHistory() return (

    {t("model.empty.label")}

    - + + + +
    ) } diff --git a/ymir/web/src/components/form/checkboxSelector.js b/ymir/web/src/components/form/checkboxSelector.js new file mode 100644 index 0000000000..78bf55bc38 --- /dev/null +++ b/ymir/web/src/components/form/checkboxSelector.js @@ -0,0 +1,24 @@ +import { Checkbox, Col, Form, Row } from "antd" +import { useEffect, useState } from "react" + + +const CheckboxSelector = ({ options = [], label = '', value, onChange = () => { }, vertical, labelAlign, ...rest }) => { + const [checkeds, setCheckeds] = useState([]) + + useEffect(() => setCheckeds(value), [value]) + + useEffect(() => onChange(checkeds), [checkeds]) + + return + {label} + + + + +} + +export default CheckboxSelector diff --git a/ymir/web/src/components/form/datasetSelect.js b/ymir/web/src/components/form/datasetSelect.js index 78c54c9077..c4c3cf854c 100644 --- a/ymir/web/src/components/form/datasetSelect.js +++ b/ymir/web/src/components/form/datasetSelect.js @@ -1,32 +1,67 @@ -import { Select } from 'antd' -import { connect } from 'umi' +import { Col, ConfigProvider, Row, Select } from 'antd' +import { useSelector } from 'umi' import { useEffect, useState } from 'react' + import t from '@/utils/t' +import useFetch from '@/hooks/useFetch' +import EmptyState from '@/components/empty/dataset' + +const defaultLabelRender = ({ name, versionName, assetCount }) => { + const label = `${name} ${versionName}(assets: ${assetCount})` + return name ? {label} : null +} -const DatasetSelect = ({ pid, filter = [], filterGroup = [], filters, value, datasets = [], onChange = () => { }, getDatasets, ...resProps }) => { +const DatasetSelect = ({ + pid, filter = [], allowEmpty, filterGroup = [], + filters, value, onChange = () => { }, renderLabel = defaultLabelRender, + onReady = () => { }, + extra, changeByUser, ...resProps +}) => { const [options, setOptions] = useState([]) + const datasets = useSelector(({ dataset }) => dataset.allDatasets) + const [_, getDatasets] = useFetch('dataset/queryAllDatasets', [], true) + const [val, setVal] = useState(value) + + useEffect(() => setVal(value), [value]) useEffect(() => { pid && fetchDatasets() }, [pid]) + useEffect(() => { + onReady(datasets) + }, [datasets]) + useEffect(() => { let selected = null - if (value) { + if (options.length && value && !changeByUser) { if (resProps.mode) { selected = options.filter(opt => value.includes(opt.value)) } else { selected = options.find(opt => value === opt.value) } - onChange(value, selected) + if (selected) { + onChange(value, selected) + } else { + onChange(undefined, null) + setVal(undefined) + } } }, [options]) useEffect(() => { - const dss = filters ? filters(datasets) : datasets + const needReload = datasets.some(ds => ds.needReload) + if (needReload) { + fetchDatasets() + } + }, [datasets]) + + useEffect(() => { + let dss = filters ? filters(datasets) : datasets + dss = allowEmpty ? 
dss : filterEmptyAsset(dss) const opts = dss.filter(ds => !filter.includes(ds.id) && !filterGroup.includes(ds.groupId)).map(item => { return { - label: <>{item.name} {item.versionName}(assets: {item.assetCount}), + label: renderLabel(item), dataset: item, value: item.id, } @@ -34,37 +69,30 @@ const DatasetSelect = ({ pid, filter = [], filterGroup = [], filters, value, dat setOptions(opts) }, [filters, datasets]) - function fetchDatasets() { - getDatasets(pid, true) + async function fetchDatasets() { + await getDatasets({ pid, force: true }) + } + + function filterEmptyAsset(datasets) { + return datasets.filter(ds => ds.assetCount) } - return ( + const select = }> - ) -} + -const props = (state) => { - return { - datasets: state.dataset.allDatasets, - } + return extra ? {select}{extra} : select } -const actions = (dispatch) => { - return { - getDatasets(pid, force) { - return dispatch({ - type: 'dataset/queryAllDatasets', - payload: { pid, force }, - }) - } - } -} -export default connect(props, actions)(DatasetSelect) + +export default DatasetSelect diff --git a/ymir/web/src/components/form/desc.js b/ymir/web/src/components/form/desc.js new file mode 100644 index 0000000000..bdd222cb91 --- /dev/null +++ b/ymir/web/src/components/form/desc.js @@ -0,0 +1,12 @@ +import { Form, Input } from "antd" +import t from '@/utils/t' + +export default function Desc({ label='common.desc', name = 'description' }) { + return + + +} diff --git a/ymir/web/src/components/form/editBox.js b/ymir/web/src/components/form/editBox.js index eee2eb6f0a..d3b013fc17 100644 --- a/ymir/web/src/components/form/editBox.js +++ b/ymir/web/src/components/form/editBox.js @@ -32,6 +32,7 @@ const EditBox = ({ children, record, max=50, action = () => { } }) => { onCancel={onCancel} onOk={onOk} destroyOnClose + forceRender > + +export default EvaluationSelector diff --git a/ymir/web/src/components/form/gtSelector.js b/ymir/web/src/components/form/gtSelector.js new file mode 100644 index 0000000000..39b090ec25 --- /dev/null +++ b/ymir/web/src/components/form/gtSelector.js @@ -0,0 +1,15 @@ +import t from "@/utils/t" +import CheckboxSelector from "./checkboxSelector" + +const types = [ + { label: 'annotation.gt', value: 'gt', checked: true, }, + { label: 'annotation.pred', value: 'pred', }, +] + +const GtSelector = props => ({ ...type, label: t(type.label)}))} + label={t('dataset.assets.selector.gt.label')} + {...props} +/> + +export default GtSelector diff --git a/ymir/web/src/components/form/imageSelect.js b/ymir/web/src/components/form/imageSelect.js index cfeb3d085c..d7204f20ca 100644 --- a/ymir/web/src/components/form/imageSelect.js +++ b/ymir/web/src/components/form/imageSelect.js @@ -1,11 +1,14 @@ -import { Select } from 'antd' +import { Col, Row, Select } from 'antd' import { connect } from 'dva' import { useEffect, useState } from 'react' import { TYPES } from '@/constants/image' +import { HIDDENMODULES } from '@/constants/common' import t from '@/utils/t' -const ImageSelect = ({ value, relatedId, type = TYPES.TRAINING, onChange = () => {}, getImages, getImage, ...resProps }) => { +const getValue = image => image.id + ',' + image.url + +const ImageSelect = ({ value, relatedId, type = TYPES.TRAINING, onChange = () => { }, getImages, getImage, ...resProps }) => { const [options, setOptions] = useState([]) useEffect(() => { @@ -14,7 +17,12 @@ const ImageSelect = ({ value, relatedId, type = TYPES.TRAINING, onChange = () => useEffect(() => { if (options.length === 1) { - value = options[0].value + if (value) { + const opt = 
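Note: DatasetSelect now builds its options through a fixed pipeline: caller-supplied filters first, then (unless allowEmpty is set) dropping datasets with no assets, then excluding explicit ids and groups. Restated as one function, with names taken from the hunk above:

    const buildOptions = (datasets, { filters, allowEmpty, filter = [], filterGroup = [], renderLabel }) => {
      let dss = filters ? filters(datasets) : datasets
      // Empty datasets are hidden unless the caller opts in with allowEmpty.
      dss = allowEmpty ? dss : dss.filter(ds => ds.assetCount)
      return dss
        .filter(ds => !filter.includes(ds.id) && !filterGroup.includes(ds.groupId))
        .map(item => ({ label: renderLabel(item), dataset: item, value: item.id }))
    }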
options.find(({ image }) => getValue(image) === value) + opt && onChange(value, opt.image) + } else { + value = options[0].value + } } }, [options]) @@ -32,9 +40,12 @@ const ImageSelect = ({ value, relatedId, type = TYPES.TRAINING, onChange = () => } const generateOption = image => ({ - label: image.name, + label: + {image.name} + {!HIDDENMODULES.LIVECODE ? {t(`image.livecode.label.${image.liveCode ? 'remote' : 'local'}`)} : null} + , image, - value: image.id + ',' + image.url, + value: getValue(image), }) async function generateOptions(images) { @@ -59,14 +70,16 @@ const ImageSelect = ({ value, relatedId, type = TYPES.TRAINING, onChange = () => async function getRelatedOptions() { const trainImage = await getImage(relatedId) let relatedOptions = [] - if(trainImage?.related) { + if (trainImage?.related) { relatedOptions = trainImage.related.map(generateOption) } return relatedOptions } return ( - + ) } diff --git a/ymir/web/src/components/form/inferResultSelect.js b/ymir/web/src/components/form/inferResultSelect.js new file mode 100644 index 0000000000..4a289991ce --- /dev/null +++ b/ymir/web/src/components/form/inferResultSelect.js @@ -0,0 +1,200 @@ +import { Button, Checkbox, Col, Form, Row, Select, Tooltip } from 'antd' +import { connect } from 'dva' +import { useCallback, useEffect, useState } from 'react' + +import t from '@/utils/t' +import useFetch from '@/hooks/useFetch' +import ModelSelect from './modelSelect' +import DatasetSelect from './datasetSelect' +import { useHistory } from 'umi' + +const sameConfig = (config, config2) => { + return JSON.stringify(config2) === JSON.stringify(config) +} +const sameConfigs = (config, configs) => { + return configs.some(item => sameConfig(item, config)) +} + +const ConfigSelect = ({ value, configs = [], onChange = () => { } }) => { + const [options, setOptions] = useState([]) + + useEffect(() => { + const opts = configs.map((item, index) => { + const title = [...item.model, JSON.stringify(item.config)].join('\n') + return { + value: index, + label: {item.name}, + config: item, + } + }) + setOptions(opts) + }, [configs]) + + useEffect(() => { + if (value) { + change(value, options.filter(opt => value.includes(opt.value))) + } + }, [options]) + + const change = (values) => { + onChange(values, values.map(index => options[index])) + } + + return +} + +const InferResultSelect = ({ pid, form, value, onChange = () => { } }) => { + const history = useHistory() + const [models, setModels] = useState([]) + const [datasets, setDatasets] = useState([]) + const [testingDatasets, setTestingDatasets] = useState([]) + const [selectedDatasets, setSelectedDatasets] = useState([]) + const [configs, setConfigs] = useState([]) + const [selectedConfigs, setSelectedConfigs] = useState([]) + const [inferTasks, fetchInferTask] = useFetch('task/queryInferTasks', []) + const [fetched, setFetched] = useState(false) + const [selectedTasks, setSelectedTasks] = useState([]) + const [tasks, setTasks] = useState([]) + const selectedStages = Form.useWatch('stage', form) + + useEffect(() => { + setTasks(inferTasks) + setFetched(true) + }, [inferTasks]) + + useEffect(() => { + const stages = selectedStages?.map(([model, stage]) => stage) || [] + if (stages.length) { + fetchInferTask({ stages }) + } else { + setTasks([]) + setFetched(false) + } + setSelectedDatasets([]) + form.setFieldsValue({ dataset: undefined, config: undefined }) + }, [selectedStages]) + + useEffect(() => { + if (datasets.length === 1) { + form.setFieldsValue({ dataset: datasets }) + } + setConfigs([]) + 
form.setFieldsValue({ config: undefined }) + }, [datasets]) + + useEffect(() => { + form.setFieldsValue({ config: configs.length === 1 ? [0] : undefined }) + }, [configs]) + + useEffect(() => { + const testingDatasets = tasks.map(({ parameters: { dataset_id } }) => dataset_id) + const crossDatasets = testingDatasets.filter(dataset => { + const targetTasks = tasks.filter(({ parameters: { dataset_id } }) => dataset_id === dataset) + return selectedStages.every(([model, stage]) => targetTasks.map(({ parameters: { model_stage_id } }) => model_stage_id).includes(stage)) + }) + setDatasets([...new Set(crossDatasets)]) + }, [tasks]) + + useEffect(() => { + const configs = tasks + .filter(({ parameters: { dataset_id } }) => (selectedDatasets ? selectedDatasets.includes(dataset_id) : true)) + .reduce((prev, { config, parameters: { model_id, model_stage_id } }) => { + const stageName = getStageName([model_id, model_stage_id]) + return sameConfigs(config, prev.map(({ config }) => config)) ? + prev.map(item => { + sameConfig(item.config, config) && item.model.push(stageName) + return item + }) : + [...prev, { config, model: [stageName] }] + }, []) + setConfigs(configs.map((config, index) => ({ ...config, name: `config${index + 1}` }))) + }, [tasks, selectedDatasets]) + + useEffect(() => { + form.setFieldsValue({ config: configs.map((_, index) => index) }) + }, [configs]) + + useEffect(() => { + const selected = [] + selectedStages?.forEach(([model, selectedStage]) => { + selectedDatasets.forEach(did => { + const dtask = tasks.filter(({ + parameters: { dataset_id, model_stage_id: stage } + }) => dataset_id === did && stage === selectedStage) + selectedConfigs.forEach(({ config: sconfig, name }) => { + const ctask = dtask.find(({ config }) => sameConfig(config, sconfig)) + ctask && selected.push({ ...ctask, configName: name }) + }) + }) + }) + setSelectedTasks(selected) + }, [tasks, selectedConfigs]) + + useEffect(() => { + onChange({ + tasks: selectedTasks, + models, + datasets: testingDatasets, + }) + }, [selectedTasks]) + + function getStageName([model, stage]) { + const m = models.find(md => md.id === model) + let s = {} + if (m) { + s = m.stages.find(sg => sg.id === stage) + } + return m && s ? `${m.name} ${m.versionName} ${s.name}` : '' + } + + function modelChange(values, options = []) { + // setSelectedStages(values) + setModels(options.map(([opt]) => opt?.model)) + } + + function datasetChange(values, options = []) { + setSelectedDatasets(values) + setTestingDatasets(options.map(({ dataset }) => dataset)) + } + + function configChange(values, options = []) { + setSelectedConfigs(options.map((opt) => opt ? opt.config : null)) + } + + const filterDatasets = useCallback((all) => { + return all.filter(({ id }) => datasets.includes(id)) + }, [datasets]) + + const goInfer = useCallback(() => { + const mids = selectedStages?.map(String)?.join('|') + const query = selectedStages?.length ? `?mid=${mids}` : '' + history.push(`/home/project/${pid}/inference${query}`) + }, [selectedStages]) + + const renderInferBtn =
    + {t('task.infer.diagnose.tip')} + +
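// A minimal sketch (not part of this PR): sameConfig above compares
// hyper-parameter configs with JSON.stringify, which is sensitive to key
// order. If configs may arrive with differently ordered keys, a normalizing
// comparison such as this hypothetical variant would treat them as equal:
const normalize = (value) =>
  value && typeof value === 'object' && !Array.isArray(value)
    ? Object.keys(value)
        .sort()
        .reduce((acc, key) => ({ ...acc, [key]: normalize(value[key]) }), {})
    : value

const sameConfigDeep = (a, b) =>
  JSON.stringify(normalize(a)) === JSON.stringify(normalize(b))

// sameConfigDeep({ lr: 0.01, epochs: 10 }, { epochs: 10, lr: 0.01 }) === true,
// while the JSON.stringify comparison used above treats these two as different.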
    + + return ( + <> + + + + + + + ) +} + +const props = (state) => { + return { + allModels: state.model.allModels, + } +} + +export default connect(props, null)(InferResultSelect) diff --git a/ymir/web/src/components/form/items/datasetName.js b/ymir/web/src/components/form/items/datasetName.js new file mode 100644 index 0000000000..b9d399bdc6 --- /dev/null +++ b/ymir/web/src/components/form/items/datasetName.js @@ -0,0 +1,24 @@ +import { Form, Input } from "antd" +import t from '@/utils/t' + +const DatasetName = ({ + name = 'name', + itemProps = {}, + inputProps = {}, + prefix = 'dataset.add.form.name', +}) => { + const rules = [ + { required: true, whitespace: true, message: t(`${prefix}.required`) }, + { type: 'string', min: 2, max: 80 }, + ] + return + + +} + +export default DatasetName diff --git a/ymir/web/src/components/form/items/dockerConfig.js b/ymir/web/src/components/form/items/dockerConfig.js new file mode 100644 index 0000000000..9c740f76d9 --- /dev/null +++ b/ymir/web/src/components/form/items/dockerConfig.js @@ -0,0 +1,103 @@ +import { useEffect, useState } from "react" +import { Col, Form, Input, InputNumber, Row, Space } from "antd" +import Panel from "@/components/form/panel" +import t from '@/utils/t' +import s from "./form.less" +import PreProcessForm from "./preProcess" +import { AddTwoIcon, AddDelTwoIcon } from '@/components/common/icons' +function getArrayConfig(config = {}) { + const excludes = ['gpu_count', 'task_id'] + return Object.keys(config) + .filter(key => !excludes.includes(key)) + .map(key => ({ + key, + value: config[key] + })) +} + +const DockerConfigForm = ({ show, form, seniorConfig, name = 'hyperparam' }) => { + const [visible, setVisible] = useState(false) + const [config, setConfig] = useState([]) + const hyperParams = Form.useWatch('hyperparam', form) + + useEffect(() => setConfig(getArrayConfig(seniorConfig)), [seniorConfig]) + + useEffect(() => form.setFieldsValue({ [name]: config }), [config]) + + useEffect(() => setVisible(show), [show]) + + async function validHyperParams(rule, value) { + + const params = hyperParams.map(({ key }) => key) + .filter(item => item && item.trim() && item === value) + if (params.length > 1) { + return Promise.reject(t('task.validator.same.param')) + } else { + return Promise.resolve() + } + } + const renderTitle = <> + {t('task.train.form.hyperparam.label')} + {t('task.train.form.hyperparam.label.tip')} + + + return config.length ? 
+ + + + {(fields, { add, remove }) => ( + <> + + + )} + + + + + : null +} + +export default DockerConfigForm diff --git a/ymir/web/src/pages/task/train/index.less b/ymir/web/src/components/form/items/form.less similarity index 100% rename from ymir/web/src/pages/task/train/index.less rename to ymir/web/src/components/form/items/form.less diff --git a/ymir/web/src/components/form/items/liveCode.js b/ymir/web/src/components/form/items/liveCode.js new file mode 100644 index 0000000000..ce8840e7ce --- /dev/null +++ b/ymir/web/src/components/form/items/liveCode.js @@ -0,0 +1,56 @@ +import Panel from "@/components/form/panel" +import { Button, Col, Form, Input, Row } from "antd" +import t from '@/utils/t' +import { getConfigUrl } from "./liveCodeConfig" +import { useEffect, useState } from "react" + + +const LiveCodeForm = ({ form, live }) => { + const url = Form.useWatch(['live', 'git_url'], form) + const id = Form.useWatch(['live', 'git_branch'], form) + const config = Form.useWatch(['live', 'code_config'], form) + const [configUrl, setConfigUrl] = useState('') + + useEffect(() => { + if (url && id && config) { + const configUrl = getConfigUrl({ + git_url: url, + git_branch: id, + code_config: config, + }) + setConfigUrl(configUrl) + } else { + setConfigUrl('') + } + }, [url, id, config]) + + return live ? + + + + + + + + + + + + + + + + + + + + : null +} + +export default LiveCodeForm diff --git a/ymir/web/src/components/form/items/liveCodeConfig.js b/ymir/web/src/components/form/items/liveCodeConfig.js new file mode 100644 index 0000000000..30f72f9d49 --- /dev/null +++ b/ymir/web/src/components/form/items/liveCodeConfig.js @@ -0,0 +1,26 @@ + +export const FIELDS = Object.freeze([ + { key: 'url', field: 'git_url', }, + { key: 'id', field: 'git_branch', }, + { key: 'config', field: 'code_config', }, +]) + +export const getConfigUrl = (config = {}) => { + const getField = index => config[FIELDS[index].field] || '' + const base = getField(0).replace(/(\.git)?$/, '') + const commit = getField(1) + const configFile = getField(2) + const url = `${base}/blob/${commit}/${configFile}` + return url +} + +export const isLiveCode = config => FIELDS.reduce((prev, curr) => prev && config[curr.field], true) + +export function removeLiveCodeConfig(config = {}) { + return Object.keys(config).reduce((prev, key) => FIELDS.map(({ field }) => field).includes(key) ? + prev : + { + ...prev, + [key]: config[key], + }, {}) +} \ No newline at end of file diff --git a/ymir/web/src/components/form/items/openpai.js b/ymir/web/src/components/form/items/openpai.js new file mode 100644 index 0000000000..de6412c7f0 --- /dev/null +++ b/ymir/web/src/components/form/items/openpai.js @@ -0,0 +1,21 @@ +import { Form, Radio } from "antd" +import t from '@/utils/t' +import { useEffect } from "react" + +const types = [ + { value: true, label: 'common.yes', }, + { value: false, label: 'common.no', checked: true, }, +] + +const OpenpaiForm = ({ form, openpai }) => { + + useEffect(() => { + form.setFieldsValue({ openpai: openpai }) + }, [openpai]) + + return openpai ? 
+ ({ ...type, label: t(type.label) }))} /> + : null +} + +export default OpenpaiForm diff --git a/ymir/web/src/components/form/items/preProcess.js b/ymir/web/src/components/form/items/preProcess.js new file mode 100644 index 0000000000..a6425d26dc --- /dev/null +++ b/ymir/web/src/components/form/items/preProcess.js @@ -0,0 +1,37 @@ +import { Checkbox, Form, InputNumber } from "antd" +import t from '@/utils/t' +import { useState } from "react" + +const funcs = [ + { + func: 'longside_resize', + label: 'task.train.preprocess.resize', + params: [{ + key: 'dest_size', + rules: [{ required: true, message: t('task.train.preprocess.resize.placeholder'), }], + component: + }], + } +] + +const PreProcessForm = () => { + const [selected, setSelected] = useState([]) + const renderTitle = (func, label) => <> + + {t(label)} + + function preprocessSelected({ target: { value, checked } }) { + setSelected(old => ({ ...old, [value]: checked })) + } + return funcs.map(({ func, label, params }) => + + {selected[func] ? params.map(({ key, rules, component }) => ( + + {component} + + )) : null} + + ) +} + +export default PreProcessForm diff --git a/ymir/web/src/components/form/keywordSelect.js b/ymir/web/src/components/form/keywordSelect.js new file mode 100644 index 0000000000..2e776b0635 --- /dev/null +++ b/ymir/web/src/components/form/keywordSelect.js @@ -0,0 +1,66 @@ +import { Col, Row, Select } from 'antd' +import { useEffect, useState } from 'react' + +import t from '@/utils/t' +import useFetch from '@/hooks/useFetch' + + +const KeywordSelect = ({ value, onChange = () => { }, keywords, filter, ...resProps }) => { + const [options, setOptions] = useState([]) + const [keywordResult, getKeywords] = useFetch('keyword/getKeywords') + + useEffect(() => { + if (keywords) { + generateOptions(keywords.map(keyword => ({ name: keyword }))) + } else { + getKeywords({ limit: 9999 }) + } + }, [keywords]) + + useEffect(() => { + if (options.length) { + if (value) { + onChange(value, resProps.mode ? 
options.filter(opt => value.includes(opt.value)) : options.find(opt => opt.value === value)) + } + } + }, [options]) + + useEffect(() => { + if (options.length === 1) { + value = options[0].value + } + }, [options]) + + useEffect(() => { + if (keywordResult) { + generateOptions(keywordResult.items) + } + }, [keywordResult]) + + function generateOptions(keywords = []) { + filter = filter || (x => x) + const opts = filter(keywords).map(keyword => ({ + label: {keyword.name}, + aliases: keyword.aliases, + value: keyword.name, + })) + setOptions(opts) + } + + function filterOptions(options, filter = x => x) { + return filter(options) + } + + return ( + + ) +} + +export default KeywordSelect diff --git a/ymir/web/src/components/form/modelSelect.js b/ymir/web/src/components/form/modelSelect.js index 509368ffa7..1da0f84793 100644 --- a/ymir/web/src/components/form/modelSelect.js +++ b/ymir/web/src/components/form/modelSelect.js @@ -1,76 +1,80 @@ -import { Col, Row, Select } from 'antd' -import { connect } from 'dva' +import { Cascader, ConfigProvider } from 'antd' +import { useSelector } from 'umi' import { useEffect, useState } from 'react' import { percent } from '@/utils/number' +import t from '@/utils/t' +import useFetch from '@/hooks/useFetch' +import EmptyStateModel from '@/components/empty/model' - -const ModelSelect = ({ pid, value, allModels, onChange = () => { }, getModels, ...resProps }) => { +const ModelSelect = ({ + pid, value, onlyModel, changeByUser, + onChange = () => { }, onReady = () => { }, + filters, ...resProps +}) => { + const models = useSelector(state => state.model.allModels) + const [ms, setMS] = useState(null) const [options, setOptions] = useState([]) - const [models, setModels] = useState([]) + const [_, getModels] = useFetch('model/queryAllModels') - useEffect(() => { - fetchModels() - }, []) + useEffect(() => pid && getModels(pid), [pid]) - useEffect(() => { - if (options.length) { - if (value) { - onChange(value, resProps.mode ? options.filter(opt => value.includes(opt.value)) : options.find(opt => opt.value === value)) - } - } - }, [options]) + useEffect(() => setMS(value), [value]) - useEffect(() => { - setModels(allModels) - }, [allModels]) + useEffect(() => onReady(models), [models]) useEffect(() => { - if (options.length === 1) { - value = options[0].value + if (options.length && value && !changeByUser) { + let selected = null + if (resProps.multiple) { + selected = options.filter(opt => value.some(([model]) => opt.model.id === model)).map(opt => [opt, opt.value]) + } else { + const opt = options.find(opt => opt?.model?.id === value[0]) + selected = opt ? [opt, value[1] || opt?.model?.recommendStage] : undefined + } + if (!selected) { + onChange([], undefined) + setMS([]) + } else { + onChange(value, selected) + } } }, [options]) - useEffect(() => { - generateOptions() - }, [models]) - - function fetchModels() { - getModels(pid) - } + useEffect(() => generateOptions(), [models]) function generateOptions() { - const opts = models.map(model => { + const mds = filters ? filters(models) : models + const opts = mds.map(model => { + const name = `${model.name} ${model.versionName}` + const childrenNode = onlyModel ? {} : { + children: model.stages.map(stage => ({ + label: ` ${stage.name} (mAP:${percent(stage.map)}) ${stage.id === model.recommendStage ? 
t('common.recommend') : ''}`, + value: stage.id, + })) + } return { - label: - {model.name} {model.versionName} - mAP: {percent(model.map)} - , + label: name, model, value: model.id, + ...childrenNode, } }) setOptions(opts) } + function filter(input, path) { + return path.some(({ label = '' }) => label.toLowerCase().indexOf(input.toLowerCase()) > -1) + } + return ( - + }> + label.join('/')} + showCheckedStrategy={Cascader.SHOW_CHILD} showSearch={{ filter }} + placeholder={t('task.train.form.model.placeholder')} + allowClear {...resProps}> + ) } -const props = (state) => { - return { - allModels: state.model.allModels, - } -} -const actions = (dispatch) => { - return { - getModels(pid) { - return dispatch({ - type: 'model/queryAllModels', - payload: pid, - }) - } - } -} -export default connect(props, actions)(ModelSelect) +export default ModelSelect diff --git a/ymir/web/src/components/form/panel.js b/ymir/web/src/components/form/panel.js index dc09baf107..00ff648369 100644 --- a/ymir/web/src/components/form/panel.js +++ b/ymir/web/src/components/form/panel.js @@ -1,18 +1,26 @@ import { Col, Row } from "antd" import { ArrowDownIcon, ArrowRightIcon } from '@/components/common/icons' import s from './panel.less' +import { useEffect, useState } from "react" -const Panel = ({ hasHeader = true, toogleVisible = true, visible = false, setVisible = () => { }, label = '', children }) => { +const Panel = ({ hasHeader = true, toogleVisible = true, visible, setVisible = () => { }, label = '', bg = true, children }) => { + const [vis, setVis] = useState(false) + useEffect(() => { + setVis(visible) + }, [visible]) return (
    - {hasHeader ? setVisible(!visible)}> + {hasHeader ? { + setVis(!vis) + setVisible(!vis) + }}> {label} {toogleVisible ? - {visible ? : } + {vis ? : } : null} : null} - diff --git a/ymir/web/src/components/form/radioGroup.js b/ymir/web/src/components/form/radioGroup.js new file mode 100644 index 0000000000..00e047135b --- /dev/null +++ b/ymir/web/src/components/form/radioGroup.js @@ -0,0 +1,12 @@ +import { Radio } from "antd" +import t from '@/utils/t' + +const RadioGroup = ({ value, onChange = () => { }, options, labelPrefix = '' }) => ( + ({ ...item, label: t(`${labelPrefix}${item.label}`) }))} + value={value} + onChange={onChange} + > +) + +export default RadioGroup diff --git a/ymir/web/src/components/form/tip.js b/ymir/web/src/components/form/tip.js index 9e8f715f93..caea63ce98 100644 --- a/ymir/web/src/components/form/tip.js +++ b/ymir/web/src/components/form/tip.js @@ -1,14 +1,18 @@ -import { Col, Row } from "antd" +import { FailIcon, TipsIcon } from "@/components/common/icons" +import s from './tip.less' -import SingleTip from './singleTip' +const Tip = ({ type = 'success', content = '' }) => { + return content ? ( +
    + {getIcon(type)} + {content} +
    + ) : null +} -const Tip = ({ title = null, content = '', placement = 'right', span=6, formSpan=0, hidden = false, children }) => { - return ( - - {children} - {hidden ? null : } - - ) +function getIcon(type) { + const cls = `${s.icon} ${s[type]}` + return type === 'success' ? : } export default Tip diff --git a/ymir/web/src/components/form/tip.less b/ymir/web/src/components/form/tip.less index cc80197f9d..702f300018 100644 --- a/ymir/web/src/components/form/tip.less +++ b/ymir/web/src/components/form/tip.less @@ -1,4 +1,28 @@ -.icon { - cursor: pointer; - color: rgba(0, 0, 0, 0.25); +.tipContainer { + @error: rgb(242, 99, 123); + @success: @primary-color; + color: rgba(0, 0, 0, 0.65); + + &.error, &.success { + border-radius: 2px; + line-height: 40px; + padding: 0 10px; + } + &.success { + background: fade(@success, 10); + border: 1px solid @success; + } + &.error { + background: fade(@error, 10); + border: 1px solid @error; + } + .success, .error { + margin-right: 10px; + } + .success { + color: @success; + } + .error { + color: @error; + } } \ No newline at end of file diff --git a/ymir/web/src/components/form/uploader.js b/ymir/web/src/components/form/uploader.js index f981527b5f..1e6988e04f 100644 --- a/ymir/web/src/components/form/uploader.js +++ b/ymir/web/src/components/form/uploader.js @@ -3,7 +3,7 @@ import { useState, useEffect } from 'react' import ImgCrop from 'antd-img-crop' import { CloudUploadOutlined } from "@ant-design/icons" -import { getUploadUrl } from "../../services/common" +import { getUploadUrl } from "@/services/common" import storage from '@/utils/storage' import t from '@/utils/t' import 'antd/es/slider/style' @@ -16,8 +16,12 @@ const fileSuffix = { all: ['*'], } -function Uploader({ className, value=null, format="zip", label, max = 200, - maxCount = 1, info = '', type='', crop = false, showUploadList = true, onChange = ()=> {}, onRemove = () => {}}) { +function Uploader({ + className, value = null, format = "zip", label, max = 200, + maxCount = 1, info = '', type = 'primary', crop = false, + btnProps = {}, + showUploadList = true, onChange = () => { }, onRemove = () => { } +}) { label = label || t('model.add.form.upload.btn') const [files, setFiles] = useState(null) @@ -61,25 +65,25 @@ function Uploader({ className, value=null, format="zip", label, max = 200, } const uploader = - - + className={className} + fileList={files} + action={getUploadUrl()} + name='file' + headers={{ "Authorization": `Bearer ${storage.get("access_token")}` }} + // accept={fileSuffix[format].join(',')} + onChange={onFileChange} + onRemove={onRemove} + beforeUpload={beforeUpload} + maxCount={maxCount} + showUploadList={showUploadList} + > + + return ( <> - { format === 'avatar' && crop ? {uploader} : uploader} - {info ?

    {info}

    : null } + {format === 'avatar' && crop ? {uploader} : uploader} + {info ?

    {info}

    : null} ) } diff --git a/ymir/web/src/components/icon/Bushu.tsx b/ymir/web/src/components/icon/Bushu.tsx new file mode 100644 index 0000000000..a3f9f940ce --- /dev/null +++ b/ymir/web/src/components/icon/Bushu.tsx @@ -0,0 +1,49 @@ +import React, { useEffect, useRef } from 'react'; +import styles from './style.css'; +interface IconProps extends React.SVGProps { + size?: string | number; + width?: string | number; + height?: string | number; + spin?: boolean; + rtl?: boolean; + color?: string; + fill?: string; + stroke?: string; +} + +export default function Bushu(props: IconProps) { + const root = useRef(null) + const { size = '1em', width, height, spin, rtl, color, fill, stroke, className, ...rest } = props; + const _width = width || size; + const _height = height || size; + const _stroke = stroke || color; + const _fill = fill || color; + useEffect(() => { + if (!_fill) { + (root.current as SVGSVGElement)?.querySelectorAll('[data-follow-fill]').forEach(item => { + item.setAttribute('fill', item.getAttribute('data-follow-fill') || '') + }) + } + if (!_stroke) { + (root.current as SVGSVGElement)?.querySelectorAll('[data-follow-stroke]').forEach(item => { + item.setAttribute('stroke', item.getAttribute('data-follow-stroke') || '') + }) + } + }, [stroke, color, fill]) + return ( + + + + ) +} diff --git a/ymir/web/src/components/icon/New.tsx b/ymir/web/src/components/icon/New.tsx new file mode 100644 index 0000000000..5d886d51a6 --- /dev/null +++ b/ymir/web/src/components/icon/New.tsx @@ -0,0 +1,49 @@ +import React, { useEffect, useRef } from 'react'; +import styles from './style.css'; +interface IconProps extends React.SVGProps { + size?: string | number; + width?: string | number; + height?: string | number; + spin?: boolean; + rtl?: boolean; + color?: string; + fill?: string; + stroke?: string; +} + +export default function New(props: IconProps) { + const root = useRef(null) + const { size = '1em', width, height, spin, rtl, color, fill, stroke, className, ...rest } = props; + const _width = width || size; + const _height = height || size; + const _stroke = stroke || color; + const _fill = fill || color; + useEffect(() => { + if (!_fill) { + (root.current as SVGSVGElement)?.querySelectorAll('[data-follow-fill]').forEach(item => { + item.setAttribute('fill', item.getAttribute('data-follow-fill') || '') + }) + } + if (!_stroke) { + (root.current as SVGSVGElement)?.querySelectorAll('[data-follow-stroke]').forEach(item => { + item.setAttribute('stroke', item.getAttribute('data-follow-stroke') || '') + }) + } + }, [stroke, color, fill]) + return ( + + + + ) +} diff --git a/ymir/web/src/components/icon/Redu.tsx b/ymir/web/src/components/icon/Redu.tsx new file mode 100644 index 0000000000..e85190ff09 --- /dev/null +++ b/ymir/web/src/components/icon/Redu.tsx @@ -0,0 +1,49 @@ +import React, { useEffect, useRef } from 'react'; +import styles from './style.css'; +interface IconProps extends React.SVGProps { + size?: string | number; + width?: string | number; + height?: string | number; + spin?: boolean; + rtl?: boolean; + color?: string; + fill?: string; + stroke?: string; +} + +export default function Redu(props: IconProps) { + const root = useRef(null) + const { size = '1em', width, height, spin, rtl, color, fill, stroke, className, ...rest } = props; + const _width = width || size; + const _height = height || size; + const _stroke = stroke || color; + const _fill = fill || color; + useEffect(() => { + if (!_fill) { + (root.current as 
SVGSVGElement)?.querySelectorAll('[data-follow-fill]').forEach(item => { + item.setAttribute('fill', item.getAttribute('data-follow-fill') || '') + }) + } + if (!_stroke) { + (root.current as SVGSVGElement)?.querySelectorAll('[data-follow-stroke]').forEach(item => { + item.setAttribute('stroke', item.getAttribute('data-follow-stroke') || '') + }) + } + }, [stroke, color, fill]) + return ( + + + + ) +} diff --git a/ymir/web/src/components/icon/Shangjia.tsx b/ymir/web/src/components/icon/Shangjia.tsx new file mode 100644 index 0000000000..aba0cd961c --- /dev/null +++ b/ymir/web/src/components/icon/Shangjia.tsx @@ -0,0 +1,49 @@ +import React, { useEffect, useRef } from 'react'; +import styles from './style.css'; +interface IconProps extends React.SVGProps { + size?: string | number; + width?: string | number; + height?: string | number; + spin?: boolean; + rtl?: boolean; + color?: string; + fill?: string; + stroke?: string; +} + +export default function Shangjia(props: IconProps) { + const root = useRef(null) + const { size = '1em', width, height, spin, rtl, color, fill, stroke, className, ...rest } = props; + const _width = width || size; + const _height = height || size; + const _stroke = stroke || color; + const _fill = fill || color; + useEffect(() => { + if (!_fill) { + (root.current as SVGSVGElement)?.querySelectorAll('[data-follow-fill]').forEach(item => { + item.setAttribute('fill', item.getAttribute('data-follow-fill') || '') + }) + } + if (!_stroke) { + (root.current as SVGSVGElement)?.querySelectorAll('[data-follow-stroke]').forEach(item => { + item.setAttribute('stroke', item.getAttribute('data-follow-stroke') || '') + }) + } + }, [stroke, color, fill]) + return ( + + + + ) +} diff --git a/ymir/web/src/components/icon/Xiangmudiedai.tsx b/ymir/web/src/components/icon/Xiangmudiedai.tsx new file mode 100644 index 0000000000..017a643fa2 --- /dev/null +++ b/ymir/web/src/components/icon/Xiangmudiedai.tsx @@ -0,0 +1,49 @@ +import React, { useEffect, useRef } from 'react'; +import styles from './style.css'; +interface IconProps extends React.SVGProps { + size?: string | number; + width?: string | number; + height?: string | number; + spin?: boolean; + rtl?: boolean; + color?: string; + fill?: string; + stroke?: string; +} + +export default function Xiangmudiedai(props: IconProps) { + const root = useRef(null) + const { size = '1em', width, height, spin, rtl, color, fill, stroke, className, ...rest } = props; + const _width = width || size; + const _height = height || size; + const _stroke = stroke || color; + const _fill = fill || color; + useEffect(() => { + if (!_fill) { + (root.current as SVGSVGElement)?.querySelectorAll('[data-follow-fill]').forEach(item => { + item.setAttribute('fill', item.getAttribute('data-follow-fill') || '') + }) + } + if (!_stroke) { + (root.current as SVGSVGElement)?.querySelectorAll('[data-follow-stroke]').forEach(item => { + item.setAttribute('stroke', item.getAttribute('data-follow-stroke') || '') + }) + } + }, [stroke, color, fill]) + return ( + + + + ) +} diff --git a/ymir/web/src/components/icon/style.css b/ymir/web/src/components/icon/style.css new file mode 100644 index 0000000000..2da8ab39c6 --- /dev/null +++ b/ymir/web/src/components/icon/style.css @@ -0,0 +1,9 @@ +.spin,.spin svg {animation: iconpark-spin 1s infinite linear;} +.rtl,.rtl svg {transform: scaleX(-1);} +.spin.rtl,.spin.rtl svg {animation: iconpark-spin-rtl 1s infinite linear;} +@keyframes iconpark-spin { + 0% { -webkit-transform: rotate(0); transform: rotate(0);} 100% 
{-webkit-transform: rotate(360deg); transform: rotate(360deg);} +} +@keyframes iconpark-spin-rtl { + 0% {-webkit-transform: scaleX(-1) rotate(0); transform: scaleX(-1) rotate(0);} 100% {-webkit-transform: scaleX(-1) rotate(360deg); transform: scaleX(-1) rotate(360deg);} +} \ No newline at end of file diff --git a/ymir/web/src/components/model/editStageCell.js b/ymir/web/src/components/model/editStageCell.js new file mode 100644 index 0000000000..2e0b330be0 --- /dev/null +++ b/ymir/web/src/components/model/editStageCell.js @@ -0,0 +1,72 @@ +import { useState, useEffect, useRef } from "react" +import { Col, Form, Input, Row, Select } from "antd" +import t from '@/utils/t' +import { percent } from '@/utils/number' +import useFetch from '@/hooks/useFetch' +const { useForm } = Form + +const EditStageCell = ({ record, saveHandle = () => { } }) => { + const [editing, setEditing] = useState(false) + const selectRef = useRef(null) + const [form] = useForm() + const [result, setRecommendStage] = useFetch('model/setRecommendStage') + const recommendStage = record.stages.find(stage => stage.id === record.recommendStage) || {} + const multipleStages = record.stages.length > 1 + + useEffect(() => { + if (editing && multipleStages) { + selectRef.current.focus() + } + }, [editing]) + + useEffect(() => { + if (result) { + saveHandle(result, record) + } + }, [result]) + + const save = async () => { + try { + const values = await form.validateFields() + await setRecommendStage({ ...values, model: record.id }) + } catch (errInfo) { + console.log('Save failed:', errInfo) + } + setEditing(false) + } + + const tagRender = ({ stage, color = 'rgba(0, 0, 0, 0.65)' }) => ( + {stage.name} + mAP: {percent(stage.map)} + ) + + return editing && multipleStages ? ( + + + + + + ) : ( +
    setEditing(true)} + style={{ cursor: multipleStages ? 'pointer' : 'text' }} + > + {tagRender({ stage: recommendStage, color: 'orange' })} +
    + ) +} + +export default EditStageCell diff --git a/ymir/web/src/components/model/list.js b/ymir/web/src/components/model/list.js index f02e53a07e..75d8face3a 100644 --- a/ymir/web/src/components/model/list.js +++ b/ymir/web/src/components/model/list.js @@ -2,17 +2,16 @@ import React, { useEffect, useState, useRef } from "react" import { connect } from 'dva' import styles from "./list.less" import { Link, useHistory } from "umi" -import { Form, Input, Table, Modal, Row, Col, Tooltip, Pagination, Space, Empty, Button, } from "antd" -import { - SearchOutlined, -} from "@ant-design/icons" +import { Form, Input, Table, Modal, Row, Col, Tooltip, Pagination, Space, Empty, Button, message, Popover, } from "antd" import { diffTime } from '@/utils/date' -import { states } from '@/constants/model' +import { ResultStates } from '@/constants/common' import { TASKTYPES, TASKSTATES } from '@/constants/task' import t from "@/utils/t" -import { percent } from '@/utils/number' +import usePublish from "@/hooks/usePublish" +import { getDeployUrl } from '@/constants/common' +import CheckProjectDirty from "@/components/common/CheckProjectDirty" import Actions from "@/components/table/actions" import TypeTag from "@/components/task/typeTag" import RenderProgress from "@/components/common/progress" @@ -23,26 +22,32 @@ import { getTensorboardLink } from "@/services/common" import { ShieldIcon, VectorIcon, EditIcon, - EyeOffIcon, DeleteIcon, FileDownloadIcon, TrainIcon, WajueIcon, StopIcon,SearchIcon, + EyeOffIcon, DeleteIcon, FileDownloadIcon, TrainIcon, WajueIcon, StopIcon, SearchIcon, ArrowDownIcon, ArrowRightIcon, ImportIcon, BarchartIcon } from "@/components/common/icons" +import EditStageCell from "./editStageCell" +import { DescPop } from "../common/descPop" +import useRerunAction from "../../hooks/useRerunAction" const { useForm } = Form -function Model({ pid, project = {}, iterations, group, modelList, versions, query, ...func }) { +function Model({ pid, project = {}, iterations, groups, modelList, versions, query, ...func }) { const history = useHistory() const { name } = history.location.query const [models, setModels] = useState([]) const [modelVersions, setModelVersions] = useState({}) const [total, setTotal] = useState(1) - const [selectedVersions, setSelectedVersions] = useState({}) + const [selectedVersions, setSelectedVersions] = useState({ selected: [], versions: {} }) const [form] = useForm() const [current, setCurrent] = useState({}) - const [visibles, setVisibles] = useState(group ? 
{ [group]: true } : {}) + const [visibles, setVisibles] = useState({}) + const [trainingUrl, setTrainingUrl] = useState('') let [lock, setLock] = useState(true) const hideRef = useRef(null) const delGroupRef = useRef(null) const terminateRef = useRef(null) + const generateRerun = useRerunAction() + const [publish, publishResult] = usePublish() /** use effect must put on the top */ useEffect(() => { @@ -52,6 +57,11 @@ function Model({ pid, project = {}, iterations, group, modelList, versions, quer setLock(false) }, [history.location]) + useEffect(() => { + const initVisibles = groups.reduce((prev, group) => ({ ...prev, [group]: true }), {}) + setVisibles(initVisibles) + }, [groups]) + useEffect(() => { const mds = setGroupLabelsByProject(modelList.items, project) setModels(mds) @@ -106,6 +116,14 @@ function Model({ pid, project = {}, iterations, group, modelList, versions, quer } }, [query, lock]) + useEffect(() => { + const selected = selectedVersions.selected + const mvs = Object.values(modelVersions).flat().filter(version => selected.includes(version.id)) + const hashs = mvs.map(version => version.task.hash) + const url = getTensorboardLink(hashs) + setTrainingUrl(url) + }, [selectedVersions]) + async function initState() { await func.resetQuery() form.resetFields() @@ -116,13 +134,19 @@ function Model({ pid, project = {}, iterations, group, modelList, versions, quer title: showTitle("model.column.name"), dataIndex: "versionName", className: styles[`column_name`], - render: (name, { id, state, projectLabel, iterationLabel }) => - {name} - - {projectLabel ?
    {projectLabel}
    : null} - {iterationLabel ?
    {iterationLabel}
    : null} - -
, + render: (name, { id, description, projectLabel, iterationLabel }) => { + const popContent = <DescPop description={description} /> + const content = + {name} + + {projectLabel ?
    {projectLabel}
    : null} + {iterationLabel ?
    {iterationLabel}
    : null} + +
    + return description ? + {content} + : content + }, ellipsis: true, }, { @@ -131,16 +155,17 @@ function Model({ pid, project = {}, iterations, group, modelList, versions, quer render: (type) => , }, { - title: showTitle("model.column.map"), - dataIndex: "map", - render: map => {percent(map)}, - sorter: (a, b) => a - b, - align: 'center', + title: showTitle("model.column.stage"), + dataIndex: "recommendStage", + render: (_, record) => isValidModel(record.state) ? + : null, + // align: 'center', + width: 300, }, { title: showTitle('dataset.column.state'), dataIndex: 'state', - render: (state, record) => RenderProgress(state, record, true), + render: (state, record) => RenderProgress(state, record), // width: 60, }, { @@ -169,9 +194,19 @@ function Model({ pid, project = {}, iterations, group, modelList, versions, quer const listChange = (current, pageSize) => { const limit = pageSize const offset = (current - 1) * pageSize - func.updateQuery({ ...query, limit, offset }) + func.updateQuery({ ...query, current, limit, offset }) } + function updateModelVersion(result) { + setModelVersions(mvs => { + return { + ...mvs, + [result.groupId]: mvs[result.groupId].map(version => { + return version.id === result.id ? result : version + }) + } + }) + } async function showVersions(id) { setVisibles((old) => ({ ...old, [id]: !old[id] })) @@ -182,6 +217,7 @@ function Model({ pid, project = {}, iterations, group, modelList, versions, quer Object.keys(versions).forEach(gid => { const list = versions[gid] const updatedList = list.map(item => { + delete item.iterationLabel const iteration = iterations.find(iter => iter.model === item.id) if (iteration) { item.iterationLabel = t('iteration.tag.round', iteration) @@ -197,6 +233,7 @@ function Model({ pid, project = {}, iterations, group, modelList, versions, quer Object.keys(versions).forEach(gid => { const list = versions[gid] const updatedList = list.map(item => { + delete item.projectLabel item = setLabelByProject(project?.model, 'isInitModel', item) return { ...item } }) @@ -207,6 +244,7 @@ function Model({ pid, project = {}, iterations, group, modelList, versions, quer function setGroupLabelsByProject(items, project) { return items.map(item => { + delete item.projectLabel item = setLabelByProject(project?.model, 'isInitModel', item) return { ...item } }) @@ -234,8 +272,16 @@ function Model({ pid, project = {}, iterations, group, modelList, versions, quer } const actionMenus = (record) => { - const { id, name, url, state, taskState, taskType, task, isProtected } = record + const { id, name, url, state, taskState, taskType, task, isProtected, stages, recommendStage } = record + const actions = [ + { + key: "publish", + label: t("model.action.publish"), + hidden: () => !isValidModel(state) || !getDeployUrl(), + onclick: () => publish(record), + icon: , + }, { key: "verify", label: t("model.action.verify"), @@ -255,21 +301,21 @@ function Model({ pid, project = {}, iterations, group, modelList, versions, quer key: "mining", label: t("dataset.action.mining"), hidden: () => !isValidModel(state), - onclick: () => history.push(`/home/task/mining/${pid}?mid=${id}`), + onclick: () => history.push(`/home/project/${pid}/mining?mid=${id},${recommendStage}`), icon: , }, { key: "train", label: t("dataset.action.train"), hidden: () => !isValidModel(state), - onclick: () => history.push(`/home/task/train/${pid}?mid=${id}`), + onclick: () => history.push(`/home/project/${pid}/train?mid=${id},${recommendStage}`), icon: , }, { key: "inference", label: 
t("dataset.action.inference"), hidden: () => !isValidModel(state), - onclick: () => history.push(`/home/task/inference/${pid}?mid=${id}`), + onclick: () => history.push(`/home/project/${pid}/inference?mid=${id},${recommendStage}`), icon: , }, { @@ -281,17 +327,18 @@ function Model({ pid, project = {}, iterations, group, modelList, versions, quer }, { key: "tensor", - label: 'Tensorboard', + label: t('task.action.training'), target: '_blank', link: getTensorboardLink(task.hash), hidden: () => taskType !== TASKTYPES.TRAINING, icon: , }, + generateRerun(record), { key: "hide", label: t("common.action.hide"), onclick: () => hide(record), - hidden: ()=> hideHidden(record), + hidden: () => hideHidden(record), icon: , }, ] @@ -304,15 +351,32 @@ function Model({ pid, project = {}, iterations, group, modelList, versions, quer } const multipleInfer = () => { - const ids = Object.values(selectedVersions) - history.push(`/home/task/inference/${pid}?mid=${ids}`) + const { selected } = selectedVersions + const versionsObject = Object.values(versions).flat() + const stages = versionsObject.filter(md => selected.includes(md.id)).map(md => { + return [md.id, md.recommendStage].toString() + }) + if (stages.length) { + history.push(`/home/project/${pid}/inference?mid=${stages.join('|')}`) + } else { + message.warning(t('model.list.batch.invalid')) + } } const multipleHide = () => { - const ids = Object.values(selectedVersions).flat() const allVss = Object.values(versions).flat() - const vss = allVss.filter(({id}) => ids.includes(id)) - hideRef.current.hide(vss, project.hiddenModels) + const vss = allVss.filter(({ id, state }) => selectedVersions.selected.includes(id)) + if (vss.length) { + hideRef.current.hide(vss, project.hiddenModels) + } else { + message.warning(t('model.list.batch.invalid')) + } + } + + const getDisabledStatus = (filter = () => { }) => { + const allVss = Object.values(versions).flat() + const { selected } = selectedVersions + return !selected.length || allVss.filter(({ id }) => selected.includes(id)).some(version => filter(version)) } const hide = (version) => { @@ -325,12 +389,18 @@ function Model({ pid, project = {}, iterations, group, modelList, versions, quer const hideOk = (result) => { result.forEach(item => fetchVersions(item.model_group_id, true)) getData() - setSelectedVersions({}) + setSelectedVersions({ selected: [], versions: {} }) } - + function rowSelectChange(gid, rowKeys) { - setSelectedVersions(old => ({ ...old, [gid]: rowKeys })) + setSelectedVersions(({ versions }) => { + versions[gid] = rowKeys + return { + selected: Object.values(versions).flat(), + versions: { ...versions }, + } + }) } const stop = (dataset) => { @@ -369,15 +439,15 @@ function Model({ pid, project = {}, iterations, group, modelList, versions, quer } function isValidModel(state) { - return states.VALID === state + return ResultStates.VALID === state } function isRunning(state) { - return states.READY === state + return ResultStates.READY === state } function add() { - history.push(`/home/model/import/${pid}`) + history.push(`/home/project/${pid}/model/import`) } const addBtn = ( @@ -386,16 +456,17 @@ function Model({ pid, project = {}, iterations, group, modelList, versions, quer ) - const renderMultipleActions = Object.values(selectedVersions).flat().length ? ( - <> - - - - ) : null + const renderMultipleActions = <> + + + + const renderGroups = (<>
    @@ -416,8 +487,8 @@ function Model({ pid, project = {}, iterations, group, modelList, versions, quer onChange={tableChange} rowKey={(record) => record.id} rowSelection={{ + selectedRowKeys: selectedVersions.versions[group.id], onChange: (keys) => rowSelectChange(group.id, keys), - getCheckboxProps: (record) => ({ disabled: isRunning(record.state), }), }} rowClassName={(record, index) => index % 2 === 0 ? '' : 'oddRow'} columns={columns} @@ -426,17 +497,23 @@ function Model({ pid, project = {}, iterations, group, modelList, versions, quer
    ) : }
    - + ) return (
    -
    - - {addBtn} - {renderMultipleActions} - -
    + + + + {addBtn} + {renderMultipleActions} + + + + + +
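A note on the `mid` query parameter used by the actions above: each selected model version is encoded as `modelId,stageId` (via `[md.id, md.recommendStage].toString()`), and multiple selections are joined with `|`. The helpers below sketch that round trip; the names encodeMid/decodeMid are hypothetical and not part of this PR:

// Hypothetical helpers mirroring the mid format built in model/list.js:
// [[12, 34], [56, 78]] <-> '12,34|56,78'
const encodeMid = (pairs) => pairs.map((pair) => pair.join(',')).join('|')

const decodeMid = (mid = '') =>
  mid
    .split('|')
    .filter(Boolean)
    .map((pair) => pair.split(',').map(Number))

// encodeMid([[12, 34], [56, 78]]) === '12,34|56,78'
// decodeMid('12,34|56,78') yields [[12, 34], [56, 78]]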
    [ { @@ -32,20 +33,21 @@ const menus = () => [ icon: , }, { - label: 'common.top.menu.configure', - key: "/home/configures", + label: 'algo.label', + key: "/home/algo", + icon: , + hidden: !getDeployUrl(), + }, + { + label: 'common.top.menu.image', + key: "/home/image", icon: , - children: [ - { - label: 'common.top.menu.image', - key: "/home/image", - }, - { - label: 'common.top.menu.permission', - key: "/home/permission", - permission: ROLES.SUPER, - }, - ] + }, + { + label: 'common.top.menu.permission', + key: "/home/permission", + permission: ROLES.SUPER, + icon: }, ] @@ -104,7 +106,7 @@ function HeaderNav({ simple = false, username, loginout, avatar, role }) { menu.children = handleMenus(menu.children) } menu.label = t(menu.label) - validPermission(role, menu.permission) && result.push(menu) + !menu.hidden && validPermission(role, menu.permission) && result.push(menu) }) return result } diff --git a/ymir/web/src/components/project/keywordsItem.js b/ymir/web/src/components/project/keywordsItem.js new file mode 100644 index 0000000000..5138db6f1a --- /dev/null +++ b/ymir/web/src/components/project/keywordsItem.js @@ -0,0 +1,6 @@ +const KeywordsItem = ({ keywords = [], len = 5 }) => { + const kws = keywords.length > len ? [...keywords.slice(0, len), '...'] : keywords + return {kws.join(',')} +} + +export default KeywordsItem diff --git a/ymir/web/src/components/project/list.js b/ymir/web/src/components/project/list.js index ccf64071be..d6b0c3cb9a 100644 --- a/ymir/web/src/components/project/list.js +++ b/ymir/web/src/components/project/list.js @@ -2,7 +2,8 @@ import { List, Skeleton, Space, Col, Row, Popover } from "antd" import t from "@/utils/t" import s from "./list.less" import { useHistory } from "umi" -import { getStageLabel } from '@/constants/project' +import { getStageLabel } from '@/constants/iteration' +import KeywordsItem from "./keywordsItem" export const Lists = ({ projects = [], more = '' }) => { const history = useHistory() @@ -10,34 +11,34 @@ export const Lists = ({ projects = [], more = '' }) => { const title = - {item.name} + {item.name} {item.isExample ? {t('project.example')} : null} {t('project.train_classes')}: - {item.keywords.join(',')} + - + {item.enableIteration ? {t('project.iteration.current')}: {t(getStageLabel(item.currentStage, item.round))} - + : null } {more} const tipContent =
    -
    {t('project.train_set')}:{item.trainSet?.name}
    -
    {t('project.test_set')}:{item.testSet?.name}
    -
    {t('project.mining_set')}:{item.miningSet?.name}
    +
    {t('project.train_set')}: {item.trainSet?.name}
    +
    {t('project.test_set')}: {item.testSet?.name}
    +
    {t('project.mining_set')}: {item.miningSet?.name}
    ; const desc = <> -
    Datasets
    +
    {t('project.tab.set.title')}
    {item.setCount}
    -
    Models
    +
    {t('project.tab.model.title')}
    {item.modelCount}
    @@ -51,10 +52,10 @@ export const Lists = ({ projects = [], more = '' }) => {
    - + {item.enableIteration ?
    {t('project.iteration.number')}
    {item.round}
    - + : null } {t('project.content.desc')}: {item.description} @@ -63,7 +64,7 @@ export const Lists = ({ projects = [], more = '' }) => { return { history.push(`/home/project/detail/${item.id}`) }}> + onClick={() => { history.push(`/home/project/${item.id}/detail`) }}> diff --git a/ymir/web/src/components/table/actions.js b/ymir/web/src/components/table/actions.js index b22b8e7928..e584c26450 100644 --- a/ymir/web/src/components/table/actions.js +++ b/ymir/web/src/components/table/actions.js @@ -6,25 +6,29 @@ const actions = (menus) => menus.map((menu, i) => action(menu, i === menus.lengt const isOuterLink = (link) => /^http(s)?:/i.test(link) -const moreActions = (menus) => { - return ( - - {menus.map((menu) => ( - - {menu.link ? {action(menu)} : action(menu)} - - ))} - - ) -} +const moreActions = (menus) => ({ + key: menu.key, + label: action(menu) + }))} /> function action({ key, onclick = () => { }, icon, label, link, target, disabled }, last) { + const cls = `${s.action} ${last ? s.last : ''}` const btn = ( - ) - return link ? {btn} : btn + return link ? + {icon} {label} + : btn } const Actions = ({ menus, showCount = 3 }) => { diff --git a/ymir/web/src/components/table/table.less b/ymir/web/src/components/table/table.less index 43a0b0ea14..b8cdabb50d 100644 --- a/ymir/web/src/components/table/table.less +++ b/ymir/web/src/components/table/table.less @@ -1,6 +1,7 @@ .actions { color: #3BA0FF; .action { + white-space: nowrap; padding: 0; height: 22px; line-height: 22px; @@ -23,7 +24,9 @@ margin-left: 0; } } - .l { - +} +.more { + .action { + color: rgba(0, 0, 0, 0.65); } } \ No newline at end of file diff --git a/ymir/web/src/components/tabs/cardTabs.js b/ymir/web/src/components/tabs/cardTabs.js new file mode 100644 index 0000000000..2402eb5255 --- /dev/null +++ b/ymir/web/src/components/tabs/cardTabs.js @@ -0,0 +1,37 @@ +import { Card } from "antd" +import { useEffect, useState } from "react" +import { useLocation, useHistory } from "umi" + +export const CardTabs = ({ data = [], initialTab, ...props }) => { + const location = useLocation() + const history = useHistory() + const [tabs, setTabs] = useState([]) + const [contents, setContents] = useState({}) + const [active, setActive] = useState(null) + + useEffect(() => (tabs.length && !active) && setActive(initialTab || tabs[0].key), [tabs]) + useEffect(() => { + const type = location?.state?.type + if (typeof type !== 'undefined') { + setActive(type) + } + }, [location.state]) + + useEffect(() => { + setTabs(data.map(({ tab, key }) => ({ tab: tab || key, key }))) + setContents(data.reduce((prev, { content, key }) => ({ ...prev, [key]: content }), {})) + }, [data]) + + return history.replace({ state: { type: key } })} + tabProps={{ + moreIcon: null, + }} + > + {contents[active]} + +} diff --git a/ymir/web/src/components/task/BottomButtons.js b/ymir/web/src/components/task/BottomButtons.js new file mode 100644 index 0000000000..8e8b7d5709 --- /dev/null +++ b/ymir/web/src/components/task/BottomButtons.js @@ -0,0 +1,71 @@ +import React, { } from "react" +import { Button, Form, Space } from "antd" + +import t from "@/utils/t" + + +function BottomButtons({ + mode='normal', + fromIteration = false, + stage, + result, + stepState = 'start', + label = 'common.confirm', + ok = () => { }, + next = () => {}, + skip = () => { }, + update = () => { } +}) { + + const currentStage = () => stage.value === stage.current + const finishStage = () => stage.value < stage.current + const pendingStage = () => stage.value > stage.current + + const isPending 
= () => state < 0 + const isReady = () => state === states.READY + const isValid = () => state === states.VALID + const isInvalid = () => state === states.INVALID + + // !stage.unskippable && !end && currentStage() + const skipBtn = + + + + const confirmBtn = + + + + const backBtn = + + + + const nextBtn = + + + + return ( + + + {mode === 'iteration' ? <> + {stepState === 'start' ? confirmBtn : null} + {stepState === 'finish' ? nextBtn: null} + {skipBtn} + : <> + {confirmBtn} + {backBtn} + + } + + + ) +} + +export default BottomButtons diff --git a/ymir/web/src/components/task/detail.js b/ymir/web/src/components/task/detail.js index 9e739cffc0..e22f67e65a 100644 --- a/ymir/web/src/components/task/detail.js +++ b/ymir/web/src/components/task/detail.js @@ -1,12 +1,8 @@ -import React, { useEffect, useRef, useState } from "react" -import { connect } from "dva" -import { Link, useHistory } from "umi" +import React, { useEffect, useState } from "react" +import { Link, useHistory, useParams } from "umi" import { - Button, - Card, Col, Descriptions, - Progress, Row, Space, Tag, @@ -15,24 +11,36 @@ import { import t from "@/utils/t" import { format } from "@/utils/date" import { getTensorboardLink } from "@/services/common" -import Terminate from "./terminate" -import { TASKTYPES } from "@/constants/task" -import s from "./detail.less" -import IgnoreKeywords from "../common/ignoreKeywords" +import { TASKTYPES, getTaskTypeLabel } from "@/constants/task" +import useFetch from '@/hooks/useFetch' +import { getRecommendStage } from '@/constants/model' + +import renderLiveCodeItem from '@/components/task/items/livecode' const { Item } = Descriptions -function TaskDetail({ task = {}, ignore = [], batchDatasets, getModel }) { +function TaskDetail({ task = {} }) { const history = useHistory() const id = task.id - const [datasets, setDatasets] = useState({}) - const [model, setModel] = useState({}) + const { id: pid } = useParams() + const [datasetNames, setDatasetNames] = useState({}) + const [datasets, getDatasets] = useFetch('dataset/batchDatasets', []) + const [model, getModel] = useFetch('model/getModel', {}) useEffect(() => { task.id && !isImport(task.type) && fetchDatasets() - hasValidModel(task.type) && task?.parameters?.model_id && fetchModel(task.parameters.model_id) + hasValidModel(task.type) && task?.parameters?.model_id && getModel({ id: task.parameters.model_id }) }, [task.id]) + useEffect(() => { + if (!datasets.length) { + return + } + const names = {} + datasets.forEach((ds) => (names[ds.id] = ds)) + setDatasetNames(names) + }, [datasets]) + async function fetchDatasets() { const pa = task.parameters || {} const inds = pa.include_datasets || [] @@ -47,16 +55,7 @@ function TaskDetail({ task = {}, ignore = [], batchDatasets, getModel }) { if (!ids.length) { return } - const dss = await batchDatasets(ids) - const names = {} - dss.forEach((ds) => (names[ds.id] = ds)) - setDatasets(names) - } - - async function fetchModel(id) { - const result = await getModel(id) - - result && setModel(result) + getDatasets({ pid, ids }) } const labelStyle = { @@ -65,9 +64,6 @@ function TaskDetail({ task = {}, ignore = [], batchDatasets, getModel }) { justifyContent: "flex-end", } - function isModel(type) { - return [TASKTYPES.TRAINING, TASKTYPES.MODELCOPY, TASKTYPES.MODELIMPORT].includes(type) - } function hasValidModel(type) { return [TASKTYPES.TRAINING, TASKTYPES.MINING, TASKTYPES.INFERENCE].includes(type) } @@ -77,7 +73,7 @@ function TaskDetail({ task = {}, ignore = [], batchDatasets, getModel }) { } function 
renderDatasetName(id) { - const ds = datasets[id] + const ds = datasetNames[id] const name = ds ? `${ds.name} ${ds.versionName}` : id return ( @@ -89,23 +85,49 @@ function TaskDetail({ task = {}, ignore = [], batchDatasets, getModel }) { return {dts.map((id) => renderDatasetName(id))} } - function renderConfig(config = {}) { - return Object.keys(config).map((key) => ( - - - {key}: - - {config[key].toString()} - - )) + function renderModel(id, pid, model = {}, label = 'task.mining.form.model.label') { + const name = model.id ? `${model.name} ${model.versionName} ${getRecommendStage(model).name}` : id + return id ? + + {name} + + : null } - function renderTrainKeywords(keywords = []) { - return - {keywords.map((keyword) => ( - {keyword} + function renderDuration(label) { + return label ? {label} : null + } + + function renderKeepAnnotations(type) { + const maps = { 1: 'gt', 2: 'pred' } + const label = type ? maps[type] : 'none' + return t(`task.label.form.keep_anno.${label}`) + } + + function renderPreProcess(preprocess) { + return preprocess ? + {Object.keys(preprocess).map((key) => ( + + + {key}: + + {JSON.stringify(preprocess[key])} + ))} - + : null + } + + function renderConfig(config = {}) { + return { + Object.keys(config).map((key) => ( + + + {key}: + + {config[key].toString()} + + )) + } } function renderTrainImage(image, span = 1) { @@ -114,20 +136,6 @@ function TaskDetail({ task = {}, ignore = [], batchDatasets, getModel }) { } - function renderTrainAlgo(param = {}) { - return <> - - {param.network} - - - {param.backbone} - - - {t('task.train.form.traintypes.detect')} - - - } - function renderDatasetSource(id) { return {renderDatasetName(id)} } @@ -151,6 +159,8 @@ function TaskDetail({ task = {}, ignore = [], batchDatasets, getModel }) { [TASKTYPES.COPY]: renderCopy, [TASKTYPES.INFERENCE]: renderInference, [TASKTYPES.FUSION]: renderFusion, + [TASKTYPES.MERGE]: renderMerge, + [TASKTYPES.FILTER]: renderFilter, [TASKTYPES.MODELCOPY]: renderModelCopy, [TASKTYPES.MODELIMPORT]: renderModelImport, [TASKTYPES.SYS]: renderSys, @@ -168,28 +178,24 @@ function TaskDetail({ task = {}, ignore = [], batchDatasets, getModel }) { {renderDatasetName(task.parameters.validation_dataset_id)} - {renderTrainKeywords(task?.parameters?.keywords)} - {renderTrainAlgo(task?.parameters)} + {renderModel(task.parameters.model_id, task.project_id, model, 'task.detail.label.premodel')} + {renderDuration(task.durationLabel)} + {renderLiveCodeItem(task.config)} {renderTrainImage(task?.parameters?.docker_image, 2)} - + {t("task.detail.tensorboard.link.label")} - - {renderConfig(task.config)} - + {renderPreProcess(task.parameters?.preprocess)} + {renderConfig(task.config)} ) const renderMining = () => ( <> {renderDatasetSource(task?.parameters.dataset_id)} {renderCreateTime(task.create_datetime)} - - - {model?.name || task.parameters.model_id} - - + {renderModel(task.parameters.model_id, task.project_id, model)} {task.parameters.mining_algorithm} @@ -204,9 +210,8 @@ function TaskDetail({ task = {}, ignore = [], batchDatasets, getModel }) { {task.parameters.docker_image} - - {renderConfig(task.config)} - + {renderLiveCodeItem(task.config)} + {renderConfig(task.config)} ) const renderLabel = () => ( @@ -224,7 +229,8 @@ function TaskDetail({ task = {}, ignore = [], batchDatasets, getModel }) { ))} - {task.parameters.keep_annotations ? t("common.yes") : t("common.no")} + {renderKeepAnnotations(task.parameters.annotation_type)} + {/* {task.parameters.keep_annotations ? 
t("common.yes") : t("common.no")} */} {task.parameters.extra_url ? ( @@ -238,39 +244,28 @@ function TaskDetail({ task = {}, ignore = [], batchDatasets, getModel }) { const renderModelImport = () => <> {t('task.type.modelimport')} {renderCreateTime(task.create_datetime)} - {renderTrainKeywords(task?.parameters?.keywords)} - {renderTrainAlgo(task?.parameters)} {renderTrainImage(task?.parameters?.docker_image, 2)} - - {renderConfig(task.config)} - + {renderConfig(task.config)} const renderModelCopy = () => <> {t('task.type.modelcopy')} {renderCreateTime(task.create_datetime)} - {renderTrainKeywords(task?.parameters?.keywords)} - {renderTrainAlgo(task?.parameters)} + {renderModel(task.parameters.model_id, task.project_id, model, 'task.detail.label.premodel')} + {renderLiveCodeItem(task.config)} {renderTrainImage(task?.parameters?.docker_image, 2)} - - {renderConfig(task.config)} - + {renderPreProcess(task.parameters?.preprocess)} + {renderConfig(task.config)} const renderImport = () => ( <> {renderImportSource(task?.parameters)} {renderCreateTime(task.create_datetime)} - - - ) const renderCopy = () => ( <> {renderImportSource(task?.parameters)} {renderCreateTime(task.create_datetime)} - - - ) const renderInference = () => ( @@ -288,12 +283,11 @@ function TaskDetail({ task = {}, ignore = [], batchDatasets, getModel }) { {task.parameters.top_k} - + {task.parameters.docker_image} - - {renderConfig(task.config)} - + {renderLiveCodeItem(task.config)} + {renderConfig(task.config)} ) const renderFusion = () => ( @@ -321,6 +315,37 @@ function TaskDetail({ task = {}, ignore = [], batchDatasets, getModel }) { ) + const renderMerge = () => ( + <> + {renderDatasetSource(task?.parameters?.dataset_id)} + {renderCreateTime(task.create_datetime)} + + {renderDatasetNames(task?.parameters?.include_datasets)} + + + {renderDatasetNames(task?.parameters?.exclude_datasets)} + + + ) + const renderFilter = () => ( + <> + {renderDatasetSource(task?.parameters?.dataset_id)} + {renderCreateTime(task.create_datetime)} + + {task.parameters?.include_keywords?.map((keyword) => ( + {keyword} + ))} + + + {task.parameters?.exclude_keywords?.map((keyword) => ( + {keyword} + ))} + + + {task?.parameters?.sampling_count} + + + ) return (
    @@ -328,7 +353,7 @@ function TaskDetail({ task = {}, ignore = [], batchDatasets, getModel }) { column={2} bordered labelStyle={labelStyle} - title={
    {t("dataset.column.source")}
    } + title={
    {t("dataset.column.source") + " > " + t(getTaskTypeLabel(task.type))}
    } className='infoTable' > {task.id ? renderTypes() : null} @@ -337,28 +362,4 @@ function TaskDetail({ task = {}, ignore = [], batchDatasets, getModel }) { ) } -const props = (state) => { - return { - logined: state.user.logined, - taskItem: state.task.task, - } -} - -const actions = (dispatch) => { - return { - batchDatasets(ids) { - return dispatch({ - type: "dataset/batchDatasets", - payload: ids, - }) - }, - getModel(id, force) { - return dispatch({ - type: "model/getModel", - payload: { id, force }, - }) - }, - } -} - -export default connect(props, actions)(TaskDetail) +export default TaskDetail diff --git a/ymir/web/src/components/task/error.js b/ymir/web/src/components/task/error.js index 0e23c3f35f..16f28f59e5 100644 --- a/ymir/web/src/components/task/error.js +++ b/ymir/web/src/components/task/error.js @@ -13,7 +13,7 @@ const labelStyle = { justifyContent: "flex-end", } -export default function Error({ code, msg = '' }) { +export default function Error({ code, msg = '', terminated }) { const [visible, setVisible] = useState(false) function formatErrorMessage(message) { @@ -22,22 +22,29 @@ export default function Error({ code, msg = '' }) {
    } - return
    + const renderError = <> + + {code ? t(`error${code}`) : null} + {msg ? setVisible(!visible)}>{visible ? : } : null} + + {msg && visible ? + {formatErrorMessage(msg)} + : null} + + const renderTerminated = + {t('task.detail.terminated')} + + + return terminated || code ?
    {t("task.detail.error.title")}
    } + title={
    {t("task.state")}
    } className='infoTable' > - - {t(`error${code}`)} - {msg ? setVisible(!visible)}>{visible ? : } : null} - - {msg && visible ? - {formatErrorMessage(msg)} - : null} + {terminated ? renderTerminated : renderError } -
    +
    : null } \ No newline at end of file diff --git a/ymir/web/src/components/task/fusion.js b/ymir/web/src/components/task/fusion.js new file mode 100644 index 0000000000..f5ac278708 --- /dev/null +++ b/ymir/web/src/components/task/fusion.js @@ -0,0 +1,233 @@ +import React, { useState, useEffect, useCallback } from "react" +import { Select, Button, Form, message, Card, Space, Radio, Row, Col, InputNumber, Checkbox } from "antd" +import { useHistory, useParams, useSelector } from "umi" + +import { formLayout } from "@/config/antd" +import t from "@/utils/t" +import { randomNumber } from "@/utils/number" +import useFetch from '@/hooks/useFetch' +import { MiningStrategy } from '@/constants/iteration' + +import RecommendKeywords from "@/components/common/recommendKeywords" +import Panel from "@/components/form/panel" +import DatasetSelect from "@/components/form/datasetSelect" +import Desc from "@/components/form/desc" +import SubmitButtons from "./submitButtons" + +function Fusion({ query = {}, hidden, ok = () => { }, bottom }) { + const { did, iterationId, currentStage, chunk, strategy = '', merging } = query + + const pageParams = useParams() + const pid = Number(pageParams.id) + const history = useHistory() + const [form] = Form.useForm() + const [includeDatasets, setIncludeDatasets] = useState([]) + const [excludeDatasets, setExcludeDatasets] = useState([]) + const [miningStrategy, setMiningStrategy] = useState(strategy || 0) + const [excludeResult, setExcludeResult] = useState(strategy === '' ? false : true) + const [keywords, setKeywords] = useState([]) + const [selectedKeywords, setSelectedKeywords] = useState([]) + const [selectedExcludeKeywords, setExcludeKeywords] = useState([]) + const [visible, setVisible] = useState(false) + const [fusionResult, fusion] = useFetch("task/fusion") + const dataset = useSelector(({ dataset }) => dataset.dataset[did] || {}) + const [_d, getDataset] = useFetch('dataset/getDataset') + + const initialValues = { + name: 'task_fusion_' + randomNumber(), + samples: chunk || 1000, + include_datasets: Number(merging) ? 
[Number(merging)] : [],
+    strategy: 2,
+  }
+
+  useEffect(() => fusionResult && ok(fusionResult), [fusionResult])
+
+  useEffect(() => did && getDataset({ id: did }), [did])
+
+  useEffect(() => {
+    dataset.id && includeDatasets.length && setKeywordOptions([dataset, ...includeDatasets])
+  }, [dataset.id, includeDatasets])
+
+  useEffect(() => {
+    const state = history.location.state
+
+    if (state?.record) {
+      const { parameters, name, } = state.record
+      const { include_classes, include_datasets, exclude_classes, include_strategy } = parameters
+      // do something
+      form.setFieldsValue({
+        name: `${name}_${randomNumber()}`,
+        datasets: include_datasets,
+        inc: include_classes,
+        exc: exclude_classes,
+        strategy: include_strategy,
+      })
+      setSelectedKeywords(include_classes)
+      setExcludeKeywords(exclude_classes)
+      history.replace({ state: {} })
+    }
+  }, [history.location.state])
+
+  const setKeywordOptions = (datasets = []) => {
+    const kws = datasets.map(ds => ds.keywords).flat().filter(i => i)
+    console.log('kws:', kws)
+    setKeywords([...new Set(kws)].sort())
+  }
+
+  const checkInputs = (i) => {
+    return i.exc || i.inc || i.samples || i?.exclude_datasets?.length || i?.include_datasets?.length
+  }
+
+  const onFinish = async (values) => {
+    if (!checkInputs(values)) {
+      return message.error(t('dataset.fusion.validate.inputs'))
+    }
+    const params = {
+      ...values,
+      project_id: dataset.projectId,
+      group_id: dataset.groupId,
+      dataset: did,
+      include: selectedKeywords,
+      exclude: selectedExcludeKeywords,
+      mining_strategy: miningStrategy,
+      exclude_result: excludeResult,
+      include_strategy: Number(values.strategy) || 2,
+    }
+    if (iterationId) {
+      params.iteration = iterationId
+      params.stage = currentStage
+    }
+    fusion(params)
+  }
+
+  const onFinishFailed = (err) => {
+    console.log("on finish failed: ", err)
+  }
+
+  function onIncludeDatasetChange(values) {
+    setIncludeDatasets(values)
+
+    // reset
+    setSelectedKeywords([])
+    setExcludeKeywords([])
+    form.setFieldsValue({ inc: [], exc: [] })
+  }
+  function onExcludeDatasetChange(values) {
+    setExcludeDatasets(values)
+    // todo inter keywords
+  }
+
+  function miningStrategyChanged({ target: { checked } }) {
+    if (Number(strategy) === MiningStrategy.free) {
+      setMiningStrategy(checked ? MiningStrategy.unique : MiningStrategy.free)
+      setExcludeResult(true)
+    } else {
+      setExcludeResult(checked)
+    }
+  }
+
+  function selectRecommendKeywords(keyword) {
+    const kws = [...new Set([...selectedKeywords, keyword])]
+    setSelectedKeywords(kws)
+    form.setFieldsValue({ inc: kws })
+  }
+
+  const includesFilter = useCallback((dss) => dss.filter(ds => ![...excludeDatasets, did].includes(ds.id)), [excludeDatasets, did])
+
+  return (
+
+
+
+        {bottom ?
bottom : } + + + ) +} + +export default Fusion diff --git a/ymir/web/src/components/task/items/keywords.js b/ymir/web/src/components/task/items/keywords.js new file mode 100644 index 0000000000..2c46cd2fd1 --- /dev/null +++ b/ymir/web/src/components/task/items/keywords.js @@ -0,0 +1,8 @@ +import { Descriptions, Tag } from "antd" +import t from '@/utils/t' + +export default (keywords = []) => ( + + {keywords.map((keyword) => {keyword})} + +) diff --git a/ymir/web/src/components/task/items/livecode.js b/ymir/web/src/components/task/items/livecode.js new file mode 100644 index 0000000000..3baf9d42dc --- /dev/null +++ b/ymir/web/src/components/task/items/livecode.js @@ -0,0 +1,22 @@ +import { Descriptions, Tag } from "antd" +import t from '@/utils/t' +import { FIELDS, getConfigUrl, isLiveCode } from "@/components/form/items/liveCodeConfig" + +export default (config = {}) => { + const configUrl = getConfigUrl(config) + const fields = FIELDS.map(({ key, field }, index) => ({ + label: `task.train.live.${key}`, + key: field, + extra: index === FIELDS.length - 1 ? {t('common.view')} : null, + })) + const typeLabel = isLiveCode(config) ? 'live' : 'local' + const typeItem = {t(`task.detail.function.${typeLabel}`)} + return <> + {typeItem} + {isLiveCode(config) ? fields.map(({ label, key, extra }) => ( + + {config[key]} {extra} + + )) : null} + +} diff --git a/ymir/web/src/components/task/label.js b/ymir/web/src/components/task/label.js new file mode 100644 index 0000000000..34c66a08b6 --- /dev/null +++ b/ymir/web/src/components/task/label.js @@ -0,0 +1,211 @@ +import React, { useEffect, useState } from "react" +import { connect } from "dva" +import { Select, Input, Button, Form, Row, Col, Checkbox, Space, } from "antd" +import { useHistory, useParams, Link } from "umi" + +import { formLayout } from "@/config/antd" +import t from "@/utils/t" +import Uploader from "@/components/form/uploader" +import { randomNumber } from "@/utils/number" +import useFetch from '@/hooks/useFetch' + +import DatasetSelect from "@/components/form/datasetSelect" +import Desc from "@/components/form/desc" +import Tip from "@/components/form/tip" +import SubmitButtons from "./submitButtons" + +import styles from "./label.less" +import KeepAnnotations from "./label/keepAnnotations" + +const LabelTypes = () => [ + { id: "part", label: t('task.label.form.type.newer'), checked: true }, + { id: "all", label: t('task.label.form.type.all') }, +] + +function Label({ query = {}, hidden, datasets, keywords, ok = () => { }, bottom, ...func }) { + const pageParams = useParams() + const pid = Number(pageParams.id) + const { iterationId, outputKey, currentStage } = query + const did = Number(query.did) + const history = useHistory() + const [doc, setDoc] = useState(undefined) + const [form] = Form.useForm() + const [asChecker, setAsChecker] = useState(false) + const [project, getProject] = useFetch('project/getProject', {}) + + useEffect(() => { + func.getKeywords({ limit: 100000 }) + }, []) + + useEffect(() => { + iterationId && pid && getProject({ id: pid }) + }, [pid, iterationId]) + + useEffect(() => { + project.id && form.setFieldsValue({ keywords: project.keywords }) + }, [project]) + + const onFinish = async (values) => { + const { labellers, checker } = values + const emails = [labellers] + checker && emails.push(checker) + const params = { + ...values, + projectId: pid, + labellers: emails, + doc, + name: 'task_label_' + randomNumber(), + } + const result = await func.label(params) + result && ok(result.result_dataset) + } + + 
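Several of the rewritten components in this patch (fusion, merge, assets, detail) replace dva `connect` wiring with a `useFetch` hook that returns a `[result, fetch, setResult]` triple keyed by a dva effect name. The hook itself lives at `@/hooks/useFetch` and is not shown in this diff; the following is only a hypothetical sketch of such a hook, assuming dva's `useDispatch` is re-exported by umi and that dispatched effects resolve with the fetched payload:

// Hypothetical sketch -- not the actual @/hooks/useFetch implementation.
import { useState } from 'react'
import { useDispatch } from 'umi'

export default function useFetch(effect, initial = null) {
  const dispatch = useDispatch()
  const [result, setResult] = useState(initial)

  // Dispatch the dva effect (e.g. 'dataset/getDataset') and cache its result.
  async function fetch(payload = {}) {
    const res = await dispatch({ type: effect, payload })
    if (typeof res !== 'undefined') {
      setResult(res)
    }
    return res
  }

  return [result, fetch, setResult]
}

Callers then destructure the triple, e.g. `const [dataset, getDataset] = useFetch('dataset/getDataset', {})`, and trigger `getDataset({ id })` from an effect, as fusion.js does above.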
function docChange(files, docFile) { + setDoc(files.length ? location.protocol + '//' + location.host + docFile : '') + } + + function onFinishFailed(errorInfo) { + console.log("Failed:", errorInfo) + } + + const getCheckedValue = (list) => list.find((item) => item.checked)["id"] + const initialValues = { + datasetId: did || undefined, + labelType: getCheckedValue(LabelTypes()), + } + return ( +
    +
    + + + {bottom ? bottom : } +
    {t('task.label.bottomtip', { link: {t('task.label.bottomtip.link.label')} })}
    +
    +
    +
    + ) +} + +const dis = (dispatch) => { + return { + getDataset(id, force) { + return dispatch({ + type: "dataset/getDataset", + payload: { id, force }, + }) + }, + label(payload) { + return dispatch({ + type: "task/label", + payload, + }) + }, + clearCache() { + return dispatch({ type: "dataset/clearCache", }) + }, + getKeywords(payload) { + return dispatch({ + type: 'keyword/getKeywords', + payload, + }) + }, + updateIteration(params) { + return dispatch({ + type: 'iteration/updateIteration', + payload: params, + }) + }, + } +} + +const stat = (state) => { + return { + datasets: state.dataset.dataset, + keywords: state.keyword.keywords.items, + } +} + +export default connect(stat, dis)(Label) diff --git a/ymir/web/src/pages/task/label/index.less b/ymir/web/src/components/task/label.less similarity index 100% rename from ymir/web/src/pages/task/label/index.less rename to ymir/web/src/components/task/label.less diff --git a/ymir/web/src/components/task/label/keepAnnotations.js b/ymir/web/src/components/task/label/keepAnnotations.js new file mode 100644 index 0000000000..5d742a95c2 --- /dev/null +++ b/ymir/web/src/components/task/label/keepAnnotations.js @@ -0,0 +1,22 @@ +import { Form, Radio } from "antd" +import t from "@/utils/t" + +const options = [ + { value: 1, label: 'gt' }, + { value: 2, label: 'pred' }, + { value: undefined, label: 'none' }, +] + +const KeepAnnotations = ({ initialValue, ...rest }) => { + const prefix = 'task.label.form.keep_anno.' + return + ({ ...opt, label: t(prefix + opt.label) }))} /> + +} + +export default KeepAnnotations diff --git a/ymir/web/src/components/task/merge.js b/ymir/web/src/components/task/merge.js new file mode 100644 index 0000000000..00aef81986 --- /dev/null +++ b/ymir/web/src/components/task/merge.js @@ -0,0 +1,140 @@ +import React, { useCallback, useEffect, useState } from "react" +import { Form, message } from "antd" +import { useHistory, useParams } from "umi" + +import { formLayout } from "@/config/antd" +import t from "@/utils/t" +import useFetch from '@/hooks/useFetch' + +import DatasetSelect from "@/components/form/datasetSelect" +import Desc from "@/components/form/desc" +import MergeType from "./merge/formItem.mergeType" +import DatasetName from "@/components/form/items/datasetName" +import Strategy from "./merge/formItem.strategy" +import SubmitButtons from "./submitButtons" + +import s from "./merge/merge.less" + +const { useWatch, useForm } = Form + +function Merge({ query = {}, hidden, ok = () => { }, bottom, }) { + const [dataset, getDataset, setDataset] = useFetch('dataset/getDataset', {}) + const [_, clearCache] = useFetch('dataset/clearCache') + const [mergeResult, merge] = useFetch('task/merge') + const pageParams = useParams() + const pid = Number(pageParams.id) + const { did, mid, iterationId, currentStage, outputKey, } = query + const history = useHistory() + const [form] = useForm() + const [group, setGroup] = useState() + const includes = useWatch('includes', form) + const excludes = useWatch('excludes', form) + const type = useWatch('type', form) + const selectedDataset = useWatch('dataset', form) + + + const initialValues = { + includes: mid ? (Array.isArray(mid) ? 
mid : mid.split(',').map(Number)) : [], + } + + useEffect(() => { + did && getDataset({ id: did }) + }, [did]) + + useEffect(() => dataset.id && setGroup(dataset.groupId), [dataset]) + + useEffect(() => { + if (mergeResult) { + ok(mergeResult) + message.info(t('task.fusion.create.success.msg')) + } + }, [mergeResult]) + + const checkInputs = (i) => { + return i?.excludes?.length || i?.includes?.length + } + + const onFinish = async (values) => { + if (!checkInputs(values)) { + return message.error(t('dataset.merge.validate.inputs')) + } + const params = { + ...values, + group: type ? group : undefined, + projectId: pid, + datasets: [did, selectedDataset, ...(values.includes || [])].filter(item => item), + } + await merge(params) + } + + const onFinishFailed = (err) => { + console.log("on finish failed: ", err) + } + + function filter(datasets, ids = []) { + return datasets.filter(ds => ![...ids, did].includes(ds.id)) + } + + function originDatasetChange(_, option) { + setDataset(option?.dataset || {}) + setGroup(option?.dataset?.groupId || undefined) + } + + const originFilter = useCallback(datasets => filter(datasets, [...(includes || []), ...(excludes || [])]), [includes, excludes]) + + const includesFilter = useCallback(datasets => filter(datasets, [selectedDataset, ...(excludes || [])]), [selectedDataset, excludes]) + + const excludesFilter = useCallback(datasets => filter(datasets, [selectedDataset, ...(includes || [])]), [selectedDataset, includes]) + + return ( +
    + + + {bottom ? bottom : } + +
    + ) +} + +export default Merge diff --git a/ymir/web/src/components/task/merge/formItem.mergeType.js b/ymir/web/src/components/task/merge/formItem.mergeType.js new file mode 100644 index 0000000000..9107a6a2a4 --- /dev/null +++ b/ymir/web/src/components/task/merge/formItem.mergeType.js @@ -0,0 +1,21 @@ +import { Form } from "antd" +import RadioGroup from "@/components/form/radioGroup" +import t from '@/utils/t' + +const options = [ + { value: 0, label: 'new' }, + { value: 1, label: 'exist' }, +] +const MergeType = ({ initialValue = 0, disabled = [] }) => ( + + ({ + ...option, + disabled: disabled.includes(option.value) + }))} + labelPrefix='task.merge.type.' + /> + +) + +export default MergeType diff --git a/ymir/web/src/components/task/merge/formItem.strategy.js b/ymir/web/src/components/task/merge/formItem.strategy.js new file mode 100644 index 0000000000..88d40901ff --- /dev/null +++ b/ymir/web/src/components/task/merge/formItem.strategy.js @@ -0,0 +1,23 @@ +import { Form } from "antd" +import RadioGroup from "@/components/form/radioGroup" +import t from '@/utils/t' + +const options = [ + { value: 2, label: 'latest' }, + { value: 3, label: 'original' }, + { value: 1, label: 'terminate' }, +] + +const Strategy = ({ initialValue = 2, hidden = true, ...rest }) => { + const prefix = 'task.train.form.repeatdata.' + return +} + +export default Strategy diff --git a/ymir/web/src/components/task/merge/merge.less b/ymir/web/src/components/task/merge/merge.less new file mode 100644 index 0000000000..543816fbb5 --- /dev/null +++ b/ymir/web/src/components/task/merge/merge.less @@ -0,0 +1,18 @@ +.dataset { + margin: 0 0 0 30px; +} +.keyword { + border: 1px solid #ccc; + font-size: 16px; + padding: 4px 12px; + margin-bottom: 10px; +} +.classics { + display: flex; + width: 80%; + text-align: left; + margin: auto; +} +.submit { + // margin: 50px 0 0 25%; +} diff --git a/ymir/web/src/components/task/mining.js b/ymir/web/src/components/task/mining.js new file mode 100644 index 0000000000..2b5553e500 --- /dev/null +++ b/ymir/web/src/components/task/mining.js @@ -0,0 +1,308 @@ +import React, { useEffect, useState } from "react" +import { connect } from "dva" +import { Card, Radio, Button, Form, ConfigProvider, Space, InputNumber } from "antd" +import { useHistory, useParams, useLocation } from "umi" + + +import { formLayout } from "@/config/antd" +import t from "@/utils/t" +import { HIDDENMODULES } from '@/constants/common' +import { string2Array } from '@/utils/string' +import { OPENPAI_MAX_GPU_COUNT } from '@/constants/common' +import { TYPES } from '@/constants/image' +import { randomNumber } from "@/utils/number" +import useFetch from '@/hooks/useFetch' + +import ModelSelect from "@/components/form/modelSelect" +import ImageSelect from "@/components/form/imageSelect" +import DatasetSelect from "@/components/form/datasetSelect" +import LiveCodeForm from "@/components/form/items/liveCode" +import { removeLiveCodeConfig } from "@/components/form/items/liveCodeConfig" +import DockerConfigForm from "@/components/form/items/dockerConfig" +import OpenpaiForm from "@/components/form/items/openpai" +import Desc from "@/components/form/desc" + +import styles from "./mining.less" +import SubmitButtons from "./submitButtons" + +function Mining({ query = {}, hidden, ok = () => { }, datasetCache, bottom, ...func }) { + const pageParams = useParams() + const pid = Number(pageParams.id) + const history = useHistory() + const location = useLocation() + const { mid, image, iterationId, currentStage, outputKey } = 
query + const stage = mid ? (Array.isArray(mid) ? mid : mid.split(',').map(Number)) : undefined + const did = Number(query.did) + const [dataset, setDataset] = useState({}) + const [selectedModel, setSelectedModel] = useState({}) + const [form] = Form.useForm() + const [seniorConfig, setSeniorConfig] = useState({}) + const [topk, setTopk] = useState(true) + const [gpu_count, setGPU] = useState(0) + const [imageHasInference, setImageHasInference] = useState(false) + const [live, setLiveCode] = useState(false) + const [openpai, setOpenpai] = useState(false) + const [sys, getSysInfo] = useFetch('common/getSysInfo', {}) + const selectOpenpai = Form.useWatch('openpai', form) + const [showConfig, setShowConfig] = useState(false) + + useEffect(() => { + getSysInfo() + }, []) + + useEffect(() => { + setGPU(sys.gpu_count || 0) + if (!HIDDENMODULES.OPENPAI) { + setOpenpai(!!sys.openpai_enabled) + } + }, [sys]) + + useEffect(() => { + setGPU(selectOpenpai ? OPENPAI_MAX_GPU_COUNT : sys.gpu_count || 0) + }, [selectOpenpai]) + + useEffect(() => { + did && func.getDataset(did) + }, [did]) + + useEffect(() => { + const cache = datasetCache[did] + if (cache) { + setDataset(cache) + } + }, [datasetCache]) + + useEffect(() => { + const state = location.state + + if (state?.record) { + const { task: { parameters, config }, description, } = state.record + const { + dataset_id, + docker_image, + docker_image_id, + model_id, + model_stage_id, + top_k, + generate_annotations, + } = parameters + form.setFieldsValue({ + datasetId: dataset_id, + gpu_count: config.gpu_count, + modelStage: [model_id, model_stage_id], + image: docker_image_id + ',' + docker_image, + topk: top_k, + inference: generate_annotations, + description, + }) + setShowConfig(true) + + setTimeout(() => setConfig(config), 500) + + history.replace({ state: {} }) + } + }, [location.state]) + + function imageChange(_, image = {}) { + const { url, configs = [] } = image + const configObj = configs.find(conf => conf.type === TYPES.MINING) || {} + const hasInference = configs.some(conf => conf.type === TYPES.INFERENCE) + setImageHasInference(hasInference) + form.setFieldsValue({ inference: hasInference }) + if (!HIDDENMODULES.LIVECODE) { + setLiveCode(image.liveCode || false) + } + setConfig(removeLiveCodeConfig(configObj.config)) + } + + function setConfig(config = {}) { + setSeniorConfig(config) + } + + const onFinish = async (values) => { + const config = { + ...values.hyperparam?.reduce( + (prev, { key, value }) => key && value ? { ...prev, [key]: value } : prev, + {}), + ...(values.live || {}), + } + + config['gpu_count'] = form.getFieldValue('gpu_count') || 0 + + const img = (form.getFieldValue('image') || '').split(',') + const imageId = Number(img[0]) + const image = img[1] + const params = { + ...values, + name: 'task_mining_' + randomNumber(), + projectId: pid, + imageId, + image, + config, + } + const result = await func.mine(params) + result && ok(result.result_dataset) + } + + function onFinishFailed(errorInfo) { + console.log("Failed:", errorInfo) + } + + function setsChange(id, option) { + setDataset(option?.dataset || {}) + } + + function modelChange(stage, options) { + if (stage && !stage[1] && options && options[1]) { + form.setFieldsValue({ modelStage: [stage[0], options[1]] }) + } + setSelectedModel(options ? options[0].model : []) + } + + const initialValues = { + modelStage: stage, + image: image ? parseInt(image) : undefined, + datasetId: did ? did : undefined, + topk: 0, + gpu_count: 0, + } + return ( +
    +
    + + + {bottom ? bottom : } + +
    +
    + ) +} + +const props = (state) => { + return { + datasetCache: state.dataset.dataset, + } +} + +const dis = (dispatch) => { + return { + getSysInfo() { + return dispatch({ + type: "common/getSysInfo", + }) + }, + getDatasets(pid, force = true) { + return dispatch({ + type: "dataset/queryAllDatasets", + payload: { pid, force }, + }) + }, + getDataset(id, force) { + return dispatch({ + type: "dataset/getDataset", + payload: { id, force }, + }) + }, + clearCache() { + return dispatch({ type: "dataset/clearCache", }) + }, + mine(payload) { + return dispatch({ + type: "task/mine", + payload, + }) + }, + updateIteration(params) { + return dispatch({ + type: 'iteration/updateIteration', + payload: params, + }) + }, + } +} + +export default connect(props, dis)(Mining) diff --git a/ymir/web/src/components/task/mining.less b/ymir/web/src/components/task/mining.less new file mode 100644 index 0000000000..a9d1e74528 --- /dev/null +++ b/ymir/web/src/components/task/mining.less @@ -0,0 +1,21 @@ +// .wrapper { +// padding: 100px; +// } +.searchLabel { + width: 200px; +} +.cacheSet { + margin: 20px 0 0 0; +} +.scoop { + display: flex; + margin: 50px 0 0 0; +} +.scoopForm :global(.ant-form-item-control-input-content) { + flex: none; +} +.paramContainer { + border: 1px solid #f4f4f4; + padding: 0 10px; + margin-bottom: 10px; +} diff --git a/ymir/web/src/components/task/progress.js b/ymir/web/src/components/task/progress.js index fd637ceed6..b1e43c0f1d 100644 --- a/ymir/web/src/components/task/progress.js +++ b/ymir/web/src/components/task/progress.js @@ -4,7 +4,7 @@ import { Button, Col, Descriptions, Progress, Row } from "antd" import t from "@/utils/t" import { toFixed } from "@/utils/number" import Terminate from "./terminate" -import { states } from "@/constants/dataset" +import { ResultStates } from "@/constants/common" import { TASKSTATES } from "@/constants/task" import StateTag from "@/components/task/stateTag" import s from "./detail.less" @@ -25,7 +25,7 @@ function TaskProgress({ state, result = {}, task = {}, fresh = () => { }, progre } function terminateVisible() { - const resultReady = state === states.READY + const resultReady = state === ResultStates.READY const isTerminated = task.is_terminated const isPending = task.state === TASKSTATES.PENDING return !isPending && resultReady && !isTerminated @@ -46,14 +46,14 @@ function TaskProgress({ state, result = {}, task = {}, fresh = () => { }, progre - {task.is_terminated && state === states.READY ? t('task.state.terminating') : <> + {task.is_terminated && state === ResultStates.READY ? t('task.state.terminating') : <> - {state === states.VALID + {state === ResultStates.VALID ? 
t("task.column.duration") + ": " + duration : null} } - + { + const history = useHistory() + return + + + + + + +} + +export default SubmitButtons diff --git a/ymir/web/src/components/task/training.js b/ymir/web/src/components/task/training.js new file mode 100644 index 0000000000..d13d5fa864 --- /dev/null +++ b/ymir/web/src/components/task/training.js @@ -0,0 +1,424 @@ +import React, { useCallback, useEffect, useState } from "react" +import { connect } from "dva" +import { Select, Radio, Button, Form, Space, InputNumber, Tag, Tooltip } from "antd" +import { formLayout } from "@/config/antd" +import { useHistory, useLocation, useParams } from "umi" + +import t from "@/utils/t" +import { HIDDENMODULES } from '@/constants/common' +import { string2Array, generateName } from '@/utils/string' +import { OPENPAI_MAX_GPU_COUNT } from '@/constants/common' +import { TYPES } from '@/constants/image' +import { randomNumber } from "@/utils/number" +import useFetch from '@/hooks/useFetch' + +import ImageSelect from "@/components/form/imageSelect" +import ModelSelect from "@/components/form/modelSelect" +import SampleRates from "@/components/dataset/sampleRates" +import CheckProjectDirty from "@/components/common/CheckProjectDirty" +import LiveCodeForm from "@/components/form/items/liveCode" +import { removeLiveCodeConfig } from "@/components/form/items/liveCodeConfig" +import DockerConfigForm from "@/components/form/items/dockerConfig" +import OpenpaiForm from "@/components/form/items/openpai" +import DatasetSelect from "@/components/form/datasetSelect" +import Desc from "@/components/form/desc" +import useDuplicatedCheck from "@/hooks/useDuplicatedCheck" +import TrainFormat from "./training/trainFormat" +import SubmitButtons from "./submitButtons" + +import styles from "./training/training.less" + +const TrainType = [{ value: "detection", label: 'task.train.form.traintypes.detect', checked: true }] + +const KeywordsMaxCount = 5 + +function Train({ query = {}, hidden, ok = () => { }, bottom, allDatasets, datasetCache, ...func }) { + const pageParams = useParams() + const pid = Number(pageParams.id) + const history = useHistory() + const location = useLocation() + const { mid, image, iterationId, outputKey, currentStage, test, from } = query + const stage = mid ? (Array.isArray(mid) ? 
mid : mid.split(',').map(Number)) : undefined + const did = Number(query.did) + const [selectedKeywords, setSelectedKeywords] = useState([]) + const [dataset, setDataset] = useState({}) + const [trainSet, setTrainSet] = useState(null) + const [testSet, setTestSet] = useState(null) + const [validationDataset, setValidationDataset] = useState(null) + const [trainDataset, setTrainDataset] = useState(null) + const [testingSetIds, setTestingSetIds] = useState([]) + const [form] = Form.useForm() + const [seniorConfig, setSeniorConfig] = useState({}) + const [gpu_count, setGPU] = useState(0) + const [projectDirty, setProjectDirty] = useState(false) + const [live, setLiveCode] = useState(false) + const [openpai, setOpenpai] = useState(false) + const checkDuplicated = useDuplicatedCheck(submit) + const [sys, getSysInfo] = useFetch('common/getSysInfo', {}) + const [project, getProject] = useFetch('project/getProject', {}) + const [updated, updateProject] = useFetch('project/updateProject') + const [fromCopy, setFromCopy] = useState(false) + + const selectOpenpai = Form.useWatch('openpai', form) + const [showConfig, setShowConfig] = useState(false) + const iterationContext = from === 'iteration' + + const renderRadio = (types) => ({ ...type, label: t(type.label) }))} /> + + useEffect(() => { + getSysInfo() + getProject({ id: pid }) + }, []) + + useEffect(() => { + setGPU(sys.gpu_count) + if (!HIDDENMODULES.OPENPAI) { + setOpenpai(!!sys.openpai_enabled) + } + }, [sys]) + + useEffect(() => { + setGPU(selectOpenpai ? OPENPAI_MAX_GPU_COUNT : sys.gpu_count || 0) + }, [selectOpenpai]) + + useEffect(() => { + setTestingSetIds(project?.testingSets || []) + iterationContext && setSelectedKeywords(project?.keywords || []) + }, [project]) + + useEffect(() => { + if (did && allDatasets?.length) { + const isValid = allDatasets.some(ds => ds.id === did) + const visibleValue = isValid ? 
did : null + setTrainSet(visibleValue) + form.setFieldsValue({ datasetId: visibleValue }) + } + }, [did, allDatasets]) + + useEffect(() => { + did && func.getDataset(did) + }, [did]) + + useEffect(() => { + const dst = datasetCache[did] + dst && setDataset(dst) + }, [datasetCache]) + + useEffect(() => { + pid && func.getDatasets(pid) + }, [pid]) + + useEffect(() => { + trainDataset && + !iterationContext && + !fromCopy && + setAllKeywords() + if (!trainDataset && fromCopy) { + setSelectedKeywords([]) + form.setFieldsValue({ keywords: [] }) + } + }, [trainDataset]) + + useEffect(() => { + const state = location.state + + if (state?.record) { + setFromCopy(true) + const { task: { parameters, config }, description, } = state.record + const { + dataset_id, + validation_dataset_id, + strategy, + docker_image, + docker_image_id, + model_id, + model_stage_id, + keywords, + } = parameters + form.setFieldsValue({ + datasetId: dataset_id, + keywords: keywords, + testset: validation_dataset_id, + gpu_count: config.gpu_count, + modelStage: [model_id, model_stage_id], + image: docker_image_id + ',' + docker_image, + strategy, + description, + }) + setTimeout(() => setConfig(config), 500) + setTestSet(validation_dataset_id) + setTrainSet(dataset_id) + setSelectedKeywords(keywords) + setShowConfig(true) + + history.replace({ state: {} }) + } + }, [location.state]) + + function setAllKeywords() { + const kws = trainDataset?.gt?.keywords + setSelectedKeywords(kws) + form.setFieldsValue({ keywords: kws }) + } + + function trainSetChange(value, option) { + setTrainSet(value) + setTrainDataset(option?.dataset) + } + function validationSetChange(value, option) { + setTestSet(value) + setValidationDataset(option?.dataset) + } + + function imageChange(_, image = {}) { + const { configs } = image + const configObj = (configs || []).find(conf => conf.type === TYPES.TRAINING) || {} + if (!HIDDENMODULES.LIVECODE) { + setLiveCode(image.liveCode || false) + } + setConfig(removeLiveCodeConfig(configObj.config)) + } + + function setConfig(config = {}) { + setSeniorConfig(config) + } + + const onFinish = () => checkDuplicated(trainDataset, validationDataset) + + async function submit(strategy) { + const values = form.getFieldsValue() + const config = { + ...values.hyperparam?.reduce( + (prev, { key, value }) => key !== '' && value !== '' ? { ...prev, [key]: value } : prev, + {}), + ...(values.live || {}), + } + values.trainFormat && (config['export_format'] = values.trainFormat) + + const gpuCount = form.getFieldValue('gpu_count') + + config['gpu_count'] = gpuCount || 0 + + const img = (form.getFieldValue('image') || '').split(',') + const imageId = Number(img[0]) + const image = img[1] + const params = { + ...values, + strategy, + name: 'group_' + randomNumber(), + projectId: pid, + keywords: iterationContext ? 
project.keywords : values.keywords, + image, + imageId, + config, + } + const result = await func.train(params) + result && ok(result.result_model) + } + + function onFinishFailed(errorInfo) { + console.log("Failed:", errorInfo) + } + + const matchKeywords = dataset => dataset.keywords.some(kw => selectedKeywords.includes(kw)) + const notTestingSet = id => !testingSetIds.includes(id) + const trainsetFilters = useCallback(datasets => datasets.filter(ds => { + const notTestSet = ds.id !== testSet + return notTestSet && notTestingSet(ds.id) + }), [testSet, testingSetIds]) + + const validationSetFilters = useCallback(datasets => datasets.filter(ds => { + const notTrainSet = ds.id !== trainSet + return matchKeywords(ds) && notTrainSet && notTestingSet(ds.id) + }), [trainSet, selectedKeywords, testingSetIds]) + + const getCheckedValue = (list) => list.find((item) => item.checked)["value"] + const initialValues = { + name: generateName('train_model'), + datasetId: did ? did : undefined, + testset: Number(test) ? Number(test) : undefined, + image: image ? parseInt(image) : undefined, + modelStage: stage, + trainType: getCheckedValue(TrainType), + gpu_count: 1, + } + return ( +
    + +
    +
    + setFormatDetailModal(false)} />
    ) } - -const actions = (dispatch) => { - return { - getInternalDataset: (payload) => { - return dispatch({ - type: 'dataset/getInternalDataset', - payload, - }) - }, - createDataset: (payload) => { - return dispatch({ - type: 'dataset/createDataset', - payload, - }) - }, - clearCache() { - return dispatch({ type: "dataset/clearCache", }) - }, - updateKeywords: (payload) => { - return dispatch({ - type: 'keyword/updateKeywords', - payload, - }) - }, - } -} - -export default connect(null, actions)(Add) +export default Add diff --git a/ymir/web/src/pages/dataset/add.less b/ymir/web/src/pages/dataset/add.less index f5887a4659..74e7d06337 100644 --- a/ymir/web/src/pages/dataset/add.less +++ b/ymir/web/src/pages/dataset/add.less @@ -5,17 +5,17 @@ background: #fff; display: flex; flex-direction: column; - height: calc(100vh - 180px); + height: calc(100vh - 186px); :global(.ant-card-body) { flex: 1; overflow-y: auto; } } .newkwTip { - @color: rgb(79, 187, 187); - background: fade(@primary-color, 10); + @color: rgb(24, 144, 255); + background: fade(rgb(44, 189, 233), 10); border-radius: 2px; - border: 1px solid fade(@primary-color, 50); + border: 1px solid rgb(24, 144, 255); line-height: 25px; padding: 6px 15px; color: @color; diff --git a/ymir/web/src/pages/dataset/analysis.js b/ymir/web/src/pages/dataset/analysis.js new file mode 100644 index 0000000000..a7a6ae3bc2 --- /dev/null +++ b/ymir/web/src/pages/dataset/analysis.js @@ -0,0 +1,345 @@ +import React, { useEffect, useState } from "react" +import { Button, Form, Row, Col, Table, Popover, Card, Radio } from "antd" +import { useParams } from "umi" + +import t from "@/utils/t" +import useFetch from "@/hooks/useFetch" +import { humanize } from "@/utils/number" + +import Breadcrumbs from '@/components/common/breadcrumb' +import Panel from "@/components/form/panel" +import DatasetSelect from "@/components/form/datasetSelect" +import AnalysisChart from "./components/analysisChart" + +import style from "./analysis.less" +import { CompareIcon } from "@/components/common/icons" + +const options = [ + { value: 'gt' }, + { value: 'pred' } +] + +function Analysis() { + const [form] = Form.useForm() + const { id: pid } = useParams() + const [remoteSource, fetchSource] = useFetch('dataset/analysis') + const [source, setSource] = useState([]) + const [datasets, setDatasets] = useState([]) + const [tableSource, setTableSource] = useState([]) + const [chartsData, setChartsData] = useState([]) + const [annotationsType, setAnnotationType] = useState(options[0].value) + + useEffect(() => { + setTableSource(source) + setAnalysisData(source) + }, [source, annotationsType]) + + useEffect(() => { + setSource(remoteSource) + }, [remoteSource]) + + function setAnalysisData(datasets) { + const chartsMap = [ + { + label: 'dataset.analysis.title.asset_bytes', + sourceField: 'assetBytes', + totalField: 'assetCount', + xUnit: 'MB', + renderEachX: x => x.replace("MB", ""), + color: ['#10BC5E', '#F2637B'] + }, + { + label: 'dataset.analysis.title.asset_hw_ratio', + sourceField: 'assetHWRatio', + totalField: 'assetCount', + color: ['#36CBCB', '#E8B900'] + }, + { + label: 'dataset.analysis.title.asset_area', + sourceField: 'assetArea', + totalField: 'assetCount', + xUnit: 'PX', + renderEachX: x => `${x / 10000}W`, + color: ['#36CBCB', '#F2637B'], + }, + { + label: 'dataset.analysis.title.asset_quality', + sourceField: 'assetQuality', + totalField: 'assetCount', + color: ['#36CBCB', '#10BC5E'], + isXUpperLimit: true, + }, + { + label: 'dataset.analysis.title.anno_area_ratio', 
+ sourceField: 'areaRatio', + totalField: 'total', + customOptions: { + tooltipLable: 'dataset.analysis.bar.anno.tooltip', + }, + color: ['#10BC5E', '#E8B900'], + annoType: true, + isXUpperLimit: true, + }, + { + label: 'dataset.analysis.title.keyword_ratio', + sourceField: 'keywords', + totalField: 'total', + color: ['#2CBDE9', '#E8B900'], + annoType: true, + xType: 'attribute' + }, + ] + + const chartsConfig = datasets ? chartsMap.map(chart => { + const xData = chart.xType === 'attribute' ? getAttrXData(chart, datasets) : getXData(chart, datasets) + const yData = chart.xType === 'attribute' ? getAttrYData(chart, datasets, xData) : getYData(chart, datasets) + return { + label: chart.label, + customOptions: { + ...chart.customOptions, + xData, + color: chart.color, + xUnit: chart.xUnit, + yData + }, + } + }) : [] + setChartsData(chartsConfig) + } + + const getField = (item = {}, field, annoType) => { + return annoType && item[annotationsType] ? item[annotationsType][field] : item[field] + } + + function getXData({ sourceField, isXUpperLimit = false, annoType, renderEachX = x => x }, datasets) { + const dataset = datasets.find(item => { + const target = getField(item, sourceField, annoType) + return target && target.length > 0 + }) || datasets[0] + const field = getField(dataset, sourceField, annoType) + const xData = field ? field.map(item => renderEachX(item.x)) : [] + const transferXData = xData.map((x, index) => { + if (index === xData.length - 1) { + return isXUpperLimit ? x : `[${x},+)` + } else { + return `[${x},${xData[index + 1]})` + } + }) + return transferXData + } + + function getYData({ sourceField, annoType, totalField }, datasets) { + const yData = datasets && datasets.map(dataset => { + const total = getField(dataset, totalField, annoType) + const name = `${dataset.name} ${dataset.versionName}` + const field = getField(dataset, sourceField, annoType) + return { + name, + value: field.map(item => total ? (item.y / total).toFixed(4) : 0), + count: field.map(item => item.y) + } + }) + return yData + } + + function getAttrXData({ sourceField, annoType }, datasets) { + let xData = [] + datasets && datasets.forEach((dataset) => { + const field = getField(dataset, sourceField, annoType) + const datasetAttrs = Object.keys(field || {}) + xData = [...new Set([...xData, ...datasetAttrs])] + }) + return xData + } + + function getAttrYData({ sourceField, annoType, totalField }, datasets, xData) { + const yData = datasets && datasets.map(dataset => { + const total = getField(dataset, totalField, annoType) + const name = `${dataset.name} ${dataset.versionName}` + const attrObj = getField(dataset, sourceField, annoType) + return { + name, + value: xData.map(key => total ? (attrObj[key] ? 
(attrObj[key] / total).toFixed(4) : 0) : 0), + count: xData.map(key => attrObj[key] || 0) + } + }) + return yData + } + + function datasetsChange(values, options) { + setDatasets(options.map(option => option.dataset)) + } + + const onFinish = async (values) => { + const params = { + pid, + datasets: values.datasets + } + fetchSource(params) + } + + function onFinishFailed(errorInfo) { + console.log("Failed:", errorInfo) + } + + function retry() { + setSource(null) + } + + function showTitle(str) { + return {t(str)} + } + + const columns = [ + { + title: showTitle('dataset.analysis.column.name'), + dataIndex: "name", + ellipsis: true, + align: 'center', + className: style.colunmClass, + }, + { + title: showTitle('dataset.analysis.column.version'), + dataIndex: "versionName", + ellipsis: true, + align: 'center', + width: 80, + className: style.colunmClass, + }, + { + title: showTitle('dataset.analysis.column.size'), + dataIndex: "totalAssetMbytes", + ellipsis: true, + align: 'center', + className: style.colunmClass, + render: (num) => { + return num && {num}MB + }, + }, + { + title: showTitle('dataset.analysis.column.box_count'), + dataIndex: 'total', + ellipsis: true, + align: 'center', + className: style.colunmClass, + render: (_, record) => { + const num = getField(record, 'total', true) + return renderPop(humanize(num), num) + }, + }, + { + title: showTitle('dataset.analysis.column.average_labels'), + dataIndex: 'average', + ellipsis: true, + align: 'center', + className: style.colunmClass, + render: (_, record) => getField(record, 'average', true), + }, + { + title: showTitle('dataset.analysis.column.overall'), + dataIndex: 'metrics', + ellipsis: true, + align: 'center', + className: style.colunmClass, + render: (text, record) => { + const total = record.assetCount + const negative = getField(record, 'negative', true) + return renderPop(`${humanize(total - negative)}/${humanize(total)}`, `${total - negative}/${total}`) + }, + }, + ] + + function renderPop(label, content = {}) { + return + {label} + + } + + async function validDatasetCount(rule, value) { + const count = 5 + if (value?.length > count) { + return Promise.reject(t('dataset.analysis.validator.dataset.count', { count })) + } else { + return Promise.resolve() + } + } + + const initialValues = {} + + return ( +
    + + + + +
    + ({ ...opt, label: t(`annotation.${opt.value}`) }))} + onChange={({ target: { value } }) => setAnnotationType(value)} + > +
    + record.name + record.versionName} + rowClassName={style.rowClass} + className={style.tableClass} + columns={columns} + pagination={false} + /> + + {chartsData.map(chart => ( + +
    {t(chart.label)}
    + + + ))} + + + + + +
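The `getXData` helper above turns each histogram bin's lower bound into a half-open interval label, leaving the last bucket unbounded unless `isXUpperLimit` is set. A self-contained sketch of that transformation (the sample bounds are illustrative, not from the patch):

// Sketch mirroring getXData's bucket labelling.
function toIntervalLabels(bounds, isXUpperLimit = false) {
  return bounds.map((x, index) =>
    index === bounds.length - 1
      ? (isXUpperLimit ? x : `[${x},+)`)   // last bin: open-ended unless capped
      : `[${x},${bounds[index + 1]})`      // half-open interval up to the next bound
  )
}

console.log(toIntervalLabels([0, 10, 50]))       // [ '[0,10)', '[10,50)', '[50,+)' ]
console.log(toIntervalLabels([0, 0.5, 1], true)) // [ '[0,0.5)', '[0.5,1)', 1 ]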
    + + + + +
    + +
    +
    + +
    + + + + + + ) +} + +export default Analysis diff --git a/ymir/web/src/pages/dataset/analysis.less b/ymir/web/src/pages/dataset/analysis.less new file mode 100644 index 0000000000..323d1dd155 --- /dev/null +++ b/ymir/web/src/pages/dataset/analysis.less @@ -0,0 +1,36 @@ +.dataContainer { + padding: 10px 0; +} + +.filters { + line-height: 40px; +} + +.rowData { + border: 1px solid rgba(0, 0, 0, 0.06); + height: calc(100vh - 270px); + overflow-y: auto; + .tableClass { + max-width: calc(100% + 20px); + margin: 0 -10px; + } + .rowClass { + height: 40px; + } + tr.rowClass:hover > td { + background-color: rgba(232, 185, 0, 0.1); + } + th.colunmClass { + background: rgba(232, 185, 0, 0.2); + } + td.colunmClass { + background: rgba(232, 185, 0, 0.1); + } +} + +.echartTitle { + background: rgba(0, 0, 0, 0.06); + text-align: center; + padding: 10px; + margin: 10px 0; +} \ No newline at end of file diff --git a/ymir/web/src/pages/dataset/assets.js b/ymir/web/src/pages/dataset/assets.js index cbe34d4b6c..94f0c96f87 100644 --- a/ymir/web/src/pages/dataset/assets.js +++ b/ymir/web/src/pages/dataset/assets.js @@ -1,83 +1,74 @@ -import React, { useEffect, useState } from "react" -import { useParams, useHistory } from "umi" -import { connect } from "dva" -import { Select, Pagination, Image, Row, Col, Button, Space, Card, Descriptions, Tag, Modal } from "antd" +import React, { useCallback, useEffect, useRef, useState } from "react" +import { useParams } from "umi" +import { Select, Pagination, Row, Col, Button, Space, Card, Tag, Modal } from "antd" import t from "@/utils/t" -import Breadcrumbs from "@/components/common/breadcrumb" +import useFetch from '@/hooks/useFetch' import { randomBetween, percent } from '@/utils/number' + +import Breadcrumbs from "@/components/common/breadcrumb" import Asset from "./components/asset" import styles from "./assets.less" -import { ScreenIcon, TaggingIcon, TrainIcon, VectorIcon, WajueIcon, } from "@/components/common/icons" +import GtSelector from "@/components/form/gtSelector" +import ImageAnnotation from "@/components/dataset/imageAnnotation" +import useWindowResize from "@/hooks/useWindowResize" +import KeywordSelector from "./components/keywordSelector" +import EvaluationSelector from "@/components/form/evaluationSelector" const { Option } = Select -function rand(n, m, exclude) { - const result = Math.min(m, n) + Math.floor(Math.random() * Math.abs(m - n)) - - if (result === exclude) { - return rand(n, m, exclude) - } - if (result < 0) { - return 0 - } - return result -} - -const Dataset = ({ getDataset, getAssetsOfDataset }) => { - const { did: id } = useParams() +const Dataset = () => { + const { id: pid, did: id } = useParams() const initQuery = { id, - keyword: null, + keywords: [], offset: 0, limit: 20, } - const history = useHistory() const [filterParams, setFilterParams] = useState(initQuery) - const [dataset, setDataset] = useState({ id }) - const [assets, setAssets] = useState([]) - const [total, setTotal] = useState(0) const [currentPage, setCurrentPage] = useState(1) const [assetVisible, setAssetVisible] = useState(false) const [currentAsset, setCurrentAsset] = useState({ hash: null, index: 0, }) + const listRef = useRef(null) + const windowWidth = useWindowResize() + const [dataset, getDataset] = useFetch('dataset/getDataset', {}) + const [{ items: assets, total }, getAssets, setAssets] = useFetch('dataset/getAssetsOfDataset', { items: [], total: 0 }) - useEffect(async () => { - const data = await getDataset(id) - if (data) { - setDataset(data) - } + 
useEffect(() => { + getDataset({ id, verbose: true }) }, [id]) useEffect(() => { setCurrentPage((filterParams.offset / filterParams.limit) + 1) - filter(filterParams) - }, [filterParams]) - - const filterKw = (kw) => { - const keyword = kw ? kw : undefined - setFilterParams((params) => ({ - ...params, - keyword, - offset: initQuery.offset, - })) + dataset.id && filter(filterParams) + }, [dataset, filterParams]) + + const filterKw = ({ type, selected }) => { + const s = selected.map(item => Array.isArray(item) ? item.join(':') : item) + if (s.length || (!s.length && filterParams.keywords.length > 0)) { + setFilterParams((params) => ({ + ...params, + type, + keywords: s, + offset: initQuery.offset, + })) + } } + const filterPage = (page, pageSize) => { setCurrentPage(page) const limit = pageSize const offset = limit * (page - 1) setFilterParams((params) => ({ ...params, offset, limit })) } - const filter = async (param) => { - setAssets([]) - const { items, total } = await getAssetsOfDataset(param) - setTotal(total) - setAssets(items) + const filter = (param) => { + getAssets({ ...param, datasetKeywords: dataset?.keywords }) } - const goAsset = (hash, index) => { - setCurrentAsset({ hash, index: filterParams.offset + index}) + const goAsset = (asset, hash, index) => { + setCurrentAsset({ asset, hash, index: filterParams.offset + index }) setAssetVisible(true) } @@ -88,8 +79,26 @@ const Dataset = ({ getDataset, getAssetsOfDataset }) => { filterPage(page, limit) } - const getRate = (count) => { - return percent(count / dataset.assetCount) + const filterAnnotations = useCallback(annotations => { + const cm = filterParams.cm || [] + const annoType = filterParams.annoType || [] + const gtFilter = annotation => !annoType.length || annoType.some(selected => selected === 'gt' ? annotation.gt : !annotation.gt) + const evaluationFilter = annotation => !cm.length || cm.includes(annotation.cm) + return annotations.filter(annotation => gtFilter(annotation) && evaluationFilter(annotation)) + }, [filterParams.cm, filterParams.annoType]) + + const updateFilterParams = (value, field) => { + if (value?.length || (filterParams[field]?.length && !value?.length)) { + setFilterParams(query => ({ + ...query, + [field]: value, + offset: initQuery.offset, + })) + } + } + + const reset = () => { + setFilterParams(initQuery) } const randomPageButton = ( @@ -98,77 +107,77 @@ const Dataset = ({ getDataset, getAssetsOfDataset }) => { ) - const renderList = (list, row = 5) => { + const renderList = useCallback((list, row = 5) => { let r = 0, result = [] while (r < list.length) { result.push(list.slice(r, r + row)) r += row } - return result.map((rows, index) => ( - - {rows.map((asset, rowIndex) => ( - -
    goAsset(asset.hash, index * row + rowIndex)} - > - - { + const h = listRef.current?.clientWidth / rows.reduce((prev, row) => { + return (prev + row.metadata.width / row.metadata.height) + }, 0) + + return ( + + {rows.map((asset, rowIndex) => ( +
    +
    goAsset(asset, asset.hash, index * row + rowIndex)} > - {t("dataset.detail.assets.keywords.total", { - total: asset?.keywords?.length, - })} - - - {asset.keywords.slice(0, 4).map(key => {key})} - {asset.keywords.length > 4 ? ... : null} - -
    - - ))} - - )) - } + + + {t("dataset.detail.assets.keywords.total", { + total: asset?.keywords?.length, + })} + + + {asset.keywords.slice(0, 4).map(key => {key})} + {asset.keywords.length > 4 ? ... : null} + + + + ))} + + ) + } + ) + }, [windowWidth, filterParams]) const renderTitle = - + {dataset.name} {dataset.versionName} {t("dataset.detail.pager.total", { total: total + '/' + dataset.assetCount })} + {dataset?.inferClass ?
    {t('dataset.detail.infer.class')}{dataset?.inferClass?.map(cls => {cls})}
    : null}
    - - {t("dataset.detail.keyword.label")} - + + + updateFilterParams(checked, 'annoType')} /> + updateFilterParams(checked, 'cm')} labelAlign={'right'} /> + + + const assetDetail = setAssetVisible(false)} width={null} footer={null}> - + return ( @@ -176,7 +185,9 @@ const Dataset = ({ getDataset, getAssetsOfDataset }) => { {assetDetail} - {renderList(assets)} +
    + {renderList(assets)} +
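`renderList` above lays assets out like a justified gallery: every image in a row is scaled to a shared height `h`, chosen so that the scaled widths `h * (width / height)` exactly fill the container. A standalone sketch of that calculation (the metadata values are illustrative):

// Sketch of renderList's row-height math.
function rowHeight(containerWidth, row) {
  // Scaling each image to height h gives width h * (w_i / h_i),
  // so h = W / sum(w_i / h_i) makes the row exactly fill width W.
  const aspectSum = row.reduce((sum, { metadata }) => sum + metadata.width / metadata.height, 0)
  return containerWidth / aspectSum
}

const row = [
  { metadata: { width: 800, height: 600 } }, // 4:3
  { metadata: { width: 600, height: 600 } }, // 1:1
]
console.log(rowHeight(1000, row)) // ~428.57: widths become ~571.43 + ~428.57 = 1000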
    { ) } -const mapStateToProps = (state) => { - return { - logined: state.user.logined, - } -} - -const mapDispatchToProps = (dispatch) => { - return { - getDataset(id, force) { - return dispatch({ - type: "dataset/getDataset", - payload: { id, force }, - }) - }, - getAssetsOfDataset(payload) { - return dispatch({ - type: "dataset/getAssetsOfDataset", - payload, - }) - }, - } -} - -export default connect(mapStateToProps, mapDispatchToProps)(Dataset) +export default Dataset diff --git a/ymir/web/src/pages/dataset/assets.less b/ymir/web/src/pages/dataset/assets.less index e2852da07f..68393fb686 100644 --- a/ymir/web/src/pages/dataset/assets.less +++ b/ymir/web/src/pages/dataset/assets.less @@ -8,16 +8,23 @@ .list { padding-top: 10px; } +.dataset_container { + background-color: rgba(0, 0, 0, 0.8); + padding: 2px 0; +} .dataset_item { - padding: 10px; - margin-bottom: 10px; + // padding: 10px; + // margin-bottom: 10px; } .dataset_img { width: 100%; - height: 200px; - line-height: 194px; - text-align: center; - border: 1px solid rgba(0, 0, 0, 0.06); + height: 100%; + // height: 200px; + // line-height: 194px; + // text-align: center; + // border-color: rgba(0, 0, 0, 0.8); + // border-style: solid; + // border-width: 1px 2px; overflow: hidden; cursor: pointer; position: relative; diff --git a/ymir/web/src/pages/dataset/components/analysisChart.js b/ymir/web/src/pages/dataset/components/analysisChart.js new file mode 100644 index 0000000000..d7f1850e9b --- /dev/null +++ b/ymir/web/src/pages/dataset/components/analysisChart.js @@ -0,0 +1,121 @@ +import BarChart from "@/components/chart/bar" +import { useEffect, useState } from "react" +import { percent } from "@/utils/number" +import t from "@/utils/t" + +const AnalysisChartBar = ({ customOptions = {}, ...resProps}) => { + const [option, setOption] = useState({}) + const [series, setSeries] = useState([]) + const { + xData, + xUnit, + yData, + seriesType = 'bar', + barWidth = 8, + grid, + legend, + color, + tooltipLable = 'dataset.analysis.bar.asset.tooltip', + yAxisFormatter = function (val) { + return val * 100 + '%'; + }, + } = customOptions + + const defaultLegend = { itemHeight: 8, itemWidth: 20 } + + const defaultGrid = { + left: '3%', + right: 50, + bottom: '3%', + containLabel: true + } + + const tooltip = { + trigger: 'axis', + axisPointer: { + type: 'shadow' + }, + formatter: function (params) { + var res = `${params[0].name}`; + for (var i = 0, l = params.length; i < l; i++) { + const indexColor = params[i].color; + res += `
    `; + const name = params[i].seriesName; + const ratio = percent(params[i].value); + const amount = yData[params[i].seriesIndex].count[params[i].dataIndex]; + res += `${name} + ${t(tooltipLable, { ratio, amount })}`; + } + return res; + } + } + + const yAxis = [ + { + type: 'value', + splitLine: { + lineStyle: { + type: 'dashed' + } + }, + axisLabel: { + formatter: yAxisFormatter + } + } + ] + + useEffect(async () => { + const transData = transferData() + setSeries(transData) + }, [customOptions]) + + useEffect(() => { + if(!series.length) { + setOption({}) + return + } + const xAxis = [ + { + type: 'category', + axisLine: { + show: false + }, + axisTick: { + show: false + }, + name: xUnit ? `(${xUnit})` : '', + data: xData, + axisLabel: { + rotate: xData.length > 10 ? 45 : 0, + } + } + ] + setOption({ + tooltip, + legend: Object.assign(defaultLegend, legend), + grid: Object.assign(defaultGrid, grid), + yAxis, + xAxis, + color, + series, + }) + }, [series]) + + function transferData() { + const series = yData.map((item) => ( + { + name: item.name, + type: seriesType, + barWidth, + data: item.value + } + )) + return series + } + + return ( + + ) +} + +export default AnalysisChartBar; diff --git a/ymir/web/src/pages/dataset/components/asset.js b/ymir/web/src/pages/dataset/components/asset.js index b4a91acab7..caf207a161 100644 --- a/ymir/web/src/pages/dataset/components/asset.js +++ b/ymir/web/src/pages/dataset/components/asset.js @@ -1,32 +1,35 @@ -import { useHistory, useParams } from "react-router" -import React, { useEffect, useRef, useState } from "react" +import React, { useEffect, useState } from "react" import { Button, Card, Col, Descriptions, Row, Tag, Space } from "antd" -import { connect } from "dva" import { getDateFromTimestamp } from "@/utils/date" import t from "@/utils/t" -import Hash from "@/components/common/hash" -import AssetAnnotation from "@/components/dataset/asset_annotation" import { randomBetween } from "@/utils/number" +import useFetch from '@/hooks/useFetch' + +import Hash from "@/components/common/hash" +import AssetAnnotation from "@/components/dataset/assetAnnotation" +import GtSelector from "@/components/form/gtSelector" + import styles from "./asset.less" -import { ArrowRightIcon, NavDatasetIcon, EyeOffIcon, EyeOnIcon } from '@/components/common/icons' +import { NavDatasetIcon, EyeOffIcon, EyeOnIcon } from '@/components/common/icons' import { LeftOutlined, RightOutlined } from '@ant-design/icons' +import EvaluationSelector from "@/components/form/evaluationSelector" const { CheckableTag } = Tag +const { Item } = Descriptions -const KeywordColor = ["green", "red", "cyan", "blue", "yellow", "purple", "magenta", "orange", "gold"] - -function Asset({ id, datasetKeywords = [], filterKeyword, getAsset, getAssetsOfDataset, index = 0, total = 0 }) { - const history = useHistory() +function Asset({ id, asset: cache, datasetKeywords, filterKeyword, filters, index = 0, total = 0 }) { const [asset, setAsset] = useState({}) const [current, setCurrent] = useState('') const [showAnnotations, setShowAnnotations] = useState([]) const [selectedKeywords, setSelectedKeywords] = useState([]) const [currentIndex, setCurrentIndex] = useState(null) const [assetHistory, setAssetHistory] = useState([]) - const [colors] = useState(datasetKeywords.reduce((prev, curr, i) => - ({ ...prev, [curr]: KeywordColor[i % KeywordColor.length] }), {})) + const [evaluation, setEvaluation] = useState([]) + const [gtSelected, setGtSelected] = useState([]) + const [colors, setColors] = useState({}) 
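Both the asset grid and the single-asset view below decide annotation visibility by ANDing three independent predicates: selected keywords, ground truth vs. prediction (`gt`), and evaluation result (`cm`), where an empty selection means "no constraint". A distilled sketch of that combination (the annotation objects are simplified stand-ins):

// Sketch of the visibility filter: an annotation is shown only if it
// passes every predicate whose selection is non-empty.
const visible = (annotations, { keywords = [], types = [], cms = [] }) =>
  annotations.filter(anno =>
    (!keywords.length || keywords.includes(anno.keyword)) &&
    (!types.length || types.some(tp => (tp === 'gt' ? anno.gt : !anno.gt))) &&
    (!cms.length || cms.includes(anno.cm)))

// Example: only ground-truth "cat" boxes, any evaluation result.
console.log(visible(
  [{ keyword: 'cat', gt: true, cm: 1 }, { keyword: 'cat', gt: false, cm: 2 }],
  { keywords: ['cat'], types: ['gt'] },
)) // -> [ { keyword: 'cat', gt: true, cm: 1 } ]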
+ const [{ items: assets }, getAssets] = useFetch('dataset/getAssetsOfDataset', { items: [] }) useEffect(() => { setAsset({}) @@ -41,36 +44,37 @@ function Asset({ id, datasetKeywords = [], filterKeyword, getAsset, getAssetsOfD }, [currentIndex]) useEffect(() => { - if (!current) { - return + if (cache) { + setAsset(cache) + setCurrent(cache.hash) } - fetchAsset() - }, [current]) + }, [cache]) useEffect(() => { - setShowAnnotations((asset.annotations || []).filter(anno => selectedKeywords.indexOf(anno.keyword) >= 0)) - }, [selectedKeywords]) - - async function fetchAsset() { - const compare = (a, b) => { - const aa = (a.keyword || a).toUpperCase() - const bb = (b.keyword || b).toUpperCase() - return aa > bb ? -1 : (aa < bb ? 1 : 0) + if (!asset.hash) { + return } + const { annotations } = asset + setSelectedKeywords(asset.keywords) + setCurrent(asset.hash) + setColors(annotations.reduce((prev, annotation) => ({ ...prev, [annotation.keyword]: annotation.color }), {})) + }, [asset]) - const result = await getAsset(id, current) - const keywords = result.keywords.sort(compare) - const annotations = result.annotations.sort(compare).map(anno => ({ ...anno, color: colors[anno.keyword] })) - setAsset({ ...result, keywords, annotations }) - setSelectedKeywords(keywords) - } + useEffect(() => { + assets.length && setAsset(assets[0]) + }, [assets]) - async function fetchAssetHash() { - const result = await getAssetsOfDataset({ id, keyword: currentIndex.keyword, offset: currentIndex.index, limit: 1 }) - if (result?.items) { - const ass = result.items[0] - setCurrent(ass.hash) - } + useEffect(() => { + const keywordFilter = annotation => selectedKeywords.indexOf(annotation.keyword) >= 0 + const gtFilter = annotation => !gtSelected?.length || gtSelected.some(selected => selected === 'gt' ? annotation.gt : !annotation.gt) + const evaluationFilter = annotation => !evaluation?.length || evaluation.includes(annotation.cm) + const filters = annotation => keywordFilter(annotation) && evaluationFilter(annotation) && gtFilter(annotation) + const visibleAnnotations = (asset.annotations || []).filter(filters) + setShowAnnotations(visibleAnnotations) + }, [selectedKeywords, evaluation, asset, gtSelected]) + + function fetchAssetHash() { + getAssets({ id, ...filters, keyword: currentIndex.keyword, offset: currentIndex.index, limit: 1, datasetKeywords }) } function next() { @@ -82,7 +86,7 @@ function Asset({ id, datasetKeywords = [], filterKeyword, getAsset, getAssetsOfD } function random() { - setCurrentIndex(cu => ({ ...cu, index: randomBetween(0, total - 1, cu.index)})) + setCurrentIndex(cu => ({ ...cu, index: randomBetween(0, total - 1, cu.index) })) } function back() { @@ -109,8 +113,10 @@ function Asset({ id, datasetKeywords = [], filterKeyword, getAsset, getAssetsOfD
    -
+
+ {asset.annotations ? (
 {t("dataset.asset.info")}} bordered={false}
+ className='noShadow'
 style={{ marginRight: 20 }}
 headStyle={{ paddingLeft: 0 }}
 bodyStyle={{ padding: "20px 0" }}
@@ -134,27 +141,27 @@ function Asset({ id, datasetKeywords = [], filterKeyword, getAsset, getAssetsOfD
 contentStyle={{ flexWrap: 'wrap', padding: '10px' }}
 labelStyle={{ justifyContent: 'flex-end', padding: '10px' }}
 >
- + - -
+ + {asset.metadata?.width}
- - + + {asset.metadata?.height}
- + {asset.size ? (
- + {asset.size}
- + ) : null}
- - {asset.metadata?.channel} - -
- {getDateFromTimestamp(asset.metadata?.timestamp)} - -
+ + {asset.metadata?.image_channels} + +
+ {getDateFromTimestamp(asset.metadata?.timestamp?.start)} + +
 {asset.keywords?.map((keyword, i) => (
@@ -163,6 +170,7 @@ function Asset({ id, datasetKeywords = [], filterKeyword, getAsset, getAssetsOfD
 onChange={(checked) => changeKeywords(keyword, checked)}
 className={'ant-tag-' + colors[keyword]}
 key={i}
+ color={colors[keyword]}
 >
 {keyword}
@@ -174,8 +182,18 @@ function Asset({ id, datasetKeywords = [], filterKeyword, getAsset, getAssetsOfD
 }
- +
+ + {Object.keys(asset.cks).map(ck =>
+ {ck}: {asset.cks[ck]}
+ )}
+ +
+ + + + +
+ {renderKeywords(currentType)}
+ + +
 )
}
+
+export default KeywordSelector
diff --git a/ymir/web/src/pages/dataset/detail.js b/ymir/web/src/pages/dataset/detail.js
index ec86166713..1a26bb5894 100644
--- a/ymir/web/src/pages/dataset/detail.js
+++ b/ymir/web/src/pages/dataset/detail.js
@@ -1,27 +1,35 @@
 import React, { useEffect, useRef, useState } from "react"
-import { connect } from "dva"
-import { useHistory, useParams, Link } from "umi"
-import { Button, Card, Space } from "antd"
+import { useHistory, useParams, Link, useSelector } from "umi"
+import { Button, Card, message, Space } from "antd"
 import t from "@/utils/t"
 import { TASKTYPES, getTaskTypeLabel } from "@/constants/task"
+import useFetch from '@/hooks/useFetch'
+import useRestore from "@/hooks/useRestore"
+import { canHide } from '@/constants/dataset'
+
 import Breadcrumbs from "@/components/common/breadcrumb"
 import TaskDetail from "@/components/task/detail"
 import Detail from "@/components/dataset/detail"
-import s from "./detail.less"
 import TaskProgress from "@/components/task/progress"
 import Error from "@/components/task/error"
 import Hide from "@/components/common/hide"
-import useRestore from "@/hooks/useRestore"
+import useCardTitle from '@/hooks/useCardTitle'

-const taskTypes = ["fusion", "train", "mining", "label", 'inference', 'copy']
+import s from "./detail.less"
+import useRerunAction from "../../hooks/useRerunAction"
+
+const taskTypes = ["merge", "filter", "train", "mining", "label", 'inference', 'copy']

-function DatasetDetail({ datasetCache, getDataset }) {
+function DatasetDetail() {
   const history = useHistory()
   const { id: pid, did: id } = useParams()
-  const [dataset, setDataset] = useState({})
+  const [dataset, getDataset, setDataset] = useFetch('dataset/getDataset', {})
+  const datasetCache = useSelector(({ dataset }) => dataset.dataset)
   const hideRef = useRef(null)
   const restoreAction = useRestore(pid)
+  const generateRerunBtn = useRerunAction('btn')
+  const cardTitle = useCardTitle('dataset.detail.title')

   useEffect(() => {
     fetchDataset(true)
@@ -35,8 +43,8 @@ function DatasetDetail({ datasetCache, getDataset }) {
     }
   }, [datasetCache])

-  async function fetchDataset(force) {
-    await getDataset(id, force)
+  function fetchDataset(force) {
+    getDataset({ id, verbose: true, force })
   }

   const hide = (version) => {
@@ -61,15 +69,21 @@ function DatasetDetail({ datasetCache, getDataset }) {
    " + t(getTaskTypeLabel(dataset.taskType))} + title={cardTitle} >
    - fetchDataset(true)} /> - {dataset?.task?.error_code ? : null} + fetchDataset(true)} + /> + {dataset.taskType === TASKTYPES.LABEL ? ( @@ -80,27 +94,24 @@ function DatasetDetail({ datasetCache, getDataset }) {
    ) : null} {!dataset.hidden ? <> - {taskTypes.map((type) => ( + {taskTypes.map((type, index) => index === 0 || dataset.assetCount > 0 ? ( - ))} - - + : null} : } - + {generateRerunBtn(dataset)}
@@ -109,21 +120,4 @@ function DatasetDetail({ datasetCache, getDataset }) {
   )
 }

-const props = (state) => {
-  return {
-    datasetCache: state.dataset.dataset,
-  }
-}
-
-const actions = (dispatch) => {
-  return {
-    getDataset: (id, force) => {
-      return dispatch({
-        type: "dataset/getDataset",
-        payload: { id, force },
-      })
-    },
-  }
-}
-
-export default connect(props, actions)(DatasetDetail)
+export default DatasetDetail
diff --git a/ymir/web/src/pages/image/add.js b/ymir/web/src/pages/image/add.js
index bf900c1e07..6bf2709a9f 100644
--- a/ymir/web/src/pages/image/add.js
+++ b/ymir/web/src/pages/image/add.js
@@ -5,8 +5,8 @@ import { useParams, useHistory, useLocation } from "umi"
 import s from './add.less'
 import t from '@/utils/t'
+import { formLayout } from "@/config/antd"
 import Breadcrumbs from '@/components/common/breadcrumb'
-import Tip from '@/components/form/tip'

 const { useForm } = Form
@@ -60,7 +60,7 @@ const Add = ({ getImage, createImage, updateImage }) => {
   }

   const checkImageUrl = (_, value) => {
-    const reg = /^([a-zA-Z0-9]{4,30}\/)?[a-z0-9]+(?:[._-][a-z0-9]+)*(:[a-zA-Z0-9._-]+)?$/
+    const reg = /^[^\s]+$/
     if (!value || reg.test(value.trim())) {
       return Promise.resolve()
     }
@@ -110,57 +110,52 @@ const Add = ({ getImage, createImage, updateImage }) => {
    -
    - - - - - - - - setUserInput(true)} /> - - - - - - - - + + + + + + setUserInput(true)} /> + + + + + + + + + + + + + +
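The `checkImageUrl` validator above drops the strict docker-image-name pattern in favor of a bare non-whitespace check, so registry hosts with ports and nested namespaces are no longer rejected. A small sketch of the behavioral difference; the image reference used here is illustrative, not taken from this patch:

```js
const oldReg = /^([a-zA-Z0-9]{4,30}\/)?[a-z0-9]+(?:[._-][a-z0-9]+)*(:[a-zA-Z0-9._-]+)?$/
const newReg = /^[^\s]+$/

// a registry host with a port and a nested namespace:
// the old rule rejected it, the new rule accepts it
const ref = 'registry.example.com:5000/team/yolov5:cuda11.1'
console.log(oldReg.test(ref)) // false
console.log(newReg.test(ref)) // true
```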
diff --git a/ymir/web/src/pages/image/components/del.js b/ymir/web/src/pages/image/components/del.js
index c198c88c4c..1322e5651e 100644
--- a/ymir/web/src/pages/image/components/del.js
+++ b/ymir/web/src/pages/image/components/del.js
@@ -1,9 +1,19 @@
+import { forwardRef, useEffect, useImperativeHandle } from "react"
+
 import t from "@/utils/t"
+import useFetch from '@/hooks/useFetch'
+
 import confirm from '@/components/common/dangerConfirm'
-import { connect } from "dva"
-import { forwardRef, useImperativeHandle } from "react"

-const Del = forwardRef(({ delImage, ok = () => {} }, ref) => {
+const Del = forwardRef(({ ok = () => {} }, ref) => {
+  const [delResult, delImage] = useFetch('image/delImage')
+
+  useEffect(() => {
+    if (delResult) {
+      ok(delResult.id)
+    }
+  }, [delResult])
+
   useImperativeHandle(ref, () => {
     return {
       del,
@@ -13,12 +23,7 @@ const Del = forwardRef(({ delImage, ok = () => {} }, ref) => {
   function del(id, name) {
     confirm({
       content: t("image.del.confirm.content", { name }),
-      onOk: async () => {
-        const result = await delImage(id)
-        if (result) {
-          ok(id)
-        }
-      },
+      onOk: () => delImage(id),
       okText: t('common.del'),
     })
   }
@@ -26,15 +31,4 @@ const Del = forwardRef(({ delImage, ok = () => {} }, ref) => {
   return null
 })

-const actions = (dispatch) => {
-  return {
-    delImage(id) {
-      return dispatch({
-        type: 'image/delImage',
-        payload: id,
-      })
-    }
-  }
-}
-
-export default connect(null, actions, null, { forwardRef: true })(Del)
\ No newline at end of file
+export default Del
\ No newline at end of file
diff --git a/ymir/web/src/pages/image/components/list.js b/ymir/web/src/pages/image/components/list.js
index 76fa8573f1..2a2b8017be 100644
--- a/ymir/web/src/pages/image/components/list.js
+++ b/ymir/web/src/pages/image/components/list.js
@@ -5,6 +5,7 @@ import { useHistory } from "umi"
 import { List, Skeleton, Space, Button, Pagination, Col, Row, } from "antd"
 import t from "@/utils/t"
+import { HIDDENMODULES } from '@/constants/common'
 import { ROLES } from '@/constants/user'
 import { TYPES, STATES, getImageTypeLabel, imageIsPending } from '@/constants/image'
 import ShareModal from "./share"
@@ -19,6 +20,7 @@ import { LoadingOutlined } from '@ant-design/icons'
 const initQuery = {
   name: undefined,
   type: undefined,
+  current: 1,
   offset: 0,
   limit: 20,
 }
@@ -45,7 +47,7 @@ const ImageList = ({ role, filter, getImages }) => {
   const pageChange = (current, pageSize) => {
     const limit = pageSize
     const offset = (current - 1) * pageSize
-    setQuery((old) => ({ ...old, limit, offset }))
+    setQuery((old) => ({ ...old, current, limit, offset }))
   }

   async function getData() {
@@ -62,7 +64,7 @@ const ImageList = ({ role, filter, getImages }) => {
   }

   const moreList = (record) => {
-    const { id, name, state, functions, url, related, is_shared } = record
+    const { id, name, state, functions, url, related, isShared } = record
     const menus = [
       {
@@ -76,7 +78,7 @@ const ImageList = ({ role, filter, getImages }) => {
         key: "share",
         label: t("image.action.share"),
         onclick: () => share(id, name),
-        hidden: () => !isDone(state) || is_shared,
+        hidden: () => !isDone(state) || isShared,
         icon: ,
       },
       {
@@ -153,7 +155,11 @@ const ImageList = ({ role, filter, getImages }) => {
       [STATES.DONE]: ,
       [STATES.ERROR]: ,
     }
-    return states[state]
+    return {states[state]}
+  }
+
+  const liveCodeState = (live) => {
+    return {t(live ? 'image.livecode.label.remote' : 'image.livecode.label.local')}
+  }

   const addBtn = (
@@ -162,14 +168,18 @@ const ImageList = ({ role, filter, getImages }) => {
   const renderItem = (item) => {
     const title =
-
    {item.name}{imageState(item.state)} + + {item.name} + {imageState(item.state)} + {isDone(item.state) && !HIDDENMODULES.LIVECODE ? liveCodeState(item.liveCode) : null} + {more(item)} const type = isTrain(item.functions) ? 'train' : 'mining' const desc = - - {t('image.list.item.type')}{getImageTypeLabel(item.functions).map(label => t(label)).join(', ')} - {t('image.list.item.url')}{item.url} + + {t('image.list.item.type')}{getImageTypeLabel(item.functions).map(label => t(label)).join(', ')} + {t('image.list.item.url')}{item.url} {t('image.list.item.desc')}{item.description} {isTrain(item.functions) && item.related?.length ?
    {t('image.list.item.related')}
 : null}
@@ -193,7 +203,8 @@ const ImageList = ({ role, filter, getImages }) => {
 renderItem={renderItem}
 />
 t('image.list.total', { total })} showQuickJumper showSizeChanger />
diff --git a/ymir/web/src/pages/image/components/list.less b/ymir/web/src/pages/image/components/list.less
index a44f2c6452..950b2a075b 100644
--- a/ymir/web/src/pages/image/components/list.less
+++ b/ymir/web/src/pages/image/components/list.less
@@ -19,3 +19,20 @@
   display: flex;
   justify-content: flex-end;
 }
+.remote, .local {
+  display: inline-block;
+  padding: 0 8px;
+  font-size: 14px;
+  color: #fff;
+  border-radius: 46px;
+  font-weight: normal;
+}
+.remote {
+  background-color: @btn-primary-bg;
+}
+.local {
+  background-color: @primary-color;
+}
+.info {
+  margin: 10px 0;
+}
diff --git a/ymir/web/src/pages/image/components/relate.js b/ymir/web/src/pages/image/components/relate.js
index 73c83fa8e5..38b5de5de1 100644
--- a/ymir/web/src/pages/image/components/relate.js
+++ b/ymir/web/src/pages/image/components/relate.js
@@ -1,26 +1,37 @@
-import { Modal, Form, Input, Select, message } from "antd"
+import { Modal, Form, Select, message } from "antd"
 import { forwardRef, useEffect, useState, useImperativeHandle } from "react"
-import { connect } from 'dva'
 import t from '@/utils/t'
-import { TYPES, STATES } from '@/constants/image'
+import { TYPES } from '@/constants/image'
+import useFetch from '@/hooks/useFetch'

 const { useForm } = Form

-const RelateModal = forwardRef(({ getMiningImage, relate, ok = () => { } }, ref) => {
+const RelateModal = forwardRef(({ ok = () => { } }, ref) => {
   const [visible, setVisible] = useState(false)
   const [links, setLinks] = useState([])
-  const [images, setImages] = useState([])
   const [id, setId] = useState(null)
   const [imageName, setImageName] = useState('')
   const [linkForm] = useForm()
+  const [relateResult, relate] = useFetch('image/relateImage')
+  const [{ items: images }, getMiningImages] = useFetch('image/getImages', { items: [] })

-  useEffect(() => {
-    linkForm.setFieldsValue({ relations: links.map(image => image.id) })
-  }, [links, visible])
+  useEffect(() => linkForm.setFieldsValue({
+    relations: links.map(image => image.id)
+  }), [links, visible])
+
+  useEffect(() => visible && getMiningImages({
+    type: TYPES.MINING,
+    offset: 0,
+    limit: 10000,
+  }), [visible])

   useEffect(() => {
-    visible && fetchMiningImages()
-  }, [visible])
+    if (relateResult) {
+      message.success(t('image.link.success'))
+      setVisible(false)
+      ok()
+    }
+  }, [relateResult])

   useImperativeHandle(ref, () => ({
     show: ({ id, name, related }) => {
@@ -34,25 +45,20 @@ const RelateModal = forwardRef(({ getMiningImage, relate, ok = () => { } }, ref)
   const linkModalCancel = () => setVisible(false)

   const submitLink = () => {
-    linkForm.validateFields().then(async () => {
+    linkForm.validateFields().then(() => {
       const { relations } = linkForm.getFieldValue()
-      const result = await relate(id, relations)
-      if (result) {
-        message.success(t('image.link.success'))
-        setVisible(false)
-        ok()
-      }
+      relate({ id, relations })
     })
   }

-  async function fetchMiningImages() {
-    const result = await getMiningImage()
-    if (result) {
-      setImages(result.items)
-    }
-  }
-
-  return
+ return
    { } }, ref)
 })

-const props = (state) => {
-  return {
-    username: state.user.username,
-  }
-}
-const actions = (dispatch) => {
-  return {
-    getImageRelated(id) {
-      return dispatch({
-        type: 'image/getImageRelated',
-        payload: id,
-      })
-    },
-    relate(id, relations) {
-      return dispatch({
-        type: 'image/relateImage',
-        payload: { id, relations },
-      })
-    },
-    getMiningImage() {
-      return dispatch({
-        type: 'image/getImages',
-        payload: { type: TYPES.MINING, offset: 0, limit: 10000, },
-      })
-    }
-  }
-}
-export default connect(props, actions, null, { forwardRef: true })(RelateModal)
+export default RelateModal
diff --git a/ymir/web/src/pages/image/components/share.js b/ymir/web/src/pages/image/components/share.js
index 1015fcd6bb..e146bc6529 100644
--- a/ymir/web/src/pages/image/components/share.js
+++ b/ymir/web/src/pages/image/components/share.js
@@ -1,20 +1,31 @@
 import { Modal, Form, Input, message } from "antd"
 import { useEffect, useState, forwardRef, useImperativeHandle } from "react"
-import { connect } from 'dva'
+import { useSelector } from 'umi'
 import t from '@/utils/t'
 import { phoneValidate } from "@/components/form/validators"
+import useFetch from '@/hooks/useFetch'

 const { useForm } = Form

-const ShareModal = forwardRef(({ username, email, phone, ok = () => {}, shareImage }, ref) => {
+const ShareModal = forwardRef(({ ok = () => { } }, ref) => {
   const [shareForm] = useForm()
   const [visible, setVisible] = useState(false)
   const [id, setId] = useState(null)
   const [imageName, setImageName] = useState('')
+  const { username, email, phone } = useSelector(({ user }) => user)
+  const [shareResult, shareImage] = useFetch('image/shareImage')

   useEffect(() => {
     shareForm.setFieldsValue({ email, phone })
-  }, [email, phone ])
+  }, [email, phone])
+
+  useEffect(() => {
+    if (shareResult) {
+      message.success(t('image.share.success'))
+      setVisible(false)
+      ok()
+    }
+  }, [shareResult])

   useImperativeHandle(ref, () => ({
     show: (id, name) => {
@@ -35,68 +46,51 @@ const ShareModal = forwardRef(({ username, email, phone, ok = () => {}, shareIma
       ...other,
       org: (org || '').trim(),
     }
-    const result = await shareImage(params)
-    if (result) {
-      message.success(t('image.share.success'))
-      setVisible(false)
-      ok()
-    }
+    shareImage(params)
   })
 }

- return
- -
- {/* {username} */}
- - - - - - - - - - -
+ + + + + + + + + + + +
 })

-const props = (state) => {
-  return {
-    username: state.user.username,
-    phone: state.user.phone,
-    email: state.user.email,
-  }
-}
-const actions = (dispatch) => {
-  return {
-    shareImage(payload) {
-      return dispatch({
-        type: 'image/shareImage',
-        payload,
-      })
-    }
-  }
-}
-export default connect(props, actions, null, { forwardRef: true })(ShareModal)
+export default ShareModal
diff --git a/ymir/web/src/pages/image/components/shareImageList.js b/ymir/web/src/pages/image/components/shareImageList.js
index 87565b451c..6dbc05d0b8 100644
--- a/ymir/web/src/pages/image/components/shareImageList.js
+++ b/ymir/web/src/pages/image/components/shareImageList.js
@@ -41,8 +41,8 @@ const ImageList = ({ role, getShareImages }) => {
     return isAdmin() ? menus : []
   }

-  function copy (record) {
-    history.push({pathname: '/home/image/add', state: { record }})
+  function copy(record) {
+    history.push({ pathname: '/home/image/add', state: { record } })
   }

   function isAdmin() {
@@ -76,11 +76,11 @@ const ImageList = ({ role, getShareImages }) => {
 {t('image.list.item.type')}{item.functions}
 {t('image.list.item.desc')}{item.description}
+
    + + {item.organization} + {item.contributor} -
    - {item.organization} - {item.contributor} -
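del.js, relate.js, and share.js above all follow the same refactor: the dva `connect` wrapper is replaced by a `useFetch` hook that dispatches an effect and exposes the result as local state. The hook's implementation is not part of these hunks, so the sketch below is only an assumed shape inferred from call sites such as `const [delResult, delImage] = useFetch('image/delImage')`:

```js
import { useState } from 'react'
import { useDispatch } from 'umi'

// assumed shape of @/hooks/useFetch, reconstructed from its call sites only
const useFetch = (effect, initialResult = null) => {
  const dispatch = useDispatch()
  const [result, setResult] = useState(initialResult)

  // dispatch a dva effect such as 'image/delImage' and cache its payload
  const fetcher = async (payload) => {
    const data = await dispatch({ type: effect, payload })
    if (typeof data !== 'undefined') {
      setResult(data)
    }
    return data
  }

  return [result, fetcher, setResult]
}

export default useFetch
```

Callers then react to the cached result in a `useEffect` (as del.js does with `delResult`) instead of awaiting the dispatch inline, which keeps success handling in one place.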
diff --git a/ymir/web/src/pages/image/detail.js b/ymir/web/src/pages/image/detail.js
index 23bc6a0526..86425b050c 100644
--- a/ymir/web/src/pages/image/detail.js
+++ b/ymir/web/src/pages/image/detail.js
@@ -1,39 +1,38 @@
-import React, { useEffect, useRef, useState } from "react"
-import { Descriptions, List, Space, Tag, Card, Button, Row, Col } from "antd"
-import { connect } from 'dva'
-import { useParams, Link, useHistory } from "umi"
+import React, { useEffect, useRef } from "react"
+import { Descriptions, Space, Card, Button, Row, Col } from "antd"
+import { useParams, Link, useHistory, useSelector } from "umi"

 import t from "@/utils/t"
-import Breadcrumbs from "@/components/common/breadcrumb"
 import { TYPES, STATES, getImageTypeLabel } from '@/constants/image'
 import { ROLES } from '@/constants/user'
+import useFetch from '@/hooks/useFetch'
+
+import Breadcrumbs from "@/components/common/breadcrumb"
 import LinkModal from "./components/relate"
 import ShareModal from "./components/share"
 import Del from './components/del'
+import ImagesLink from "./components/imagesLink"
+import StateTag from '@/components/task/stateTag'
+
 import styles from "./detail.less"
 import { EditIcon, VectorIcon, TrainIcon, } from '@/components/common/icons'
-import ImagesLink from "./components/imagesLink"
-import StateTag from '../../components/task/stateTag'

 const { Item } = Descriptions

-function ImageDetail({ role, getImage }) {
+function ImageDetail() {
   const { id } = useParams()
   const history = useHistory()
-  const [image, setImage] = useState({ id })
+  // const [image, setImage] = useState({ id })
   const shareModalRef = useRef(null)
   const linkModalRef = useRef(null)
   const delRef = useRef(null)
+  const [image, getImage] = useFetch('image/getImage', { id })
+  const role = useSelector(({ user }) => user.role)

-  useEffect(async () => {
-    fetchImage()
-  }, [id])
+  useEffect(fetchImage, [id])

-  async function fetchImage() {
-    const result = await getImage(id)
-    if (result) {
-      setImage(result)
-    }
+  function fetchImage() {
+    getImage(id)
   }

   function relateImage() {
@@ -70,10 +69,10 @@ function ImageDetail({ role, getImage }) {
   function renderConfigs(configs = []) {
     return configs.map(({config, type }) => {
-      return <>
+      return

    {t(getImageTypeLabel([type])[0])}

    {renderConfig(config)}
    - +
 })
 }
@@ -111,7 +110,7 @@ function ImageDetail({ role, getImage }) {
 {image.name}
 {getImageTypeLabel(image.functions).map(label => t(label)).join(',')}
 {image.url}
- {image.is_shared ? t('common.yes') : t('common.no')}
+ {image.isShared ? t('common.yes') : t('common.no')}
 {isAdmin() && isDone() ? : null}
@@ -136,22 +135,4 @@ function ImageDetail({ role, getImage }) {
   )
 }
-
-const props = (state) => {
-  return {
-    role: state.user.role,
-  }
-}
-
-const actions = (dispatch) => {
-  return {
-    getImage(id) {
-      return dispatch({
-        type: 'image/getImage',
-        payload: id,
-      })
-    },
-  }
-}
-
-export default connect(props, actions)(ImageDetail)
+export default ImageDetail
diff --git a/ymir/web/src/pages/iteration/initModel.js b/ymir/web/src/pages/iteration/initModel.js
index b28bd6d637..eb68486932 100644
--- a/ymir/web/src/pages/iteration/initModel.js
+++ b/ymir/web/src/pages/iteration/initModel.js
@@ -1,15 +1,13 @@
-import { Button, Card, Form, message, Select, Space, ConfigProvider } from 'antd'
+import { Button, Card, Form, message, Space } from 'antd'
 import { connect } from 'dva'
 import { useEffect, useState } from 'react'
 import { useParams, useHistory } from 'umi'

 import { formLayout } from "@/config/antd"
 import t from '@/utils/t'
-import EmptyStateModel from '@/components/empty/model'
 import ModelSelect from "@/components/form/modelSelect"
 import s from './add.less'
 import Breadcrumbs from '@/components/common/breadcrumb'
-import Tip from "@/components/form/tip"

 const { useForm } = Form
@@ -44,16 +42,16 @@ const InitModel = ({ projects = {}, ...props }) => {
     const result = await props.updateProject(params)
     if (result) {
       message.success(t('project.initmodel.success.msg'))
-      history.push(`/home/project/detail/${id}`)
+      history.goBack()
     }
   }

   function initForm(project = {}) {
-    const { model } = project
+    const { model, modelStage } = project
     if (model) {
       form.setFieldsValue({
-        model,
+        modelStage,
       })
     }
   }
@@ -78,18 +76,16 @@ const InitModel = ({ projects = {}, ...props }) => {
 labelAlign={'left'}
 colon={false}
 >
- }>
- - - -
+ + +
diff --git a/ymir/web/src/pages/keyword/index.js b/ymir/web/src/pages/keyword/index.js
index 9dff33d8c5..31e49a6dda 100644
--- a/ymir/web/src/pages/keyword/index.js
+++ b/ymir/web/src/pages/keyword/index.js
@@ -24,6 +24,7 @@ const { useForm } = Form

 const initQuery = {
   name: "",
+  current: 1,
   offset: 0,
   limit: 20,
 }
@@ -94,7 +95,7 @@ function Keyword({ getKeywords }) {
   const pageChange = ({ current, pageSize }) => {
     const limit = pageSize
     const offset = (current - 1) * pageSize
-    setQuery((old) => ({ ...old, limit, offset }))
+    setQuery((old) => ({ ...old, current, limit, offset }))
   }

   function showTitle(str) {
@@ -233,7 +234,8 @@ function Keyword({ getKeywords }) {
   // total: 500,
   defaultPageSize: query.limit,
   showTotal: (total) => t("keyword.pager.total.label", { total }),
-  defaultCurrent: 1,
+  defaultCurrent: query.current,
+  current: query.current,
 }}
 columns={columns}
 >
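The image list and the keyword list both gain a `current` field in their query state so the pager can be driven as a controlled component. The bookkeeping the two `pageChange` handlers share reduces to this (values illustrative):

```js
const initQuery = { current: 1, offset: 0, limit: 20 }

// keep the page number and the row offset in sync on every page change
const nextQuery = (old, current, pageSize) => ({
  ...old,
  current,
  limit: pageSize,
  offset: (current - 1) * pageSize,
})

console.log(nextQuery(initQuery, 3, 20)) // { current: 3, offset: 40, limit: 20 }
```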
diff --git a/ymir/web/src/pages/keyword/multiAdd.js b/ymir/web/src/pages/keyword/multiAdd.js
index 1d66c3f4c4..8a5469d9b5 100644
--- a/ymir/web/src/pages/keyword/multiAdd.js
+++ b/ymir/web/src/pages/keyword/multiAdd.js
@@ -28,8 +28,7 @@ const MultiAdd = forwardRef(({ addKeywords, ok = () => { } }, ref) => {
       form.resetFields()
       ok()
     } else {
-      message.error(t('keyword.name.repeat'))
-      setRepeats(result.failed || [])
+      message.error(`${t('keyword.name.repeat')}: ${(result.failed || []).join(',')}`)
     }
   } else {
     message.error(t('keyword.add.failure'))
diff --git a/ymir/web/src/pages/model/add.js b/ymir/web/src/pages/model/add.js
index b1d1ec8833..f69e61e429 100644
--- a/ymir/web/src/pages/model/add.js
+++ b/ymir/web/src/pages/model/add.js
@@ -2,14 +2,18 @@ import { useEffect, useState } from 'react'
 import { Button, Card, Form, Input, message, Modal, Select, Space, Upload } from 'antd'
 import { useParams, connect, useHistory, useLocation } from 'umi'

+import { formLayout } from "@/config/antd"
 import t from '@/utils/t'
 import { generateName } from '@/utils/string'
+import useFetch from '@/hooks/useFetch'
+
+import { urlValidator } from '@/components/form/validators'
 import Breadcrumbs from '@/components/common/breadcrumb'
-import Tip from "@/components/form/tip"
 import ProjectSelect from "@/components/form/projectModelSelect"
+import Desc from "@/components/form/desc"
 import Uploader from '@/components/form/uploader'
+
 import s from './add.less'
-import { urlValidator } from '@/components/form/validators'

 const { Option } = Select
 const { useForm } = Form
@@ -21,7 +25,7 @@ const TYPES = Object.freeze({
   NET: 3,
 })

-const Add = ({ importModel }) => {
+const Add = () => {
   const types = [
     { id: TYPES.COPY, label: t('model.add.types.copy') },
     { id: TYPES.NET, label: t('model.add.types.net') },
@@ -30,7 +34,8 @@
   const history = useHistory()
   const { query } = useLocation()
-  const { mid } = query
+  const { mid, from, stepKey } = query
+  const iterationContext = from === 'iteration'
   const { id: pid } = useParams()
   const [form] = useForm()
   const [path, setPath] = useState('')
@@ -39,8 +44,27 @@
   name: generateName('import_model'),
   modelId: Number(mid) ? [Number(pid), Number(mid)] : undefined,
 }
+  const [importResult, importModel] = useFetch('model/importModel')
+  const [updateResult, updateProject] = useFetch('project/updateProject')
+
+  useEffect(() => {
+    if (updateResult) {
+      history.replace(`/home/project/${pid}/iterations`)
+    }
+  }, [updateResult])
+
+  useEffect(() => {
+    if (importResult) {
+      message.success(t('model.add.success'))
+      if (iterationContext && stepKey) {
+        return updateProject({ id: pid, [stepKey]: [importResult.id] })
+      }
+      const group = importResult.model_group_id || ''
+      history.push(`/home/project/${pid}/model#${group}`)
+    }
+  }, [importResult])

-  async function submit(values) {
+  function submit(values) {
     const params = {
       ...values,
       projectId: pid,
@@ -56,11 +80,7 @@
     if (values.modelId) {
       params.modelId = values.modelId[values.modelId.length - 1]
     }
-    const result = await importModel(params)
-    if (result) {
-      message.success(t('model.add.success'))
-      history.push(`/home/project/detail/${pid}#model`)
-    }
+    importModel(params)
   }

   const typeChange = (type) => {
@@ -75,90 +95,74 @@
    -
- -
+ + + + + + +
 {isType(TYPES.COPY) ?
- <>
- -
+ + +
 : null}
 {isType(TYPES.LOCAL) ?
- : null}
+
+ { setPath(result) }}
+ max={1024}
+ format='all'
+ onRemove={() => setPath('')}
+ info={t('model.add.form.upload.info', { br: <br/>, max: 1024 })}
+ >
    +
    + : null} {isType(TYPES.NET) ? - : null} - - + : null} + + + + + + + + + + +
@@ -166,16 +170,4 @@ const Add = ({ importModel }) => {
   )
 }

-const actions = (dispatch) => {
-  return {
-    importModel: (payload) => {
-      return dispatch({
-        type: 'model/importModel',
-        payload,
-      })
-    },
-  }
-}
-
-export default connect(null, actions)(Add)
+export default Add
diff --git a/ymir/web/src/pages/model/detail.js b/ymir/web/src/pages/model/detail.js
index 4077b1c960..63ab04ac10 100644
--- a/ymir/web/src/pages/model/detail.js
+++ b/ymir/web/src/pages/model/detail.js
@@ -13,6 +13,10 @@
 import TaskProgress from "@/components/task/progress"
 import Error from "@/components/task/error"
 import Hide from "@/components/common/hide"
 import useRestore from "@/hooks/useRestore"
+import keywordsItem from "@/components/task/items/keywords"
+import { DescPop } from "../../components/common/descPop"
+import useRerunAction from "../../hooks/useRerunAction"
+import useCardTitle from '@/hooks/useCardTitle'

 const { Item } = Descriptions

@@ -22,6 +26,8 @@ function ModelDetail({ modelCache, getModel }) {
   const [model, setModel] = useState({ id })
   const hideRef = useRef(null)
   const restoreAction = useRestore(pid)
+  const generateRerunBtn = useRerunAction('btn')
+  const cardTitle = useCardTitle(model.name)

   useEffect(async () => {
     id && fetchModel(true)
@@ -39,16 +45,6 @@ function ModelDetail({ modelCache, getModel }) {
   await getModel(id, force)
 }

-  function renderTitle() {
-    return (
-      {model.name} > {t(getTaskTypeLabel(model.taskType))}
-    )
-  }
-
   const hide = (version) => {
     if (model?.project?.hiddenDatasets?.includes(version.id)) {
       return message.warn(t('dataset.hide.single.invalid'))
@@ -67,31 +63,41 @@ function ModelDetail({ modelCache, getModel }) {
   }
 }

+  function getModelStage() {
+    const stage = model.recommendStage
+    return stage ? [id, stage].toString() : ''
+  }
+
   return (
    - +
 {model.name} {model.versionName}
 {model.hidden ? {t('common.state.hidden')} : null}
- {percent(model.map)}
+ {keywordsItem(model.keywords)}
+
+ {model.stages?.map(stage => {stage.name} mAP: {percent(stage.map)})}
+
+ fetchModel(true)} />
- {model?.task?.error_code ? : null}
+
 {!model.hidden ? <>
 {model.url ? : null}
- - -
+ + +
 : }
+ {generateRerunBtn(model)}
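The detail page above serializes the model id together with its recommended stage, and verify.js below passes the same pair as `modelStage: [id, model.recommendStage]`. A minimal sketch of the `getModelStage` serialization, with illustrative ids:

```js
const id = 14                        // model id, illustrative
const model = { recommendStage: 3 }  // recommended stage id, illustrative

// same logic as getModelStage() above: "modelId,stageId", or '' when absent
function getModelStage() {
  const stage = model.recommendStage
  return stage ? [id, stage].toString() : ''
}

console.log(getModelStage()) // "14,3"
```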
diff --git a/ymir/web/src/pages/model/verify.js b/ymir/web/src/pages/model/verify.js
index bba078c076..21ff197d54 100644
--- a/ymir/web/src/pages/model/verify.js
+++ b/ymir/web/src/pages/model/verify.js
@@ -11,13 +11,14 @@ import t from "@/utils/t"
 import { format } from '@/utils/date'
 import Breadcrumb from '@/components/common/breadcrumb'
 import Uploader from "@/components/form/uploader"
-import AssetAnnotation from "@/components/dataset/asset_annotation"
+import AssetAnnotation from "@/components/dataset/assetAnnotation"
 import { TYPES } from '@/constants/image'
 import styles from './verify.less'
 import { NavDatasetIcon, SearchEyeIcon, NoXlmxIcon } from '@/components/common/icons'
 import ImgDef from '@/assets/img_def.png'
 import ImageSelect from "@/components/form/imageSelect"
 import { percent } from "@/utils/number"
+import useFetch from '@/hooks/useFetch'

 const { CheckableTag } = Tag

@@ -25,10 +26,9 @@ const { CheckableTag } = Tag
 const KeywordColor = ["green", "red", "cyan", "blue", "yellow", "purple", "magenta", "orange", "gold"]

-function Verify({ getModel, verify }) {
+function Verify({ verify }) {
   const history = useHistory()
-  const { mid: id } = useParams()
-  const [model, setModel] = useState({})
+  const { mid: id, id: pid } = useParams()
   const [url, setUrl] = useState('')
   const [confidence, setConfidence] = useState(20)
   const [annotations, setAnnotations] = useState([])
@@ -39,16 +39,14 @@ function Verify({ getModel, verify }) {
   const [seniorConfig, setSeniorConfig] = useState([])
   const [hpVisible, setHpVisible] = useState(false)
   const IMGSIZELIMIT = 10
+  const [model, getModel] = useFetch('model/getModel', {})

-  useEffect(async () => {
-    const result = await getModel(id)
-    if (result) {
-      setModel(result)
-    }
+  useEffect(() => {
+    getModel({ id })
   }, [])

   useEffect(() => {
-    setShowAnnos(annotations.length ? annotations.filter(anno =>
+    setShowAnnos(annotations.length ? annotations.filter(anno =>
       anno.score * 100 > confidence && selectedKeywords.indexOf(anno.keyword) > -1
     ) : [])
   }, [confidence, annotations, selectedKeywords])
@@ -69,7 +67,6 @@ function Verify({ getModel, verify }) {
   function urlChange(files, url) {
     setUrl('')
     setUrl(files.length ? url : '')
-    // after uploading a new image, clear the previous annotation results
     setAnnotations([])
   }

@@ -97,7 +94,6 @@
   )
-  // annotations.filter(anno => anno.confidence > confidence)

   function renderUploadBtn(label = t('model.verify.upload.label')) {
     return (
 key && value ? config[key] = value : null)
     // reinit annotations
     setAnnotations([])
-    const result = await verify(id, [url], image, config)
-    // console.log('result: ', result)
+    const result = await verify({ projectId: pid, modelStage: [id, model.recommendStage], urls: [url], image, config })
     if (result) {
-      const all = result.annotations[0]?.detection || []
+      const all = result || []
       setAnnotations(all)
       if (all.length) {
@@ -159,9 +149,9 @@ function Verify({ getModel, verify }) {
   }

   const onFinish = () => {
-    form.validateFields().then(() => {
-      verifyImg()
-    })
+    form.validateFields().then(() => {
+      verifyImg()
+    })
   }

   return (
@@ -169,7 +159,7 @@ function Verify({ getModel, verify }) {
- +
 {url ? ( ) : renderUploader}
 {url ? (
    - - - `${value}%`} value={confidence} onChange={confidenceChange} /> - - - -
+ + +
+ `${value}%`} value={confidence} onChange={confidenceChange} />
+ + + +
 ) : null}
- +
 {t("model.verify.model.info.title")}} bordered={false}
@@ -227,84 +217,79 @@
- +
 {seniorConfig.length ?
- {t("model.verify.model.param.title")}}
- bordered={false}
- style={{ marginRight: 20 }}
- headStyle={{ padding: 0, minHeight: 28 }}
- bodyStyle={{ padding: 0 }}
- extra={}
 >
- {t("model.verify.model.param.title")}}
+ bordered={false}
+ style={{ marginRight: 20 }}
+ headStyle={{ padding: 0, minHeight: 28 }}
+ bodyStyle={{ padding: 0 }}
+ extra={}
 >
- -
- {(fields, { add, remove }) => (
- <>
-