diff --git a/README.md b/README.md index 08ee32f6c9..1b4025e893 100755 --- a/README.md +++ b/README.md @@ -226,6 +226,11 @@ Execute the start command after the modification: `bash ymir.sh start`. 4. After the service successfully started, YMIR will be available at [http://localhost:12001/](http://localhost:12001/). If you need to **stop the service**, run the command: `bash ymir.sh stop` +5. The default initial user is a super administrator. You can check the account and password in the .env file under the project path and modify them before deployment. After the service is deployed, it is recommended to change the password through the user management interface. +
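For the new step 5, a quick way to confirm what will be seeded is to read the .env file before running `bash ymir.sh start`. A minimal sketch, assuming only that the initial account and password live in .env under the project path; the exact key names are not shown in this patch, so the `ADMIN` filter below is just a guess:

```python
# Minimal sketch: print the admin-related entries of the project's .env
# before deployment. The key names are an assumption (not in this patch),
# so we simply match lines containing "ADMIN"; adjust to your .env layout.
from pathlib import Path

for line in Path(".env").read_text().splitlines():
    line = line.strip()
    if line and not line.startswith("#") and "ADMIN" in line.upper():
        print(line)  # the initial super administrator account/password to review
```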
+ +
 
+ ## 2.3. Installation of **Label Studio** (optional) **Label Studio** is also an external labeling system supported by YMIR and can be installed as an alternative labeling tool. @@ -337,9 +342,9 @@ Users can download the example **Sample.zip** for reference as follows: 1. Download the open-source dataset VOC2012 ([Click to download VOC2012](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar)) and unzip it. Change the folder name as required, and then compress them separately into zip packages that meet the import requirements. -2. Place dataset VOC2012 under ymir-workplace/importing_pic. +2. Place dataset VOC2012 under `ymir-workplace/ymir-sharing`. -3. Select 'path import' and enter the absolute path address of the dataset in the server: /data/sharing/voc2012, as shown in the figure below: +3. Select 'path import' and enter the absolute path of the dataset in the server: `/ymir-sharing/voc2012`, as shown in the figure below: ![path import](https://github.com/IndustryEssentials/ymir-images/blob/main/doc_images/path%20import.jpg) diff --git a/README_zh-CN.md b/README_zh-CN.md index 2c323abe52..d7f7f2367a 100755 --- a/README_zh-CN.md +++ b/README_zh-CN.md @@ -224,6 +224,11 @@ LABEL_TOOL_HOST_PORT=set_your_label_tool_HOST_PORT 4. 服务启动成功后,默认配置端口为12001,可以直接访问 [http://localhost:12001/](http://localhost:12001/) 显示登录界面即安装成功。如果需要**停止服务**,运行命令为:`bash ymir.sh stop` +5. 默认初始用户权限为超级管理员,可以通过项目路径下.env文件查看账号密码,部署前可自行设置修改。建议在服务部署完成后,通过用户管理界面修改密码。 +
+ +
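A note on the path-import change above: the dataset is unpacked on the host under `ymir-workplace/ymir-sharing`, while the path typed into the import form is the path inside the backend container, which the docker-compose.yml change below mounts as `/ymir-sharing` (from `${YMIR_PATH}/ymir-sharing`). A small sketch of that mapping, assuming `YMIR_PATH` points at the `ymir-workplace` directory as the README paths suggest:

```python
# Sketch of the host-to-container path mapping behind 'path import'.
# Assumes YMIR_PATH resolves to ./ymir-workplace; adjust to your deployment.
from pathlib import Path

host_dir = Path("ymir-workplace/ymir-sharing/voc2012")  # where VOC2012 is placed on the host
container_path = "/ymir-sharing/voc2012"                # what to enter in the 'path import' form

if host_dir.is_dir():
    print(f"found {host_dir} on the host; enter {container_path} in the import form")
else:
    print(f"expected the dataset at {host_dir}, but it is not there yet")
```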
 
+ ## 2.3. 安装配置LabelStudio (可选) label studio同时也是YMIR所支持的外接标注系统,可以作为备选标注工具安装。 @@ -320,9 +325,9 @@ LABEL_TOOL_TOKEN="Token token_value" 1.通过在网络中下载开源数据集VOC2012([点击下载VOC2012](http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar)),解压缩后按要求修改文件夹名称,再分别压缩为符合导入要求的zip包; -2.把VOC2012放到ymir-workplace/importing_pic下面; +2.把VOC2012放到 `ymir-workplace/ymir-sharing` 下面; -3.选择路径导入,填上路径地址/data/sharing/voc2012_train。 +3.选择路径导入,填上路径地址`/ymir-sharing/voc2012`。 完成初始数据集的导入后,点击【迭代数据准备】,完成对应的数据集和挖掘策略设置。其中训练集已设置为创建项目时默认的系统训练集,不可变更。 diff --git a/docker-compose.yml b/docker-compose.yml index 79b7c3f19a..fbef346caf 100755 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -12,19 +12,17 @@ services: - MODELS_PATH=${YMIR_PATH}/ymir-models - ASSETS_PATH=${YMIR_PATH}/ymir-assets - CONTROLLER_LOG_PATH=${YMIR_PATH}/ymir-data/logs - - DATA_SOURCE=/data/sharing # viz - VIZ_REDIS_URI=redis://:@viz-redis # app - DATABASE_URI=mysql+pymysql://${MYSQL_INITIAL_USER}:${MYSQL_INITIAL_PASSWORD}@db/ymir - GRPC_CHANNEL=127.0.0.1:50066 - VIZ_HOST=127.0.0.1:9099 - - SHARED_DATA_DIR=/data/sharing + - SHARED_DATA_DIR=/ymir-sharing - NGINX_DATA_PATH=/ymir-storage # arq - CHECK_INTERVAL_IN_SECONDS=30 # monitor - - POSTMAN_URL=http://127.0.0.1:8090 - MONITOR_URL=http://127.0.0.1:9098 - APP_API_HOST=127.0.0.1:80 volumes: @@ -40,7 +38,7 @@ services: - /lib64/libltdl.so.7:/lib64/libltdl.so.7 # app - ${YMIR_PATH}/ymir-storage:/ymir-storage - - ${YMIR_PATH}/importing_pic:/data/sharing + - ${YMIR_PATH}/ymir-sharing:/ymir-sharing # tensorboard - "${TENSORBOARD_ROOT}:${TENSORBOARD_ROOT}" depends_on: diff --git a/docker_executor/public_index.md b/docker_executor/public_index.md index dfc818db08..86adbc1611 100644 --- a/docker_executor/public_index.md +++ b/docker_executor/public_index.md @@ -2,6 +2,6 @@ |docker_name|functions|contributor|organization|description| |--|--|--|--|--| -|industryessentials/executor-det-yolov4-training:release-0.5.0|training|alfrat|-|yolov4 detection model training| -|industryessentials/executor-det-yolov4-mining:release-0.5.0|mining inference|alfrat|-|yolov4 detection model mining & inference| +|industryessentials/executor-det-yolov4-training:release-1.1.0|training|alfrat|-|yolov4 detection model training| +|industryessentials/executor-det-yolov4-mining:release-1.1.0|mining inference|alfrat|-|yolov4 detection model mining & inference| diff --git a/ymir.sh b/ymir.sh index dbeb7f61d4..0f3045f005 100755 --- a/ymir.sh +++ b/ymir.sh @@ -149,11 +149,16 @@ docker-compose up -d } print_help() { - printf '\nUsage: \n sh ymir.sh start/stop.\n' + printf '\nUsage: \n bash ymir.sh start/stop.\n' } # main main() { + if [ "$EUID" -eq 0 ] + then echo "Error: using sudo, this will cause permission issue." 
+ exit + fi + if [[ $# -eq 0 ]]; then print_help else diff --git a/ymir/Dockerfile.backend b/ymir/Dockerfile.backend index b8f6d30ae9..08d89b741d 100644 --- a/ymir/Dockerfile.backend +++ b/ymir/Dockerfile.backend @@ -14,6 +14,8 @@ RUN apt-get update \ && ln -s /usr/bin/python3 python \ && pip3 install -i ${PIP_SOURCE} --upgrade pip +RUN git config --global core.fileMode false + RUN pip install --no-cache-dir "uvicorn[standard]" gunicorn -i ${PIP_SOURCE} RUN mkdir -p /data/sharing/ @@ -23,7 +25,7 @@ COPY ./backend/requirements.txt ./ RUN pip3 install -r requirements.txt -i ${PIP_SOURCE} COPY ./command /command -RUN pip3 install -U /command +RUN pip3 install -U /command -i ${PIP_SOURCE} COPY ./backend/src /app WORKDIR /app diff --git a/ymir/backend/.bumpversion.cfg b/ymir/backend/.bumpversion.cfg index 3f62f84295..af6d29ae8d 100644 --- a/ymir/backend/.bumpversion.cfg +++ b/ymir/backend/.bumpversion.cfg @@ -1,3 +1,3 @@ [bumpversion] -current_version = 1.0.0 +current_version = 1.1.0 commit = False diff --git a/ymir/backend/deploy/supervisor/conf.d/ymir_service.ini b/ymir/backend/deploy/supervisor/conf.d/ymir_service.ini index 3df00f4d33..4d55c92e0f 100644 --- a/ymir/backend/deploy/supervisor/conf.d/ymir_service.ini +++ b/ymir/backend/deploy/supervisor/conf.d/ymir_service.ini @@ -20,7 +20,7 @@ startsecs=50 [program:viz_service] -command=/bin/bash -c "cd ymir_viz&&gunicorn -k gevent -c gunicorn_conf.py wsgi:connexion_app" +command=/bin/bash -c "cd ymir_viz && gunicorn -k gevent -c gunicorn_conf.py wsgi:connexion_app" numprocs=1 autostart=true autorestart=true @@ -83,7 +83,7 @@ startsecs=50 [program:app] -command=/bin/bash -c "cd ymir_app&&sh prestart.sh&&sh start.sh" +command=/bin/bash -c "cd ymir_app && sh prestart.sh && sh start.sh" numprocs=1 autostart=true autorestart=true @@ -103,48 +103,6 @@ stopasgroup=true startsecs=50 -[program:postman_app] -command=/bin/bash -c "cd ymir_postman && uvicorn pm_app_server:app --port 8090 --host 0.0.0.0 --log-config uvicorn_log_config.json" -numprocs=1 -autostart=true -autorestart=true -redirect_stderr=true ; redirect proc stderr to stdout (default false) -stdout_logfile=/app_logs/ymir_postman_app.log ; stdout log path, NONE for none; default AUTO -stdout_logfile_maxbytes=50MB ; max # logfile bytes b4 rotation (default 50MB) -stdout_logfile_backups=10 ; # of stdout logfile backups (0 means none, default 10)stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0) -stdout_events_enabled=false ; emit events on stdout writes (default false) -stdout_syslog=false ; send stdout to syslog with process name (default false) -stderr_logfile=/app_logs/ymir_postman_app.log ; stderr log path, NONE for none; default AUTO -stderr_logfile_maxbytes=50MB ; max # logfile bytes b4 rotation (default 50MB) -stderr_logfile_backups=10 ; # of stderr logfile backups (0 means none, default 10) -stderr_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0) -stderr_events_enabled=false ; emit events on stderr writes (default false) -stderr_syslog=false ; send stderr to syslog with process name (default false) -stopasgroup=true -startsecs=50 - - -[program:postman] -command=/bin/bash -c "cd ymir_postman && python pm_server.py" -numprocs=1 -autostart=true -autorestart=true -redirect_stderr=true ; redirect proc stderr to stdout (default false) -stdout_logfile=/app_logs/ymir_postman.log ; stdout log path, NONE for none; default AUTO -stdout_logfile_maxbytes=50MB ; max # logfile bytes b4 rotation (default 50MB) -stdout_logfile_backups=10 ; # of stdout logfile 
backups (0 means none, default 10)stdout_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0) -stdout_events_enabled=false ; emit events on stdout writes (default false) -stdout_syslog=false ; send stdout to syslog with process name (default false) -stderr_logfile=/app_logs/ymir_postman.log ; stderr log path, NONE for none; default AUTO -stderr_logfile_maxbytes=50MB ; max # logfile bytes b4 rotation (default 50MB) -stderr_logfile_backups=10 ; # of stderr logfile backups (0 means none, default 10) -stderr_capture_maxbytes=1MB ; number of bytes in 'capturemode' (default 0) -stderr_events_enabled=false ; emit events on stderr writes (default false) -stderr_syslog=false ; send stderr to syslog with process name (default false) -stopasgroup=true -startsecs=50 - - [program:monitor_crontab] command=python ymir_monitor/monitor/utils/crontab_job.py numprocs=1 diff --git a/ymir/backend/src/common/common_utils/percent_log_util.py b/ymir/backend/src/common/common_utils/percent_log_util.py index 51a1e74520..9ce17dea9c 100644 --- a/ymir/backend/src/common/common_utils/percent_log_util.py +++ b/ymir/backend/src/common/common_utils/percent_log_util.py @@ -57,7 +57,7 @@ def write_percent_log(log_file: str, raise RuntimeError("Invalid log_file") content_list: List[str] = [tid, f"{datetime.now().timestamp():.6f}", str(percent), str(state.value)] if error_code and error_message: - content_list.extend([str(error_code), error_message]) + content_list.extend([str(int(error_code)), error_message]) content = '\t'.join(content_list) if msg: content = '\n'.join([content, msg]) diff --git a/ymir/backend/src/common/id_definition/error_codes.py b/ymir/backend/src/common/id_definition/error_codes.py index c089288345..2d3b1051ee 100644 --- a/ymir/backend/src/common/id_definition/error_codes.py +++ b/ymir/backend/src/common/id_definition/error_codes.py @@ -17,6 +17,8 @@ class CTLResponseCode(IntEnum): INVOKER_INIT_ERROR = 130601 INVOKER_LABEL_TASK_UNKNOWN_ERROR = 130602 INVOKER_LABEL_TASK_NETWORK_ERROR = 130603 + INVOKER_HTTP_ERROR = 130604 + INVOKER_UNKNOWN_ERROR = 130605 @unique @@ -24,6 +26,7 @@ class VizErrorCode(IntEnum): GENERAL_ERROR = 140400 BRANCH_NOT_EXISTS = 140401 MODEL_NOT_EXISTS = 140402 + DATASET_EVALUATION_NOT_EXISTS = 140403 INTERNAL_ERROR = 140500 @@ -33,6 +36,7 @@ class MonitorErrorCode(IntEnum): DUPLICATE_TASK_ID = 150401 PERCENT_LOG_FILE_ERROR = 150402 PERCENT_LOG_WEIGHT_ERROR = 150403 + PERCENT_LOG_PARSE_ERROR = 150404 INTERNAL_ERROR = 150500 @@ -48,6 +52,7 @@ class APIErrorCode(IntEnum): FAILED_TO_DOWNLOAD = 110108 INVALID_CONFIGURATION = 110109 INVALID_SCOPE = 110110 + FAILED_TO_PROCESS_PROTECTED_RESOURCES = 110111 USER_NOT_FOUND = 110201 USER_DUPLICATED_NAME = 110202 @@ -63,6 +68,7 @@ class APIErrorCode(IntEnum): DATASET_NOT_ACCESSIBLE = 110403 DATASET_FAILED_TO_CREATE = 110404 DATASET_PROTECTED_TO_DELETE = 110405 + DATASETS_NOT_IN_SAME_GROUP = 110406 ASSET_NOT_FOUND = 110501 @@ -114,3 +120,8 @@ class APIErrorCode(IntEnum): ITERATION_COULD_NOT_UPDATE_STAGE = 111704 FAILED_TO_IMPORT_MODEL = 111801 + + REFUSE_TO_PROCESS_MIXED_OPERATIONS = 111901 + FAILED_TO_EVALUATE = 111902 + DATASET_EVALUATION_NOT_FOUND = 111903 + MISSING_OPERATIONS = 111904 diff --git a/ymir/backend/src/common/proto/backend.proto b/ymir/backend/src/common/proto/backend.proto index eb4ff29eeb..41c8db259d 100644 --- a/ymir/backend/src/common/proto/backend.proto +++ b/ymir/backend/src/common/proto/backend.proto @@ -29,7 +29,7 @@ enum TaskType { TaskTypeCopyModel = 14; TaskTypeDatasetInfer = 15; - reserved 12; + 
reserved 12, 16; }; enum LabelFormat { @@ -71,6 +71,9 @@ enum RequestType { CMD_PULL_IMAGE = 16; CMD_GPU_INFO_GET = 17; CMD_SAMPLING = 18; + CMD_EVALUATE = 19; + CMD_REPO_CHECK = 20; + CMD_REPO_CLEAR = 21; // Sandbox path operation USER_LIST = 101; @@ -120,6 +123,7 @@ message GeneralReq { } string task_parameters = 24; LabelCollection label_collection = 25; + EvaluateConfig evaluate_config = 26; ReqCreateTask req_create_task = 1001; reserved 17, 1002; @@ -134,6 +138,7 @@ message GeneralResp { map docker_image_config = 7; int32 available_gpu_counts = 8; LabelCollection label_collection = 9; + bool ops_ret = 10; RespCMDInference detection = 1001; reserved 5, 1000; @@ -272,6 +277,14 @@ message Label { string update_time = 5; // RFC 3339 date strings } +message EvaluateConfig { + // confidence threshold, 0 to 1 + float conf_thr = 1; + // from:to:step, to value is excluded (same as python range) + string iou_thrs_interval = 2; + // need pr curve in evaluation result, default is false + bool need_pr_curve = 3; +} service mir_controller_service { /* diff --git a/ymir/backend/src/common/proto/backend_pb2.py b/ymir/backend/src/common/proto/backend_pb2.py index ce65737820..e1da373ed3 100644 --- a/ymir/backend/src/common/proto/backend_pb2.py +++ b/ymir/backend/src/common/proto/backend_pb2.py @@ -20,7 +20,7 @@ syntax='proto3', serialized_options=None, create_key=_descriptor._internal_create_key, - serialized_pb=b'\n\rbackend.proto\x12\x0cymir.backend\"\xe3\x05\n\nGeneralReq\x12\x0f\n\x07user_id\x18\x01 \x01(\t\x12\x0f\n\x07repo_id\x18\x02 \x01(\t\x12+\n\x08req_type\x18\x03 \x01(\x0e\x32\x19.ymir.backend.RequestType\x12\x0f\n\x07task_id\x18\x04 \x01(\t\x12\x14\n\x0csingleton_op\x18\x05 \x01(\t\x12\x13\n\x0bhis_task_id\x18\x06 \x01(\t\x12\x16\n\x0e\x64st_dataset_id\x18\x07 \x01(\t\x12\x16\n\x0ein_dataset_ids\x18\x08 \x03(\t\x12\x16\n\x0e\x65x_dataset_ids\x18\t \x03(\t\x12\x14\n\x0cin_class_ids\x18\n \x03(\x05\x12\x14\n\x0c\x65x_class_ids\x18\x0b \x03(\x05\x12\r\n\x05\x66orce\x18\x0c \x01(\x08\x12\x16\n\x0e\x63ommit_message\x18\r \x01(\t\x12\x12\n\nmodel_hash\x18\x0e \x01(\t\x12\x11\n\tasset_dir\x18\x0f \x01(\t\x12\x1b\n\x13\x64ocker_image_config\x18\x10 \x01(\t\x12\x12\n\ncheck_only\x18\x12 \x01(\x08\x12\x16\n\x0e\x65xecutant_name\x18\x13 \x01(\t\x12\x33\n\x0emerge_strategy\x18\x14 \x01(\x0e\x32\x1b.ymir.backend.MergeStrategy\x12\x34\n\x14terminated_task_type\x18\x15 \x01(\x0e\x32\x16.ymir.backend.TaskType\x12\x18\n\x0esampling_count\x18\x16 \x01(\x05H\x00\x12\x17\n\rsampling_rate\x18\x17 \x01(\x02H\x00\x12\x17\n\x0ftask_parameters\x18\x18 \x01(\t\x12\x37\n\x10label_collection\x18\x19 \x01(\x0b\x32\x1d.ymir.backend.LabelCollection\x12\x35\n\x0freq_create_task\x18\xe9\x07 \x01(\x0b\x32\x1b.ymir.backend.ReqCreateTaskB\n\n\x08samplingJ\x04\x08\x11\x10\x12J\x06\x08\xea\x07\x10\xeb\x07\"\x86\x03\n\x0bGeneralResp\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0breq_task_id\x18\x02 \x01(\t\x12\x0f\n\x07message\x18\x03 \x01(\t\x12\x10\n\x08\x65xt_strs\x18\x04 \x03(\t\x12\x0f\n\x07hash_id\x18\x06 \x01(\t\x12M\n\x13\x64ocker_image_config\x18\x07 \x03(\x0b\x32\x30.ymir.backend.GeneralResp.DockerImageConfigEntry\x12\x1c\n\x14\x61vailable_gpu_counts\x18\x08 \x01(\x05\x12\x37\n\x10label_collection\x18\t \x01(\x0b\x32\x1d.ymir.backend.LabelCollection\x12\x32\n\tdetection\x18\xe9\x07 \x01(\x0b\x32\x1e.ymir.backend.RespCMDInference\x1a\x38\n\x16\x44ockerImageConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01J\x04\x08\x05\x10\x06J\x06\x08\xe8\x07\x10\xe9\x07\"\xdb\x04\n\rReqCreateTask\x12)\n\ttask_type\x18\x01 \x01(\x0e\x32\x16.ymir.backend.TaskType\x12\x15\n\rsampling_rate\x18\x02 \x01(\x02\x12\x17\n\x0fno_task_monitor\x18\x03 \x01(\x08\x12/\n\x06\x66ilter\x18\x65 \x01(\x0b\x32\x1b.ymir.backend.TaskReqFilterB\x02\x18\x01\x12/\n\x08training\x18\x66 \x01(\x0b\x32\x1d.ymir.backend.TaskReqTraining\x12+\n\x06mining\x18g \x01(\x0b\x32\x1b.ymir.backend.TaskReqMining\x12\x31\n\timporting\x18h \x01(\x0b\x32\x1e.ymir.backend.TaskReqImporting\x12\x31\n\texporting\x18i \x01(\x0b\x32\x1e.ymir.backend.TaskReqExporting\x12\x31\n\tinference\x18j \x01(\x0b\x32\x1e.ymir.backend.TaskReqInference\x12+\n\x04\x63opy\x18k \x01(\x0b\x32\x1d.ymir.backend.TaskReqCopyData\x12/\n\x08labeling\x18l \x01(\x0b\x32\x1d.ymir.backend.TaskReqLabeling\x12+\n\x06\x66usion\x18m \x01(\x0b\x32\x1b.ymir.backend.TaskReqFusion\x12<\n\x0fmodel_importing\x18n \x01(\x0b\x32#.ymir.backend.TaskReqModelImporting\"S\n\rTaskReqFilter\x12\x16\n\x0ein_dataset_ids\x18\x01 \x03(\t\x12\x14\n\x0cin_class_ids\x18\x02 \x03(\x05\x12\x14\n\x0c\x65x_class_ids\x18\x03 \x03(\x05\"\xd2\x01\n\x0fTaskReqTraining\x12K\n\x10in_dataset_types\x18\x01 \x03(\x0b\x32\x31.ymir.backend.TaskReqTraining.TrainingDatasetType\x12\x14\n\x0cin_class_ids\x18\x02 \x03(\x05\x1aV\n\x13TrainingDatasetType\x12\x12\n\ndataset_id\x18\x01 \x01(\t\x12+\n\x0c\x64\x61taset_type\x18\x02 \x01(\x0e\x32\x15.ymir.backend.TvtTypeJ\x04\x08\x03\x10\x04\"x\n\rTaskReqMining\x12\x16\n\x0ein_dataset_ids\x18\x01 \x03(\t\x12\x16\n\x0e\x65x_dataset_ids\x18\x02 \x03(\t\x12\r\n\x05top_k\x18\x04 \x01(\x05\x12\x1c\n\x14generate_annotations\x18\x06 \x01(\x08J\x04\x08\x03\x10\x04J\x04\x08\x05\x10\x06\"[\n\x10TaskReqImporting\x12\x11\n\tasset_dir\x18\x01 \x01(\t\x12\x16\n\x0e\x61nnotation_dir\x18\x02 \x01(\t\x12\x1c\n\x14name_strategy_ignore\x18\x03 \x01(\x08\"|\n\x10TaskReqExporting\x12\x12\n\ndataset_id\x18\x01 \x01(\t\x12)\n\x06\x66ormat\x18\x02 \x01(\x0e\x32\x19.ymir.backend.LabelFormat\x12\x11\n\tasset_dir\x18\x03 \x01(\t\x12\x16\n\x0e\x61nnotation_dir\x18\x04 \x01(\t\"\x12\n\x10TaskReqInference\"\x8b\x01\n\x0fTaskReqCopyData\x12\x13\n\x0bsrc_user_id\x18\x01 \x01(\t\x12\x13\n\x0bsrc_repo_id\x18\x02 \x01(\t\x12\x16\n\x0esrc_dataset_id\x18\x03 \x01(\t\x12\x1c\n\x14name_strategy_ignore\x18\x04 \x01(\x08\x12\x18\n\x10\x64rop_annotations\x18\x05 \x01(\x08\"\xa6\x01\n\x0fTaskReqLabeling\x12\x12\n\ndataset_id\x18\x01 \x01(\t\x12\x18\n\x10labeler_accounts\x18\x02 \x03(\t\x12\x14\n\x0cin_class_ids\x18\x03 \x03(\x05\x12\x1e\n\x16\x65xpert_instruction_url\x18\x04 \x01(\t\x12\x14\n\x0cproject_name\x18\x05 \x01(\t\x12\x19\n\x11\x65xport_annotation\x18\x06 \x01(\x08\"\xcd\x01\n\rTaskReqFusion\x12\x16\n\x0ein_dataset_ids\x18\x01 \x03(\t\x12\x16\n\x0e\x65x_dataset_ids\x18\x02 \x03(\t\x12\x33\n\x0emerge_strategy\x18\x03 \x01(\x0e\x32\x1b.ymir.backend.MergeStrategy\x12\x14\n\x0cin_class_ids\x18\x04 \x03(\x05\x12\x14\n\x0c\x65x_class_ids\x18\x05 \x03(\x05\x12\x0f\n\x05\x63ount\x18\x06 \x01(\x05H\x00\x12\x0e\n\x04rate\x18\x07 \x01(\x02H\x00\x42\n\n\x08sampling\"3\n\x15TaskReqModelImporting\x12\x1a\n\x12model_package_path\x18\x01 \x01(\t\"\xc2\x01\n\x10RespCMDInference\x12O\n\x11image_annotations\x18\x01 \x03(\x0b\x32\x34.ymir.backend.RespCMDInference.ImageAnnotationsEntry\x1a]\n\x15ImageAnnotationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x33\n\x05value\x18\x02 \x01(\x0b\x32$.ymir.backend.SingleImageAnnotations:\x02\x38\x01\"G\n\x16SingleImageAnnotations\x12-\n\x0b\x61nnotations\x18\x02 
\x03(\x0b\x32\x18.ymir.backend.Annotation\"q\n\nAnnotation\x12\r\n\x05index\x18\x01 \x01(\x05\x12\x1f\n\x03\x62ox\x18\x02 \x01(\x0b\x32\x12.ymir.backend.Rect\x12\x10\n\x08\x63lass_id\x18\x03 \x01(\x05\x12\r\n\x05score\x18\x04 \x01(\x01\x12\x12\n\nclass_name\x18\x05 \x01(\t\"2\n\x04Rect\x12\t\n\x01x\x18\x01 \x01(\x05\x12\t\n\x01y\x18\x02 \x01(\x05\x12\t\n\x01w\x18\x03 \x01(\x05\x12\t\n\x01h\x18\x04 \x01(\x05\"6\n\x0fLabelCollection\x12#\n\x06labels\x18\x01 \x03(\x0b\x32\x13.ymir.backend.Label\"\\\n\x05Label\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0f\n\x07\x61liases\x18\x03 \x03(\t\x12\x13\n\x0b\x63reate_time\x18\x04 \x01(\t\x12\x13\n\x0bupdate_time\x18\x05 \x01(\t*Z\n\x07TvtType\x12\x12\n\x0eTvtTypeUnknown\x10\x00\x12\x13\n\x0fTvtTypeTraining\x10\x01\x12\x15\n\x11TvtTypeValidation\x10\x02\x12\x0f\n\x0bTvtTypeTest\x10\x03*\xd6\x02\n\x08TaskType\x12\x13\n\x0fTaskTypeUnknown\x10\x00\x12\x14\n\x10TaskTypeTraining\x10\x01\x12\x12\n\x0eTaskTypeMining\x10\x02\x12\x11\n\rTaskTypeLabel\x10\x03\x12\x12\n\x0eTaskTypeFilter\x10\x04\x12\x16\n\x12TaskTypeImportData\x10\x05\x12\x16\n\x12TaskTypeExportData\x10\x06\x12\x14\n\x10TaskTypeCopyData\x10\x07\x12\x11\n\rTaskTypeMerge\x10\x08\x12\x11\n\rTaskTypeInfer\x10\t\x12\x14\n\x10TaskTypeSampling\x10\n\x12\x12\n\x0eTaskTypeFusion\x10\x0b\x12\x17\n\x13TaskTypeImportModel\x10\r\x12\x15\n\x11TaskTypeCopyModel\x10\x0e\x12\x18\n\x14TaskTypeDatasetInfer\x10\x0f\"\x04\x08\x0c\x10\x0c*S\n\x0bLabelFormat\x12\x11\n\rNO_ANNOTATION\x10\x00\x12\x0e\n\nPASCAL_VOC\x10\x01\x12\n\n\x06IF_ARK\x10\x02\x12\x15\n\x11LABEL_STUDIO_JSON\x10\x03*U\n\nMirStorage\x12\x11\n\rMIR_METADATAS\x10\x00\x12\x13\n\x0fMIR_ANNOTATIONS\x10\x01\x12\x10\n\x0cMIR_KEYWORDS\x10\x02\x12\r\n\tMIR_TASKS\x10\x03*#\n\rMergeStrategy\x12\x08\n\x04STOP\x10\x00\x12\x08\n\x04HOST\x10\x01*\xd6\x03\n\x0bRequestType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x12\n\x0e\x43MD_BRANCH_DEL\x10\x01\x12\x13\n\x0f\x43MD_BRANCH_LIST\x10\x02\x12\x17\n\x13\x43MD_BRANCH_CHECKOUT\x10\x03\x12\x15\n\x11\x43MD_BRANCH_CREATE\x10\x04\x12\r\n\tCMD_CLONE\x10\x05\x12\x0e\n\nCMD_COMMIT\x10\x06\x12\x0e\n\nCMD_FILTER\x10\x07\x12\x0c\n\x08\x43MD_INIT\x10\x08\x12\x0b\n\x07\x43MD_LOG\x10\t\x12\r\n\tCMD_MERGE\x10\n\x12\x11\n\rCMD_INFERENCE\x10\x0b\x12\x11\n\rCMD_LABEL_ADD\x10\x0c\x12\x11\n\rCMD_LABEL_GET\x10\r\x12\x11\n\rCMD_TERMINATE\x10\x0e\x12\x12\n\x0e\x43MD_PULL_IMAGE\x10\x10\x12\x14\n\x10\x43MD_GPU_INFO_GET\x10\x11\x12\x10\n\x0c\x43MD_SAMPLING\x10\x12\x12\r\n\tUSER_LIST\x10\x65\x12\x0f\n\x0bUSER_CREATE\x10\x66\x12\x0f\n\x0bUSER_REMOVE\x10g\x12\r\n\tREPO_LIST\x10h\x12\x0f\n\x0bREPO_CREATE\x10i\x12\x0f\n\x0bREPO_REMOVE\x10j\x12\x10\n\x0bTASK_CREATE\x10\xe9\x07\"\x04\x08\x0f\x10\x0f\"\x06\x08\xea\x07\x10\xea\x07\x32\x66\n\x16mir_controller_service\x12L\n\x13\x64\x61ta_manage_request\x12\x18.ymir.backend.GeneralReq\x1a\x19.ymir.backend.GeneralResp\"\x00\x62\x06proto3' + serialized_pb=b'\n\rbackend.proto\x12\x0cymir.backend\"\x9a\x06\n\nGeneralReq\x12\x0f\n\x07user_id\x18\x01 \x01(\t\x12\x0f\n\x07repo_id\x18\x02 \x01(\t\x12+\n\x08req_type\x18\x03 \x01(\x0e\x32\x19.ymir.backend.RequestType\x12\x0f\n\x07task_id\x18\x04 \x01(\t\x12\x14\n\x0csingleton_op\x18\x05 \x01(\t\x12\x13\n\x0bhis_task_id\x18\x06 \x01(\t\x12\x16\n\x0e\x64st_dataset_id\x18\x07 \x01(\t\x12\x16\n\x0ein_dataset_ids\x18\x08 \x03(\t\x12\x16\n\x0e\x65x_dataset_ids\x18\t \x03(\t\x12\x14\n\x0cin_class_ids\x18\n \x03(\x05\x12\x14\n\x0c\x65x_class_ids\x18\x0b \x03(\x05\x12\r\n\x05\x66orce\x18\x0c \x01(\x08\x12\x16\n\x0e\x63ommit_message\x18\r 
\x01(\t\x12\x12\n\nmodel_hash\x18\x0e \x01(\t\x12\x11\n\tasset_dir\x18\x0f \x01(\t\x12\x1b\n\x13\x64ocker_image_config\x18\x10 \x01(\t\x12\x12\n\ncheck_only\x18\x12 \x01(\x08\x12\x16\n\x0e\x65xecutant_name\x18\x13 \x01(\t\x12\x33\n\x0emerge_strategy\x18\x14 \x01(\x0e\x32\x1b.ymir.backend.MergeStrategy\x12\x34\n\x14terminated_task_type\x18\x15 \x01(\x0e\x32\x16.ymir.backend.TaskType\x12\x18\n\x0esampling_count\x18\x16 \x01(\x05H\x00\x12\x17\n\rsampling_rate\x18\x17 \x01(\x02H\x00\x12\x17\n\x0ftask_parameters\x18\x18 \x01(\t\x12\x37\n\x10label_collection\x18\x19 \x01(\x0b\x32\x1d.ymir.backend.LabelCollection\x12\x35\n\x0f\x65valuate_config\x18\x1a \x01(\x0b\x32\x1c.ymir.backend.EvaluateConfig\x12\x35\n\x0freq_create_task\x18\xe9\x07 \x01(\x0b\x32\x1b.ymir.backend.ReqCreateTaskB\n\n\x08samplingJ\x04\x08\x11\x10\x12J\x06\x08\xea\x07\x10\xeb\x07\"\x97\x03\n\x0bGeneralResp\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0breq_task_id\x18\x02 \x01(\t\x12\x0f\n\x07message\x18\x03 \x01(\t\x12\x10\n\x08\x65xt_strs\x18\x04 \x03(\t\x12\x0f\n\x07hash_id\x18\x06 \x01(\t\x12M\n\x13\x64ocker_image_config\x18\x07 \x03(\x0b\x32\x30.ymir.backend.GeneralResp.DockerImageConfigEntry\x12\x1c\n\x14\x61vailable_gpu_counts\x18\x08 \x01(\x05\x12\x37\n\x10label_collection\x18\t \x01(\x0b\x32\x1d.ymir.backend.LabelCollection\x12\x0f\n\x07ops_ret\x18\n \x01(\x08\x12\x32\n\tdetection\x18\xe9\x07 \x01(\x0b\x32\x1e.ymir.backend.RespCMDInference\x1a\x38\n\x16\x44ockerImageConfigEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01J\x04\x08\x05\x10\x06J\x06\x08\xe8\x07\x10\xe9\x07\"\xdb\x04\n\rReqCreateTask\x12)\n\ttask_type\x18\x01 \x01(\x0e\x32\x16.ymir.backend.TaskType\x12\x15\n\rsampling_rate\x18\x02 \x01(\x02\x12\x17\n\x0fno_task_monitor\x18\x03 \x01(\x08\x12/\n\x06\x66ilter\x18\x65 \x01(\x0b\x32\x1b.ymir.backend.TaskReqFilterB\x02\x18\x01\x12/\n\x08training\x18\x66 \x01(\x0b\x32\x1d.ymir.backend.TaskReqTraining\x12+\n\x06mining\x18g \x01(\x0b\x32\x1b.ymir.backend.TaskReqMining\x12\x31\n\timporting\x18h \x01(\x0b\x32\x1e.ymir.backend.TaskReqImporting\x12\x31\n\texporting\x18i \x01(\x0b\x32\x1e.ymir.backend.TaskReqExporting\x12\x31\n\tinference\x18j \x01(\x0b\x32\x1e.ymir.backend.TaskReqInference\x12+\n\x04\x63opy\x18k \x01(\x0b\x32\x1d.ymir.backend.TaskReqCopyData\x12/\n\x08labeling\x18l \x01(\x0b\x32\x1d.ymir.backend.TaskReqLabeling\x12+\n\x06\x66usion\x18m \x01(\x0b\x32\x1b.ymir.backend.TaskReqFusion\x12<\n\x0fmodel_importing\x18n \x01(\x0b\x32#.ymir.backend.TaskReqModelImporting\"S\n\rTaskReqFilter\x12\x16\n\x0ein_dataset_ids\x18\x01 \x03(\t\x12\x14\n\x0cin_class_ids\x18\x02 \x03(\x05\x12\x14\n\x0c\x65x_class_ids\x18\x03 \x03(\x05\"\xd2\x01\n\x0fTaskReqTraining\x12K\n\x10in_dataset_types\x18\x01 \x03(\x0b\x32\x31.ymir.backend.TaskReqTraining.TrainingDatasetType\x12\x14\n\x0cin_class_ids\x18\x02 \x03(\x05\x1aV\n\x13TrainingDatasetType\x12\x12\n\ndataset_id\x18\x01 \x01(\t\x12+\n\x0c\x64\x61taset_type\x18\x02 \x01(\x0e\x32\x15.ymir.backend.TvtTypeJ\x04\x08\x03\x10\x04\"x\n\rTaskReqMining\x12\x16\n\x0ein_dataset_ids\x18\x01 \x03(\t\x12\x16\n\x0e\x65x_dataset_ids\x18\x02 \x03(\t\x12\r\n\x05top_k\x18\x04 \x01(\x05\x12\x1c\n\x14generate_annotations\x18\x06 \x01(\x08J\x04\x08\x03\x10\x04J\x04\x08\x05\x10\x06\"[\n\x10TaskReqImporting\x12\x11\n\tasset_dir\x18\x01 \x01(\t\x12\x16\n\x0e\x61nnotation_dir\x18\x02 \x01(\t\x12\x1c\n\x14name_strategy_ignore\x18\x03 \x01(\x08\"|\n\x10TaskReqExporting\x12\x12\n\ndataset_id\x18\x01 \x01(\t\x12)\n\x06\x66ormat\x18\x02 
\x01(\x0e\x32\x19.ymir.backend.LabelFormat\x12\x11\n\tasset_dir\x18\x03 \x01(\t\x12\x16\n\x0e\x61nnotation_dir\x18\x04 \x01(\t\"\x12\n\x10TaskReqInference\"\x8b\x01\n\x0fTaskReqCopyData\x12\x13\n\x0bsrc_user_id\x18\x01 \x01(\t\x12\x13\n\x0bsrc_repo_id\x18\x02 \x01(\t\x12\x16\n\x0esrc_dataset_id\x18\x03 \x01(\t\x12\x1c\n\x14name_strategy_ignore\x18\x04 \x01(\x08\x12\x18\n\x10\x64rop_annotations\x18\x05 \x01(\x08\"\xa6\x01\n\x0fTaskReqLabeling\x12\x12\n\ndataset_id\x18\x01 \x01(\t\x12\x18\n\x10labeler_accounts\x18\x02 \x03(\t\x12\x14\n\x0cin_class_ids\x18\x03 \x03(\x05\x12\x1e\n\x16\x65xpert_instruction_url\x18\x04 \x01(\t\x12\x14\n\x0cproject_name\x18\x05 \x01(\t\x12\x19\n\x11\x65xport_annotation\x18\x06 \x01(\x08\"\xcd\x01\n\rTaskReqFusion\x12\x16\n\x0ein_dataset_ids\x18\x01 \x03(\t\x12\x16\n\x0e\x65x_dataset_ids\x18\x02 \x03(\t\x12\x33\n\x0emerge_strategy\x18\x03 \x01(\x0e\x32\x1b.ymir.backend.MergeStrategy\x12\x14\n\x0cin_class_ids\x18\x04 \x03(\x05\x12\x14\n\x0c\x65x_class_ids\x18\x05 \x03(\x05\x12\x0f\n\x05\x63ount\x18\x06 \x01(\x05H\x00\x12\x0e\n\x04rate\x18\x07 \x01(\x02H\x00\x42\n\n\x08sampling\"3\n\x15TaskReqModelImporting\x12\x1a\n\x12model_package_path\x18\x01 \x01(\t\"\xc2\x01\n\x10RespCMDInference\x12O\n\x11image_annotations\x18\x01 \x03(\x0b\x32\x34.ymir.backend.RespCMDInference.ImageAnnotationsEntry\x1a]\n\x15ImageAnnotationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x33\n\x05value\x18\x02 \x01(\x0b\x32$.ymir.backend.SingleImageAnnotations:\x02\x38\x01\"G\n\x16SingleImageAnnotations\x12-\n\x0b\x61nnotations\x18\x02 \x03(\x0b\x32\x18.ymir.backend.Annotation\"q\n\nAnnotation\x12\r\n\x05index\x18\x01 \x01(\x05\x12\x1f\n\x03\x62ox\x18\x02 \x01(\x0b\x32\x12.ymir.backend.Rect\x12\x10\n\x08\x63lass_id\x18\x03 \x01(\x05\x12\r\n\x05score\x18\x04 \x01(\x01\x12\x12\n\nclass_name\x18\x05 \x01(\t\"2\n\x04Rect\x12\t\n\x01x\x18\x01 \x01(\x05\x12\t\n\x01y\x18\x02 \x01(\x05\x12\t\n\x01w\x18\x03 \x01(\x05\x12\t\n\x01h\x18\x04 \x01(\x05\"6\n\x0fLabelCollection\x12#\n\x06labels\x18\x01 \x03(\x0b\x32\x13.ymir.backend.Label\"\\\n\x05Label\x12\n\n\x02id\x18\x01 \x01(\x05\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0f\n\x07\x61liases\x18\x03 \x03(\t\x12\x13\n\x0b\x63reate_time\x18\x04 \x01(\t\x12\x13\n\x0bupdate_time\x18\x05 \x01(\t\"T\n\x0e\x45valuateConfig\x12\x10\n\x08\x63onf_thr\x18\x01 \x01(\x02\x12\x19\n\x11iou_thrs_interval\x18\x02 \x01(\t\x12\x15\n\rneed_pr_curve\x18\x03 
\x01(\x08*Z\n\x07TvtType\x12\x12\n\x0eTvtTypeUnknown\x10\x00\x12\x13\n\x0fTvtTypeTraining\x10\x01\x12\x15\n\x11TvtTypeValidation\x10\x02\x12\x0f\n\x0bTvtTypeTest\x10\x03*\xdc\x02\n\x08TaskType\x12\x13\n\x0fTaskTypeUnknown\x10\x00\x12\x14\n\x10TaskTypeTraining\x10\x01\x12\x12\n\x0eTaskTypeMining\x10\x02\x12\x11\n\rTaskTypeLabel\x10\x03\x12\x12\n\x0eTaskTypeFilter\x10\x04\x12\x16\n\x12TaskTypeImportData\x10\x05\x12\x16\n\x12TaskTypeExportData\x10\x06\x12\x14\n\x10TaskTypeCopyData\x10\x07\x12\x11\n\rTaskTypeMerge\x10\x08\x12\x11\n\rTaskTypeInfer\x10\t\x12\x14\n\x10TaskTypeSampling\x10\n\x12\x12\n\x0eTaskTypeFusion\x10\x0b\x12\x17\n\x13TaskTypeImportModel\x10\r\x12\x15\n\x11TaskTypeCopyModel\x10\x0e\x12\x18\n\x14TaskTypeDatasetInfer\x10\x0f\"\x04\x08\x0c\x10\x0c\"\x04\x08\x10\x10\x10*S\n\x0bLabelFormat\x12\x11\n\rNO_ANNOTATION\x10\x00\x12\x0e\n\nPASCAL_VOC\x10\x01\x12\n\n\x06IF_ARK\x10\x02\x12\x15\n\x11LABEL_STUDIO_JSON\x10\x03*U\n\nMirStorage\x12\x11\n\rMIR_METADATAS\x10\x00\x12\x13\n\x0fMIR_ANNOTATIONS\x10\x01\x12\x10\n\x0cMIR_KEYWORDS\x10\x02\x12\r\n\tMIR_TASKS\x10\x03*#\n\rMergeStrategy\x12\x08\n\x04STOP\x10\x00\x12\x08\n\x04HOST\x10\x01*\x90\x04\n\x0bRequestType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x12\n\x0e\x43MD_BRANCH_DEL\x10\x01\x12\x13\n\x0f\x43MD_BRANCH_LIST\x10\x02\x12\x17\n\x13\x43MD_BRANCH_CHECKOUT\x10\x03\x12\x15\n\x11\x43MD_BRANCH_CREATE\x10\x04\x12\r\n\tCMD_CLONE\x10\x05\x12\x0e\n\nCMD_COMMIT\x10\x06\x12\x0e\n\nCMD_FILTER\x10\x07\x12\x0c\n\x08\x43MD_INIT\x10\x08\x12\x0b\n\x07\x43MD_LOG\x10\t\x12\r\n\tCMD_MERGE\x10\n\x12\x11\n\rCMD_INFERENCE\x10\x0b\x12\x11\n\rCMD_LABEL_ADD\x10\x0c\x12\x11\n\rCMD_LABEL_GET\x10\r\x12\x11\n\rCMD_TERMINATE\x10\x0e\x12\x12\n\x0e\x43MD_PULL_IMAGE\x10\x10\x12\x14\n\x10\x43MD_GPU_INFO_GET\x10\x11\x12\x10\n\x0c\x43MD_SAMPLING\x10\x12\x12\x10\n\x0c\x43MD_EVALUATE\x10\x13\x12\x12\n\x0e\x43MD_REPO_CHECK\x10\x14\x12\x12\n\x0e\x43MD_REPO_CLEAR\x10\x15\x12\r\n\tUSER_LIST\x10\x65\x12\x0f\n\x0bUSER_CREATE\x10\x66\x12\x0f\n\x0bUSER_REMOVE\x10g\x12\r\n\tREPO_LIST\x10h\x12\x0f\n\x0bREPO_CREATE\x10i\x12\x0f\n\x0bREPO_REMOVE\x10j\x12\x10\n\x0bTASK_CREATE\x10\xe9\x07\"\x04\x08\x0f\x10\x0f\"\x06\x08\xea\x07\x10\xea\x07\x32\x66\n\x16mir_controller_service\x12L\n\x13\x64\x61ta_manage_request\x12\x18.ymir.backend.GeneralReq\x1a\x19.ymir.backend.GeneralResp\"\x00\x62\x06proto3' ) _TVTTYPE = _descriptor.EnumDescriptor( @@ -53,8 +53,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=3590, - serialized_end=3680, + serialized_start=3748, + serialized_end=3838, ) _sym_db.RegisterEnumDescriptor(_TVTTYPE) @@ -144,8 +144,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=3683, - serialized_end=4025, + serialized_start=3841, + serialized_end=4189, ) _sym_db.RegisterEnumDescriptor(_TASKTYPE) @@ -180,8 +180,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=4027, - serialized_end=4110, + serialized_start=4191, + serialized_end=4274, ) _sym_db.RegisterEnumDescriptor(_LABELFORMAT) @@ -216,8 +216,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=4112, - serialized_end=4197, + serialized_start=4276, + serialized_end=4361, ) _sym_db.RegisterEnumDescriptor(_MIRSTORAGE) @@ -242,8 +242,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=4199, - serialized_end=4234, + serialized_start=4363, + serialized_end=4398, ) _sym_db.RegisterEnumDescriptor(_MERGESTRATEGY) @@ -346,45 +346,60 @@ type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( - 
name='USER_LIST', index=18, number=101, + name='CMD_EVALUATE', index=18, number=19, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( - name='USER_CREATE', index=19, number=102, + name='CMD_REPO_CHECK', index=19, number=20, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( - name='USER_REMOVE', index=20, number=103, + name='CMD_REPO_CLEAR', index=20, number=21, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( - name='REPO_LIST', index=21, number=104, + name='USER_LIST', index=21, number=101, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( - name='REPO_CREATE', index=22, number=105, + name='USER_CREATE', index=22, number=102, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( - name='REPO_REMOVE', index=23, number=106, + name='USER_REMOVE', index=23, number=103, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), _descriptor.EnumValueDescriptor( - name='TASK_CREATE', index=24, number=1001, + name='REPO_LIST', index=24, number=104, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='REPO_CREATE', index=25, number=105, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='REPO_REMOVE', index=26, number=106, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TASK_CREATE', index=27, number=1001, serialized_options=None, type=None, create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, - serialized_start=4237, - serialized_end=4707, + serialized_start=4401, + serialized_end=4929, ) _sym_db.RegisterEnumDescriptor(_REQUESTTYPE) @@ -436,6 +451,9 @@ CMD_PULL_IMAGE = 16 CMD_GPU_INFO_GET = 17 CMD_SAMPLING = 18 +CMD_EVALUATE = 19 +CMD_REPO_CHECK = 20 +CMD_REPO_CLEAR = 21 USER_LIST = 101 USER_CREATE = 102 USER_REMOVE = 103 @@ -623,7 +641,14 @@ is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='req_create_task', full_name='ymir.backend.GeneralReq.req_create_task', index=24, + name='evaluate_config', full_name='ymir.backend.GeneralReq.evaluate_config', index=24, + number=26, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='req_create_task', full_name='ymir.backend.GeneralReq.req_create_task', index=25, number=1001, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, @@ -647,7 +672,7 @@ fields=[]), ], serialized_start=32, - serialized_end=771, + serialized_end=826, ) @@ -685,8 +710,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1094, - serialized_end=1150, + serialized_start=1166, + serialized_end=1222, ) _GENERALRESP = _descriptor.Descriptor( @@ -754,7 +779,14 @@ is_extension=False, extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='detection', full_name='ymir.backend.GeneralResp.detection', index=8, + name='ops_ret', full_name='ymir.backend.GeneralResp.ops_ret', index=8, + number=10, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='detection', full_name='ymir.backend.GeneralResp.detection', index=9, number=1001, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, @@ -772,8 +804,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=774, - serialized_end=1164, + serialized_start=829, + serialized_end=1236, ) @@ -888,8 +920,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1167, - serialized_end=1770, + serialized_start=1239, + serialized_end=1842, ) @@ -934,8 +966,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1772, - serialized_end=1855, + serialized_start=1844, + serialized_end=1927, ) @@ -973,8 +1005,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1976, - serialized_end=2062, + serialized_start=2048, + serialized_end=2134, ) _TASKREQTRAINING = _descriptor.Descriptor( @@ -1011,8 +1043,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=1858, - serialized_end=2068, + serialized_start=1930, + serialized_end=2140, ) @@ -1064,8 +1096,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2070, - serialized_end=2190, + serialized_start=2142, + serialized_end=2262, ) @@ -1110,8 +1142,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2192, - serialized_end=2283, + serialized_start=2264, + serialized_end=2355, ) @@ -1163,8 +1195,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2285, - serialized_end=2409, + serialized_start=2357, + serialized_end=2481, ) @@ -1188,8 +1220,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2411, - serialized_end=2429, + serialized_start=2483, + serialized_end=2501, ) @@ -1248,8 +1280,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2432, - serialized_end=2571, + serialized_start=2504, + serialized_end=2643, ) @@ -1315,8 +1347,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2574, - serialized_end=2740, + serialized_start=2646, + serialized_end=2812, ) @@ -1394,8 +1426,8 @@ create_key=_descriptor._internal_create_key, fields=[]), ], - serialized_start=2743, - serialized_end=2948, + serialized_start=2815, + serialized_end=3020, ) @@ -1426,8 +1458,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2950, - serialized_end=3001, + serialized_start=3022, + serialized_end=3073, ) @@ -1465,8 +1497,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=3105, - serialized_end=3198, + serialized_start=3177, + serialized_end=3270, ) _RESPCMDINFERENCE = _descriptor.Descriptor( @@ -1496,8 +1528,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=3004, - serialized_end=3198, + serialized_start=3076, + serialized_end=3270, ) @@ -1528,8 +1560,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=3200, - serialized_end=3271, + serialized_start=3272, + serialized_end=3343, ) @@ -1588,8 +1620,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=3273, - serialized_end=3386, + serialized_start=3345, + serialized_end=3458, ) @@ -1641,8 +1673,8 @@ extension_ranges=[], 
oneofs=[ ], - serialized_start=3388, - serialized_end=3438, + serialized_start=3460, + serialized_end=3510, ) @@ -1673,8 +1705,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=3440, - serialized_end=3494, + serialized_start=3512, + serialized_end=3566, ) @@ -1733,14 +1765,61 @@ extension_ranges=[], oneofs=[ ], - serialized_start=3496, - serialized_end=3588, + serialized_start=3568, + serialized_end=3660, +) + + +_EVALUATECONFIG = _descriptor.Descriptor( + name='EvaluateConfig', + full_name='ymir.backend.EvaluateConfig', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='conf_thr', full_name='ymir.backend.EvaluateConfig.conf_thr', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='iou_thrs_interval', full_name='ymir.backend.EvaluateConfig.iou_thrs_interval', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='need_pr_curve', full_name='ymir.backend.EvaluateConfig.need_pr_curve', index=2, + number=3, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=3662, + serialized_end=3746, ) _GENERALREQ.fields_by_name['req_type'].enum_type = _REQUESTTYPE _GENERALREQ.fields_by_name['merge_strategy'].enum_type = _MERGESTRATEGY _GENERALREQ.fields_by_name['terminated_task_type'].enum_type = _TASKTYPE _GENERALREQ.fields_by_name['label_collection'].message_type = _LABELCOLLECTION +_GENERALREQ.fields_by_name['evaluate_config'].message_type = _EVALUATECONFIG _GENERALREQ.fields_by_name['req_create_task'].message_type = _REQCREATETASK _GENERALREQ.oneofs_by_name['sampling'].fields.append( _GENERALREQ.fields_by_name['sampling_count']) @@ -1799,6 +1878,7 @@ DESCRIPTOR.message_types_by_name['Rect'] = _RECT DESCRIPTOR.message_types_by_name['LabelCollection'] = _LABELCOLLECTION DESCRIPTOR.message_types_by_name['Label'] = _LABEL +DESCRIPTOR.message_types_by_name['EvaluateConfig'] = _EVALUATECONFIG DESCRIPTOR.enum_types_by_name['TvtType'] = _TVTTYPE DESCRIPTOR.enum_types_by_name['TaskType'] = _TASKTYPE DESCRIPTOR.enum_types_by_name['LabelFormat'] = _LABELFORMAT @@ -1964,6 +2044,13 @@ }) _sym_db.RegisterMessage(Label) +EvaluateConfig = _reflection.GeneratedProtocolMessageType('EvaluateConfig', (_message.Message,), { + 'DESCRIPTOR' : _EVALUATECONFIG, + '__module__' : 'backend_pb2' + # @@protoc_insertion_point(class_scope:ymir.backend.EvaluateConfig) + }) +_sym_db.RegisterMessage(EvaluateConfig) + _GENERALRESP_DOCKERIMAGECONFIGENTRY._options = None _REQCREATETASK.fields_by_name['filter']._options = None @@ -1976,8 +2063,8 @@ index=0, 
serialized_options=None, create_key=_descriptor._internal_create_key, - serialized_start=4709, - serialized_end=4811, + serialized_start=4931, + serialized_end=5033, methods=[ _descriptor.MethodDescriptor( name='data_manage_request', diff --git a/ymir/backend/src/ymir_app/alembic/versions/32b2a244fd5d_set_image_description_length_limit_to_.py b/ymir/backend/src/ymir_app/alembic/versions/32b2a244fd5d_set_image_description_length_limit_to_.py new file mode 100644 index 0000000000..b86691ced8 --- /dev/null +++ b/ymir/backend/src/ymir_app/alembic/versions/32b2a244fd5d_set_image_description_length_limit_to_.py @@ -0,0 +1,36 @@ +"""set image description length limit to 500 + +Revision ID: 32b2a244fd5d +Revises: 8780677bb227 +Create Date: 2022-04-27 15:58:54.698756 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = "32b2a244fd5d" +down_revision = "8780677bb227" +branch_labels = None +depends_on = None + + +def upgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table("docker_image", schema=None) as batch_op: + batch_op.alter_column( + "description", existing_type=sa.VARCHAR(length=100), type_=sa.String(length=500), existing_nullable=True + ) + + # ### end Alembic commands ### + + +def downgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table("docker_image", schema=None) as batch_op: + batch_op.alter_column( + "description", existing_type=sa.String(length=500), type_=sa.VARCHAR(length=100), existing_nullable=True + ) + + # ### end Alembic commands ### diff --git a/ymir/backend/src/ymir_app/alembic/versions/3c495c9f691e_add_is_visible_to_model_and_dataset.py b/ymir/backend/src/ymir_app/alembic/versions/3c495c9f691e_add_is_visible_to_model_and_dataset.py new file mode 100644 index 0000000000..e93d295981 --- /dev/null +++ b/ymir/backend/src/ymir_app/alembic/versions/3c495c9f691e_add_is_visible_to_model_and_dataset.py @@ -0,0 +1,38 @@ +"""add is_visible to model and dataset + +Revision ID: 3c495c9f691e +Revises: 32b2a244fd5d +Create Date: 2022-04-29 13:29:24.271877 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = "3c495c9f691e" +down_revision = "32b2a244fd5d" +branch_labels = None +depends_on = None + + +def upgrade() -> None: + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table("dataset", schema=None) as batch_op: + batch_op.add_column(sa.Column("is_visible", sa.Boolean(), nullable=False, server_default="1")) + + with op.batch_alter_table("model", schema=None) as batch_op: + batch_op.add_column(sa.Column("is_visible", sa.Boolean(), nullable=False, server_default="1")) + + # ### end Alembic commands ### + + +def downgrade() -> None: + # ### commands auto generated by Alembic - please adjust! 
### + with op.batch_alter_table("model", schema=None) as batch_op: + batch_op.drop_column("is_visible") + + with op.batch_alter_table("dataset", schema=None) as batch_op: + batch_op.drop_column("is_visible") + + # ### end Alembic commands ### diff --git a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/datasets.py b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/datasets.py index dac9f9c29f..fc60ae027d 100644 --- a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/datasets.py +++ b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/datasets.py @@ -1,7 +1,7 @@ from operator import attrgetter import enum import random -from typing import Any, Dict, Optional +from typing import Any, Dict, Optional, List from fastapi import APIRouter, BackgroundTasks, Depends, Path, Query from fastapi.encoders import jsonable_encoder @@ -16,18 +16,20 @@ DuplicateDatasetGroupError, NoDatasetPermission, FailedtoCreateTask, + FailedToHideProtectedResources, DatasetGroupNotFound, + ProjectNotFound, + MissingOperations, + RefuseToProcessMixedOperations, + DatasetsNotInSameGroup, ) from app.config import settings from app.constants.state import TaskState, TaskType, ResultState from app.utils.iteration import get_iteration_context_converter -from app.utils.ymir_controller import ( - ControllerClient, - gen_task_hash, -) +from app.utils.ymir_controller import ControllerClient, gen_task_hash from app.utils.ymir_viz import VizClient from app.schemas.dataset import MergeStrategy -from app.libs.datasets import import_dataset_in_background +from app.libs.datasets import import_dataset_in_background, evaluate_dataset from common_utils.labels import UserLabels router = APIRouter() @@ -48,9 +50,38 @@ def batch_get_datasets( return {"result": datasets} +@router.post( + "/batch", + response_model=schemas.DatasetsOut, +) +def batch_update_datasets( + *, + db: Session = Depends(deps.get_db), + dataset_ops: schemas.BatchOperations, + current_user: models.User = Depends(deps.get_current_active_user), +) -> Any: + if not dataset_ops.operations: + raise MissingOperations() + project = crud.project.get(db, dataset_ops.project_id) + if not project: + raise ProjectNotFound() + to_process = {op.id_ for op in dataset_ops.operations} + if to_process.intersection(project.referenced_dataset_ids): + raise FailedToHideProtectedResources() + actions = {op.action for op in dataset_ops.operations} + if len(actions) != 1: + # for now, we do not support mixed operations, for example, + # hide and unhide in a single batch request + raise RefuseToProcessMixedOperations() + + datasets = crud.dataset.batch_toggle_visibility(db, ids=list(to_process), action=list(actions)[0]) + return {"result": datasets} + + class SortField(enum.Enum): id = "id" create_datetime = "create_datetime" + update_datetime = "update_datetime" asset_count = "asset_count" source = "source" @@ -64,6 +95,7 @@ def list_datasets( source: TaskType = Query(None, description="type of related task"), project_id: int = Query(None), group_id: int = Query(None), + visible: bool = Query(True), state: ResultState = Query(None), offset: int = Query(None), limit: int = Query(None), @@ -84,6 +116,7 @@ def list_datasets( group_id=group_id, source=source, state=state, + visible=visible, offset=offset, limit=limit, order_by=order_by.name, @@ -459,3 +492,44 @@ def create_dataset_fusion( logger.info("[create dataset] dataset record created: %s", dataset.name) return {"result": dataset} + + +@router.post( + "/evaluation", + response_model=schemas.dataset.DatasetEvaluationOut, +) +def 
evaluate_datasets( + *, + db: Session = Depends(deps.get_db), + evaluation_in: schemas.dataset.DatasetEvaluationCreate, + current_user: models.User = Depends(deps.get_current_active_user), + controller_client: ControllerClient = Depends(deps.get_controller_client), + viz_client: VizClient = Depends(deps.get_viz_client), + user_labels: UserLabels = Depends(deps.get_user_labels), +) -> Any: + """ + evaluate dataset against ground truth + """ + gt_dataset = crud.dataset.get(db, id=evaluation_in.gt_dataset_id) + other_datasets = crud.dataset.get_multi_by_ids(db, ids=evaluation_in.other_dataset_ids) + if not gt_dataset or len(evaluation_in.other_dataset_ids) != len(other_datasets): + raise DatasetNotFound() + if not is_same_group([gt_dataset, *other_datasets]): + # confine evaluation to the same dataset group + raise DatasetsNotInSameGroup() + + evaluations = evaluate_dataset( + controller_client, + viz_client, + current_user.id, + evaluation_in.project_id, + user_labels, + evaluation_in.confidence_threshold, + gt_dataset, + other_datasets, + ) + return {"result": evaluations} + + +def is_same_group(datasets: List[models.Dataset]) -> bool: + return len({dataset.dataset_group_id for dataset in datasets}) == 1 diff --git a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/images.py b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/images.py index 00b20643b3..2343933558 100644 --- a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/images.py +++ b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/images.py @@ -150,7 +150,7 @@ async def get_shared_images( @cache(expire=settings.APP_CACHE_EXPIRE_IN_SECONDS) async def get_shared_images_from_github(url: str, timeout: int) -> List[Dict]: - logger.debug("[share image] getting shared docker images from GitHub...") + logger.info("[share image] getting shared docker images from GitHub...") shared_images = get_github_table(url, timeout=timeout) return shared_images diff --git a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/inferences.py b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/inferences.py index be029d0837..a0c4611815 100644 --- a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/inferences.py +++ b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/inferences.py @@ -53,6 +53,7 @@ def call_inference( try: resp = controller_client.call_inference( current_user.id, + model.project_id, model.hash, asset_dir, docker_image.url, diff --git a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/models.py b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/models.py index 77438fde0e..7a809afa79 100644 --- a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/models.py +++ b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/models.py @@ -12,6 +12,9 @@ ModelNotFound, DuplicateModelGroupError, FailedtoImportModel, + FailedToHideProtectedResources, + RefuseToProcessMixedOperations, + ProjectNotFound, ) from app.constants.state import TaskState, TaskType, ResultState from app.utils.ymir_controller import ControllerClient @@ -33,9 +36,34 @@ def batch_get_models( return {"result": models} +@router.post( + "/batch", + response_model=schemas.ModelsOut, +) +def batch_update_models( + *, + db: Session = Depends(deps.get_db), + model_ops: schemas.BatchOperations, + current_user: models.User = Depends(deps.get_current_active_user), +) -> Any: + project = crud.project.get(db, model_ops.project_id) + if not project: + raise ProjectNotFound() + to_process = {op.id_ for op in model_ops.operations} + if to_process.intersection(project.referenced_model_ids): + 
raise FailedToHideProtectedResources() + actions = {op.action for op in model_ops.operations} + if len(actions) != 1: + raise RefuseToProcessMixedOperations() + + models = crud.model.batch_toggle_visibility(db, ids=list(to_process), action=list(actions)[0]) + return {"result": models} + + class SortField(enum.Enum): id = "id" create_datetime = "create_datetime" + update_datetime = "update_datetime" map = "map" source = "source" @@ -47,6 +75,7 @@ def list_models( state: ResultState = Query(None), project_id: int = Query(None), group_id: int = Query(None), + visible: bool = Query(True), training_dataset_id: int = Query(None), offset: int = Query(None), limit: int = Query(None), @@ -73,6 +102,7 @@ def list_models( group_id=group_id, source=source, state=state, + visible=visible, offset=offset, limit=limit, order_by=order_by.name, diff --git a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/projects.py b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/projects.py index 1c3034e026..4eebc0ef5a 100644 --- a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/projects.py +++ b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/projects.py @@ -94,19 +94,16 @@ def create_sample_project( project_task_hash = gen_task_hash(current_user.id, project.id) try: - training_classes = user_labels.get_class_ids(names_or_aliases=settings.SAMPLE_PROJECT_KEYWORDS) + user_labels.get_class_ids(names_or_aliases=settings.SAMPLE_PROJECT_KEYWORDS) except KeyError: # todo refactor keywords dependencies to handle ensure given keywords exist add_keywords(controller_client, cache, current_user.id, settings.SAMPLE_PROJECT_KEYWORDS) - user_labels = controller_client.get_labels_of_user(current_user.id) - training_classes = user_labels.get_class_ids(names_or_aliases=settings.SAMPLE_PROJECT_KEYWORDS) try: resp = controller_client.create_project( user_id=current_user.id, project_id=project.id, task_id=project_task_hash, - args={"training_classes": training_classes}, ) logger.info("[create task] controller response: %s", resp) except ValueError: @@ -132,7 +129,6 @@ def create_project( current_user: models.User = Depends(deps.get_current_active_user), project_in: schemas.ProjectCreate, controller_client: ControllerClient = Depends(deps.get_controller_client), - user_labels: UserLabels = Depends(deps.get_user_labels), clickhouse: YmirClickHouse = Depends(deps.get_clickhouse_client), ) -> Any: """ @@ -146,15 +142,12 @@ def create_project( task_id = gen_task_hash(current_user.id, project.id) - training_classes = user_labels.get_class_ids(names_or_aliases=project_in.training_keywords) - # 2.send to controller try: resp = controller_client.create_project( user_id=current_user.id, project_id=project.id, task_id=task_id, - args={"training_classes": training_classes}, ) logger.info("[create task] controller response: %s", resp) except ValueError: @@ -294,3 +287,20 @@ def delete_project( continue return {"result": project} + + +@router.get( + "/{project_id}/status", + response_model=schemas.project.ProjectStatusOut, +) +def check_project_status( + *, + project_id: int = Path(...), + current_user: models.User = Depends(deps.get_current_active_user), + controller_client: ControllerClient = Depends(deps.get_controller_client), +) -> Any: + """ + Check if current project is dirty + """ + is_clean = controller_client.check_repo_status(user_id=current_user.id, project_id=project_id) + return {"result": {"is_dirty": not is_clean}} diff --git a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/tasks.py 
b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/tasks.py index 634e7ac609..9a9b527a6c 100644 --- a/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/tasks.py +++ b/ymir/backend/src/ymir_app/app/api/api_v1/endpoints/tasks.py @@ -1,8 +1,7 @@ import asyncio -from dataclasses import asdict import enum -import json -from typing import Any, Dict, List, Optional, Union, Tuple +from typing import Any, Union +from functools import partial import time from fastapi import APIRouter, Depends, Path, Query, Response, Request @@ -15,37 +14,55 @@ from app.api import deps from app.api.errors.errors import ( DuplicateTaskError, - FailedToConnectClickHouse, - FailedtoCreateTask, FailedToUpdateTaskStatus, - ModelNotFound, ModelNotReady, NoTaskPermission, ObsoleteTaskStatus, TaskNotFound, - DatasetNotFound, - DatasetGroupNotFound, ) from app.constants.state import ( FinalStates, TaskState, TaskType, - ResultType, - ResultState, ) from app.config import settings -from app.models.task import Task from app.utils.clickhouse import YmirClickHouse from app.utils.graph import GraphClient from app.utils.timeutil import convert_datetime_to_timestamp -from app.utils.ymir_controller import ControllerClient, gen_task_hash, gen_user_hash -from app.utils.ymir_viz import VizClient, ModelMetaData, DatasetMetaData +from app.utils.ymir_controller import ControllerClient, gen_user_hash +from app.utils.ymir_viz import VizClient from app.libs.redis_stream import RedisStream +from app.libs.tasks import TaskResult, create_single_task from common_utils.labels import UserLabels router = APIRouter() +@router.post( + "/batch", + response_model=schemas.BatchTasksCreateResults, +) +def batch_create_tasks( + *, + db: Session = Depends(deps.get_db), + current_user: models.User = Depends(deps.get_current_active_user), + user_labels: UserLabels = Depends(deps.get_user_labels), + batch_tasks_in: schemas.BatchTasksCreate, +) -> Any: + f_create_task = partial(create_single_task, db, current_user.id, user_labels) + + results = [] + # run in iteration by design to avoid datasets version number conflicts + for payload in batch_tasks_in.payloads: + try: + result = f_create_task(payload) + except Exception: + logger.exception("[batch create task] failed to create task by payload: %s", payload) + result = None + results.append(result) + return {"result": results} + + class SortField(enum.Enum): id = "id" create_datetime = "create_datetime" @@ -98,334 +115,20 @@ def create_task( db: Session = Depends(deps.get_db), task_in: schemas.TaskCreate, current_user: models.User = Depends(deps.get_current_active_user), - viz_client: VizClient = Depends(deps.get_viz_client), - controller_client: ControllerClient = Depends(deps.get_controller_client), - clickhouse: YmirClickHouse = Depends(deps.get_clickhouse_client), user_labels: UserLabels = Depends(deps.get_user_labels), ) -> Any: """ Create task """ # 1. validation - logger.debug("[create task] create task with payload: %s", jsonable_encoder(task_in)) + logger.info("[create task] create task with payload: %s", jsonable_encoder(task_in)) if crud.task.is_duplicated_name_in_project(db, project_id=task_in.project_id, name=task_in.name): raise DuplicateTaskError() - # 2. prepare keywords and task parameters - args = normalize_parameters(db, task_in.parameters, task_in.docker_image_config, user_labels) - - # 3. 
call controller - task_hash = gen_task_hash(current_user.id, task_in.project_id) - try: - resp = controller_client.create_task( - user_id=current_user.id, - project_id=task_in.project_id, - task_id=task_hash, - task_type=task_in.type, - args=args, - task_parameters=task_in.parameters.json() if task_in.parameters else None, - ) - logger.info("[create task] controller response: %s", resp) - except ValueError: - # todo parse error message - raise FailedtoCreateTask() - - # 4. create task record - task = crud.task.create_task(db, obj_in=task_in, task_hash=task_hash, user_id=current_user.id) - task_info = schemas.TaskInternal.from_orm(task) - - # 5. create task result record (dataset or model) - task_result = TaskResult(db=db, controller=controller_client, viz=viz_client, task_in_db=task) - task_result.create(task_in.parameters.dataset_id) - - # 6. send metric to clickhouse - try: - write_clickhouse_metrics( - clickhouse, - task_info, - args["dataset_group_id"], - args["dataset_id"], - task_in.parameters.model_id, - task_in.parameters.keywords or [], - ) - except FailedToConnectClickHouse: - # clickhouse metric shouldn't block create task process - logger.exception( - "[create task] failed to write task(%s) stats to clickhouse, continue anyway", - task.hash, - ) + task_in_db = create_single_task(db, current_user.id, user_labels, task_in) logger.info("[create task] created task name: %s", task_in.name) - return {"result": task} - - -class TaskResult: - def __init__( - self, - db: Session, - controller: ControllerClient, - viz: VizClient, - task_in_db: models.Task, - ): - self.db = db - self.task_in_db = task_in_db - self.task = schemas.TaskInternal.from_orm(task_in_db) - - self.result_type = ResultType(self.task.result_type) - self.user_id = self.task.user_id - self.project_id = self.task.project_id - self.task_hash = self.task.hash - self.controller = controller - - viz.initialize( - user_id=self.user_id, - project_id=self.project_id, - branch_id=self.task_hash, - ) - self.viz = viz - - self._result: Optional[Union[DatasetMetaData, ModelMetaData]] = None - self._user_labels: Optional[Dict] = None - - @property - def user_labels(self) -> Dict: - """ - Lazy evaluate labels from controller - """ - if self._user_labels is None: - self._user_labels = self.controller.get_labels_of_user(self.user_id) - return self._user_labels - - @property - def model_info(self) -> ModelMetaData: - result = self.viz.get_model() - try: - self.save_model_stats(result) - except FailedToConnectClickHouse: - logger.exception("Failed to write model stats to clickhouse, continue anyway") - return result - - @property - def dataset_info(self) -> DatasetMetaData: - return self.viz.get_dataset(user_labels=self.user_labels) - - @property - def result_info(self) -> Union[DatasetMetaData, ModelMetaData]: - if self._result is None: - self._result = self.model_info if self.result_type is ResultType.model else self.dataset_info - return self._result - - def save_model_stats(self, result: ModelMetaData) -> None: - model_in_db = crud.model.get_by_task_id(self.db, task_id=self.task.id) - if not model_in_db: - logger.warning("[update task] found no model to save model stats(%s)", result) - return - project_in_db = crud.project.get(self.db, id=self.project_id) - keywords = schemas.Project.from_orm(project_in_db).training_keywords - clickhouse = YmirClickHouse() - clickhouse.save_model_result( - model_in_db.create_datetime, - self.user_id, - model_in_db.project_id, - model_in_db.model_group_id, - model_in_db.id, - model_in_db.name, - 
result.hash, - result.map, - keywords, - ) - - def get_dest_group_info(self, dataset_id: int) -> Tuple[int, str]: - if self.result_type is ResultType.dataset: - dataset = crud.dataset.get(self.db, id=dataset_id) - if not dataset: - logger.error( - "Failed to predict dest dataset_group_id from non-existing dataset(%s)", - dataset_id, - ) - raise DatasetNotFound() - dataset_group = crud.dataset_group.get(self.db, id=dataset.dataset_group_id) - if not dataset_group: - raise DatasetGroupNotFound() - return dataset_group.id, dataset_group.name - else: - model_group = crud.model_group.get_from_training_dataset(self.db, training_dataset_id=dataset_id) - if not model_group: - model_group = crud.model_group.create_model_group( - self.db, - user_id=self.user_id, - project_id=self.project_id, - training_dataset_id=dataset_id, - ) - logger.info( - "[create task] created model_group(%s) for dataset(%s)", - model_group.id, - dataset_id, - ) - return model_group.id, model_group.name - - def create(self, dataset_id: int) -> Dict[str, Dict]: - dest_group_id, dest_group_name = self.get_dest_group_info(dataset_id) - if self.result_type is ResultType.dataset: - dataset = crud.dataset.create_as_task_result(self.db, self.task, dest_group_id, dest_group_name) - logger.info("[create task] created new dataset(%s) as task result", dataset.name) - return {"dataset": jsonable_encoder(dataset)} - elif self.result_type is ResultType.model: - model = crud.model.create_as_task_result(self.db, self.task, dest_group_id, dest_group_name) - logger.info("[create task] created new model(%s) as task result", model.name) - return {"model": jsonable_encoder(model)} - else: - logger.info("[create task] no task result record needed") - return {} - - def update( - self, - task_result: schemas.TaskUpdateStatus, - ) -> models.Task: - task_in_db = crud.task.get(self.db, id=self.task.id) - if not task_in_db: - logger.error( - "[update task] could not find target task (%s) to update, ignore", - self.task.id, - ) - raise TaskNotFound() - - if task_result.state in FinalStates: - logger.info( - "[update task] task reached final state(%s), handling result: %s", - task_result.state, - task_result, - ) - self.update_task_result(task_result, task_in_db) - - logger.info( - "[update task] updating task state %s and percent %s", - task_result.state, - task_result.percent, - ) - return crud.task.update_state_and_percent( - self.db, - task=task_in_db, - new_state=task_result.state, - state_code=task_result.state_code, - percent=task_result.percent, - ) - - def update_task_result(self, task_result: schemas.TaskUpdateStatus, task_in_db: Task) -> None: - if self.result_type is ResultType.dataset: - crud_func = crud.dataset - elif self.result_type is ResultType.model: - crud_func = crud.model # type: ignore - else: - logger.info("[update task] no task result to update") - return - - result_record = crud_func.get_by_task_id(self.db, task_id=self.task.id) - if not result_record: - logger.error("[update task] task result record not found, skip") - return - - if task_result.state is TaskState.done: - if isinstance(self.result_info, ModelMetaData): - crud.task.update_parameters_and_config( - self.db, - task=task_in_db, - parameters=self.result_info.task_parameters, - config=json.dumps(self.result_info.executor_config), - ) - crud_func.finish( - self.db, - result_record.id, - result_state=ResultState.ready, - result=asdict(self.result_info), - ) - else: - if self.result_type is ResultType.model: - try: - crud.model.finish( - self.db, result_record.id, 
result_state=ResultState.ready, result=asdict(self.model_info) - ) - except (ModelNotReady, ModelNotFound): - logger.exception("[update task] failed to get model from failed task") - crud_func.finish( - self.db, - result_record.id, - result_state=ResultState.error, - ) - else: - crud_func.finish( - self.db, - result_record.id, - result_state=ResultState.error, - ) - - -def write_clickhouse_metrics( - clickhouse: YmirClickHouse, - task_info: schemas.TaskInternal, - dataset_group_id: int, - dataset_id: int, - model_id: Optional[int], - keywords: List[str], -) -> None: - # for task stats - clickhouse.save_task_parameter( - dt=task_info.create_datetime, - user_id=task_info.user_id, - project_id=task_info.project_id, - name=task_info.name, - hash_=task_info.hash, - type_=TaskType(task_info.type).name, - dataset_ids=[dataset_id], - model_ids=[model_id] if model_id else [], - keywords=keywords, - ) - # for keywords recommendation - clickhouse.save_dataset_keyword( - dt=task_info.create_datetime, - user_id=task_info.user_id, - project_id=task_info.project_id, - group_id=dataset_group_id, - dataset_id=dataset_id, - keywords=keywords, - ) - - -def normalize_parameters( - db: Session, - parameters: schemas.TaskParameter, - docker_image_config: Optional[Dict], - user_labels: UserLabels, -) -> Dict: - normalized = parameters.dict() # type: Dict[str, Any] - - # training, mining and inference task has docker_config - normalized["docker_config"] = docker_image_config - - dataset = crud.dataset.get(db, id=parameters.dataset_id) - if not dataset: - logger.error("[create task] main dataset(%s) not exists", parameters.dataset_id) - raise DatasetNotFound() - normalized["dataset_hash"] = dataset.hash - normalized["dataset_group_id"] = dataset.dataset_group_id - # label task uses dataset name as task name for LabelStudio - normalized["dataset_name"] = dataset.name - - if parameters.validation_dataset_id: - validation_dataset = crud.dataset.get(db, id=parameters.validation_dataset_id) - if not validation_dataset: - logger.error("[create task] validation dataset(%s) not exists", parameters.validation_dataset_id) - raise DatasetNotFound() - normalized["validation_dataset_hash"] = validation_dataset.hash - - if parameters.model_id: - model = crud.model.get(db, id=parameters.model_id) - if model: - normalized["model_hash"] = model.hash - - if parameters.keywords: - normalized["class_ids"] = user_labels.get_class_ids(names_or_aliases=parameters.keywords) - return normalized + return {"result": task_in_db} @router.delete( @@ -568,7 +271,7 @@ def update_task_status( convert_datetime_to_timestamp(task_in_db.last_message_datetime), task_update.timestamp, ): - logger.debug("[update status] ignore obsolete message") + logger.info("[update status] ignore obsolete message") raise ObsoleteTaskStatus() task = schemas.TaskInternal.from_orm(task_in_db) @@ -577,28 +280,28 @@ def update_task_status( raise ObsoleteTaskStatus() # 3. 
Update task and task_result(could be dataset or model) - task_result = TaskResult(db=db, controller=controller_client, viz=viz_client, task_in_db=task_in_db) + task_result = TaskResult(db=db, task_in_db=task_in_db) try: - task_in_db = task_result.update(task_result=task_update) + updated_task = task_result.update(task_result=task_update) except (ConnectionError, HTTPError, Timeout): logger.error("Failed to update update task status") raise FailedToUpdateTaskStatus() except ModelNotReady: logger.warning("Model Not Ready") - - namespace = f"/{gen_user_hash(task.user_id)}" - task_update_msg = schemas.TaskResultUpdateMessage( - task_id=task_in_db.hash, - timestamp=time.time(), - percent=task_in_db.percent, - state=task_in_db.state, - result_model=task_in_db.result_model, # type: ignore - result_dataset=task_in_db.result_dataset, # type: ignore - ) - # todo compatible with current frontend data structure - # reformatting is needed - payload = {task_in_db.hash: task_update_msg.dict()} - asyncio.run(request.app.sio.emit(event="update_taskstate", data=payload, namespace=namespace)) + else: + namespace = f"/{gen_user_hash(task.user_id)}" + task_update_msg = schemas.TaskResultUpdateMessage( + task_id=updated_task.hash, + timestamp=time.time(), + percent=updated_task.percent, + state=updated_task.state, + result_model=updated_task.result_model, # type: ignore + result_dataset=updated_task.result_dataset, # type: ignore + ) + # todo compatible with current frontend data structure + # reformatting is needed + payload = {updated_task.hash: task_update_msg.dict()} + asyncio.run(request.app.sio.emit(event="update_taskstate", data=payload, namespace=namespace)) return {"result": task_in_db} @@ -607,10 +310,6 @@ def is_obsolete_message(last_update_time: Union[float, int], msg_time: Union[flo return last_update_time > msg_time -def get_default_record_name(task_hash: str, task_name: str) -> str: - return f"{task_name}_{task_hash[-6:]}" - - @router.post( "/events", response_model=schemas.TaskOut, diff --git a/ymir/backend/src/ymir_app/app/api/errors/errors.py b/ymir/backend/src/ymir_app/app/api/errors/errors.py index 54dc936ca5..2d6ee946d6 100644 --- a/ymir/backend/src/ymir_app/app/api/errors/errors.py +++ b/ymir/backend/src/ymir_app/app/api/errors/errors.py @@ -98,6 +98,11 @@ class FailedtoCreateModel(ControllerError): message = "Failed to Create Model via Controller" +class FailedToEvaluate(ControllerError): + code = error_codes.FAILED_TO_EVALUATE + message = "Failed to RUN EVALUATE CMD via Controller" + + class RequiredFieldMissing(APIError): code = error_codes.REQUIRED_FIELD_MISSING message = "Required Field Missing" @@ -138,6 +143,11 @@ class ModelNotFound(NotFound): message = "Model Not Found" +class DatasetEvaluationNotFound(NotFound): + code = error_codes.DATASET_EVALUATION_NOT_FOUND + message = "Dataset Evaluation Not Found" + + class ModelNotReady(APIError): code = error_codes.MODEL_NOT_READY message = "Model Not Ready" @@ -290,6 +300,11 @@ class DuplicateProjectError(DuplicateError): message = "Duplicated Project Name" +class FailedToHideProtectedResources(APIError): + code = error_codes.FAILED_TO_PROCESS_PROTECTED_RESOURCES + message = "Failed to Hide Protected Resources in Project" + + class DatasetGroupNotFound(NotFound): code = error_codes.DATASET_GROUP_NOT_FOUND message = "DatasetGroup Not Found" @@ -333,3 +348,18 @@ class IterationNotFound(NotFound): class FailedToUpdateIterationStage(APIError): code = error_codes.ITERATION_COULD_NOT_UPDATE_STAGE message = "Failed to Update Iteration Stage" + 
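# --- illustrative sketch, not part of the patch above ---------------------
# The batch hide/unhide endpoints introduced in this patch share one guard:
# refuse a request that mixes "hide" and "unhide" actions, and refuse to hide
# resources a project or its current iteration still references. The names
# below mirror the new schemas and error classes, but this helper is a
# standalone stand-in for demonstration, not the application code itself.
from dataclasses import dataclass
from enum import Enum
from typing import List, Set


class OperationAction(str, Enum):
    hide = "hide"
    unhide = "unhide"


@dataclass
class Operation:
    action: OperationAction
    id_: int


class MissingOperations(Exception):
    """No operations were supplied in the batch request."""


class RefuseToProcessMixedOperations(Exception):
    """A single batch mixed hide and unhide actions."""


class FailedToHideProtectedResources(Exception):
    """The batch tried to hide resources the project still references."""


def validate_batch_operations(operations: List[Operation], protected_ids: Set[int]) -> OperationAction:
    if not operations:
        raise MissingOperations()
    if {op.id_ for op in operations} & protected_ids:
        raise FailedToHideProtectedResources()
    actions = {op.action for op in operations}
    if len(actions) != 1:
        raise RefuseToProcessMixedOperations()
    return actions.pop()


if __name__ == "__main__":
    ops = [Operation(OperationAction.hide, 1), Operation(OperationAction.hide, 2)]
    # returns OperationAction.hide; raises if actions are mixed or any id is protected
    print(validate_batch_operations(ops, protected_ids={99}))
# ---------------------------------------------------------------------------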
+ +class RefuseToProcessMixedOperations(APIError): + code = error_codes.REFUSE_TO_PROCESS_MIXED_OPERATIONS + message = "Refuse To Process Mixed Operations" + + +class MissingOperations(APIError): + code = error_codes.MISSING_OPERATIONS + message = "Missing Operations" + + +class DatasetsNotInSameGroup(APIError): + code = error_codes.DATASETS_NOT_IN_SAME_GROUP + message = "Datasets Not in the Same Group" diff --git a/ymir/backend/src/ymir_app/app/clean_tasks.py b/ymir/backend/src/ymir_app/app/clean_tasks.py index 39c0f9945f..730c3d0c53 100644 --- a/ymir/backend/src/ymir_app/app/clean_tasks.py +++ b/ymir/backend/src/ymir_app/app/clean_tasks.py @@ -31,9 +31,6 @@ def terminate_tasks() -> None: logger.error("No initial user yet, quit") return for task in list_unfinished_tasks(db): - if not (task.hash and task.type): - # make mypy happy - continue if task.type in settings.TASK_TYPES_WHITELIST: # do not terminate task having whitelist type continue diff --git a/ymir/backend/src/ymir_app/app/config.py b/ymir/backend/src/ymir_app/app/config.py index 025b23cd9f..054a50fec9 100644 --- a/ymir/backend/src/ymir_app/app/config.py +++ b/ymir/backend/src/ymir_app/app/config.py @@ -17,6 +17,7 @@ class Settings(BaseSettings): APP_SECRET_KEY: str = secrets.token_urlsafe(32) DEFAULT_LIMIT: int = 20 STRING_LEN_LIMIT: int = 100 + LONG_STRING_LEN_LIMIT: int = 500 TEXT_LEN_LIMIT: int = 20000 SENTRY_DSN: Optional[str] REGISTRATION_NEEDS_APPROVAL: bool = False @@ -63,7 +64,8 @@ class Settings(BaseSettings): # RUNTIME RUNTIMES: Optional[ - str] = '[{"name": "sample_image", "hash": "6d30c27861c5", "type": 1, "url": "industryessentials/executor-example:latest", "configs": [{"expected_map": 0.983, "idle_seconds": 60, "trigger_crash": 0, "type": 1}, {"idle_seconds": 6, "trigger_crash": 0, "type": 2}, {"idle_seconds": 3, "trigger_crash": 0, "type": 9}]}]' # noqa: E501 + str + ] = '[{"name": "sample_image", "hash": "6d30c27861c5", "description": "Demonstration only. This docker image trains fake model after a requested length of running time period. These models can only be used by this same docker image for re-training/mining/inference purposes. 
Adjust the hyper-parameters to set the length of running time, or to trigger crash, to set expected mAP, etc", "url": "industryessentials/executor-example:latest", "configs": [{"expected_map": 0.983, "idle_seconds": 60, "trigger_crash": 0, "type": 1}, {"idle_seconds": 6, "trigger_crash": 0, "type": 2}, {"idle_seconds": 3, "trigger_crash": 0, "type": 9}]}]' # noqa: E501 # Online Sheet SHARING_TIMEOUT: int = 10 diff --git a/ymir/backend/src/ymir_app/app/crud/crud_dataset.py b/ymir/backend/src/ymir_app/app/crud/crud_dataset.py index 117f6d0132..bc8371e9f7 100644 --- a/ymir/backend/src/ymir_app/app/crud/crud_dataset.py +++ b/ymir/backend/src/ymir_app/app/crud/crud_dataset.py @@ -23,6 +23,7 @@ def get_multi_datasets( group_id: Optional[int] = None, source: Optional[TaskType] = None, state: Optional[IntEnum] = None, + visible: bool = True, start_time: Optional[int] = None, end_time: Optional[int] = None, offset: Optional[int] = 0, @@ -33,7 +34,11 @@ def get_multi_datasets( # each dataset is associate with one task # we need related task info as well query = db.query(self.model) - query = query.filter(self.model.user_id == user_id, not_(self.model.is_deleted)) + query = query.filter( + self.model.user_id == user_id, + self.model.is_visible == int(visible), + not_(self.model.is_deleted), + ) if start_time and end_time: _start_time = datetime.utcfromtimestamp(start_time) @@ -157,5 +162,16 @@ def remove_group_resources(self, db: Session, *, group_id: int) -> List[Dataset] db.commit() return objs + def batch_toggle_visibility(self, db: Session, *, ids: List[int], action: str) -> List[Dataset]: + objs = self.get_multi_by_ids(db, ids=ids) + for obj in objs: + if action == "hide": + obj.is_visible = False + elif action == "unhide": + obj.is_visible = True + db.bulk_save_objects(objs) + db.commit() + return objs + dataset = CRUDDataset(Dataset) diff --git a/ymir/backend/src/ymir_app/app/crud/crud_dataset_group.py b/ymir/backend/src/ymir_app/app/crud/crud_dataset_group.py index b14cf7952f..26098e31d7 100644 --- a/ymir/backend/src/ymir_app/app/crud/crud_dataset_group.py +++ b/ymir/backend/src/ymir_app/app/crud/crud_dataset_group.py @@ -43,7 +43,7 @@ def get_multi_dataset_groups( is_desc: bool = True, ) -> Tuple[List[DatasetGroup], int]: query = db.query(self.model) - query = query.filter(self.model.user_id == user_id, not_(self.model.is_deleted)) + query = query.filter(self.model.user_id == user_id, self.model.visible_datasets, not_(self.model.is_deleted)) if start_time and end_time: _start_time = datetime.utcfromtimestamp(start_time) diff --git a/ymir/backend/src/ymir_app/app/crud/crud_model.py b/ymir/backend/src/ymir_app/app/crud/crud_model.py index 6a1bace12d..be4c38a68c 100644 --- a/ymir/backend/src/ymir_app/app/crud/crud_model.py +++ b/ymir/backend/src/ymir_app/app/crud/crud_model.py @@ -22,6 +22,7 @@ def get_multi_models( group_id: Optional[int] = None, source: Optional[TaskType] = None, state: Optional[IntEnum] = None, + visible: bool = True, start_time: Optional[int], end_time: Optional[int], offset: Optional[int], @@ -30,7 +31,11 @@ def get_multi_models( is_desc: bool = True, ) -> Tuple[List[Model], int]: query = db.query(self.model) - query = query.filter(self.model.user_id == user_id, not_(self.model.is_deleted)) + query = query.filter( + self.model.user_id == user_id, + self.model.is_visible == int(visible), + not_(self.model.is_deleted), + ) if start_time and end_time: _start_time = datetime.utcfromtimestamp(start_time) @@ -154,5 +159,16 @@ def remove_group_resources(self, db: Session, *, 
group_id: int) -> List[Model]: db.commit() return objs + def batch_toggle_visibility(self, db: Session, *, ids: List[int], action: str) -> List[Model]: + objs = self.get_multi_by_ids(db, ids=ids) + for obj in objs: + if action == "hide": + obj.is_visible = False + elif action == "unhide": + obj.is_visible = True + db.bulk_save_objects(objs) + db.commit() + return objs + model = CRUDModel(Model) diff --git a/ymir/backend/src/ymir_app/app/crud/crud_model_group.py b/ymir/backend/src/ymir_app/app/crud/crud_model_group.py index edbf282d7f..ef12d5ac41 100644 --- a/ymir/backend/src/ymir_app/app/crud/crud_model_group.py +++ b/ymir/backend/src/ymir_app/app/crud/crud_model_group.py @@ -46,7 +46,7 @@ def get_multi_model_groups( is_desc: bool = True, ) -> Tuple[List[ModelGroup], int]: query = db.query(self.model) - query = query.filter(self.model.user_id == user_id, not_(self.model.is_deleted)) + query = query.filter(self.model.user_id == user_id, self.model.visible_models, not_(self.model.is_deleted)) if start_time and end_time: _start_time = datetime.utcfromtimestamp(start_time) diff --git a/ymir/backend/src/ymir_app/app/crud/crud_project.py b/ymir/backend/src/ymir_app/app/crud/crud_project.py index cb532c7f46..1cc9e8edc1 100644 --- a/ymir/backend/src/ymir_app/app/crud/crud_project.py +++ b/ymir/backend/src/ymir_app/app/crud/crud_project.py @@ -5,6 +5,7 @@ from sqlalchemy import and_, desc, not_ from sqlalchemy.orm import Session +from app.config import settings from app.crud.base import CRUDBase from app.models import Project from app.schemas.project import ProjectCreate, ProjectUpdate @@ -79,6 +80,11 @@ def get_multi_projects( return query.offset(offset).limit(limit).all(), query.count() return query.all(), query.count() + def get_all_projects(self, db: Session, *, offset: int = 0, limit: int = settings.DEFAULT_LIMIT) -> List[Project]: + query = db.query(self.model) + query = query.filter(not_(self.model.is_deleted)) + return query.offset(offset).limit(limit).all() + def update_current_iteration( self, db: Session, diff --git a/ymir/backend/src/ymir_app/app/fix_dirty_repos.py b/ymir/backend/src/ymir_app/app/fix_dirty_repos.py new file mode 100644 index 0000000000..b2bfc9b6ef --- /dev/null +++ b/ymir/backend/src/ymir_app/app/fix_dirty_repos.py @@ -0,0 +1,56 @@ +import logging +from typing import List, Iterator, Dict +from concurrent.futures import ThreadPoolExecutor +from functools import partial + +from sqlalchemy.orm import Session + +from app import crud +from app.config import settings +from app.db.session import SessionLocal +from app.models.project import Project +from app.utils.ymir_controller import ControllerClient + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + + +def iter_all_projects(db: Session, batch_size: int = settings.DEFAULT_LIMIT) -> Iterator[List[Project]]: + offset = 0 + projects = crud.project.get_all_projects(db, offset=offset, limit=batch_size) + while projects: + yield projects + offset = offset + batch_size + projects = crud.project.get_all_projects(db, offset=offset, limit=batch_size) + + +def fix_repo(controller: ControllerClient, project: Project) -> Dict: + return controller.fix_repo(user_id=project.user_id, project_id=project.id) + + +def batch_fix_repos(controller: ControllerClient, projects: List[Project]) -> List: + f_fix_repo = partial(fix_repo, controller) + with ThreadPoolExecutor() as executor: + res = executor.map(f_fix_repo, projects) + return list(res) + + +def fix_all_repos() -> None: + db = SessionLocal() + controller = 
ControllerClient() + for project_batch in iter_all_projects(db): + try: + batch_fix_repos(controller, project_batch) + except Exception: + # fix dirty repos shouldn't break start up process + logger.exception("Failed to clean a batch of repos: %s", project_batch) + + +def main() -> None: + logger.info("Fix all dirty repos upon start up") + fix_all_repos() + logger.info("Fixed dirty repos") + + +if __name__ == "__main__": + main() diff --git a/ymir/backend/src/ymir_app/app/libs/datasets.py b/ymir/backend/src/ymir_app/app/libs/datasets.py index a304df0ce4..22ac250e85 100644 --- a/ymir/backend/src/ymir_app/app/libs/datasets.py +++ b/ymir/backend/src/ymir_app/app/libs/datasets.py @@ -1,4 +1,4 @@ -from typing import Any, Dict, Optional +from typing import Any, Dict, Optional, List import tempfile import pathlib from zipfile import BadZipFile @@ -6,19 +6,23 @@ from fastapi.logger import logger from sqlalchemy.orm import Session -from app import crud, schemas +from app import crud, schemas, models from app.api.errors.errors import ( DatasetNotFound, FailedtoCreateDataset, + FailedToEvaluate, ) from app.config import settings from app.constants.state import ResultState from app.utils.files import FailedToDownload, verify_import_path, prepare_imported_dataset_dir, InvalidFileStructure +from app.utils.ymir_viz import VizClient from app.utils.ymir_controller import ( ControllerClient, gen_user_hash, gen_repo_hash, + gen_task_hash, ) +from common_utils.labels import UserLabels def import_dataset_in_background( @@ -31,7 +35,7 @@ def import_dataset_in_background( ) -> None: try: _import_dataset(db, controller_client, dataset_import, user_id, task_hash) - except (BadZipFile, FailedToDownload, FailedtoCreateDataset, DatasetNotFound, InvalidFileStructure): + except (OSError, BadZipFile, FailedToDownload, FailedtoCreateDataset, DatasetNotFound, InvalidFileStructure): logger.exception("[import dataset] failed to import dataset") crud.dataset.update_state(db, dataset_id=dataset_id, new_state=ResultState.error) @@ -106,3 +110,35 @@ def data_dir(self) -> pathlib.Path: else: raise ValueError("input_path or input_url is required") return pathlib.Path(self._data_dir) + + +def evaluate_dataset( + controller: ControllerClient, + viz: VizClient, + user_id: int, + project_id: int, + user_labels: UserLabels, + confidence_threshold: float, + gt_dataset: models.Dataset, + other_datasets: List[models.Dataset], +) -> Dict: + # temporary task hash used to fetch evaluation result later + task_hash = gen_task_hash(user_id, project_id) + try: + controller.evaluate_dataset( + user_id, + project_id, + task_hash, + confidence_threshold, + gt_dataset.hash, + [dataset.hash for dataset in other_datasets], + ) + except ValueError: + logger.exception("Failed to evaluate via controller") + raise FailedToEvaluate() + # todo refactor + viz.initialize(user_id=user_id, project_id=project_id, branch_id=task_hash) + evaluations = viz.get_evaluations(user_labels) + + dataset_id_mapping = {dataset.hash: dataset.id for dataset in other_datasets} + return {dataset_id_mapping[hash_]: evaluation for hash_, evaluation in evaluations.items()} diff --git a/ymir/backend/src/ymir_app/app/libs/models.py b/ymir/backend/src/ymir_app/app/libs/models.py index 070cf1a020..5abe011f5a 100644 --- a/ymir/backend/src/ymir_app/app/libs/models.py +++ b/ymir/backend/src/ymir_app/app/libs/models.py @@ -13,7 +13,7 @@ FieldValidationFailed, ) from app.constants.state import ResultState, TaskType -from app.utils.files import NGINX_DATA_PATH, save_file +from 
app.utils.files import NGINX_DATA_PATH, save_file, FailedToDownload from app.utils.ymir_controller import gen_user_hash, gen_repo_hash, ControllerClient from app.config import settings @@ -28,7 +28,15 @@ def import_model_in_background( ) -> None: try: _import_model(db, controller_client, model_import, user_id, task_hash) - except (ValueError, FieldValidationFailed, FailedtoImportModel, ModelNotFound, TaskNotFound): + except ( + ValueError, + OSError, + FieldValidationFailed, + FailedtoImportModel, + ModelNotFound, + TaskNotFound, + FailedToDownload, + ): logger.exception("[import model] failed to import model, set model result_state to error") crud.model.update_state(db, model_id=model_id, new_state=ResultState.error) diff --git a/ymir/backend/src/ymir_app/app/libs/tasks.py b/ymir/backend/src/ymir_app/app/libs/tasks.py index 34cd218102..86801a06fc 100644 --- a/ymir/backend/src/ymir_app/app/libs/tasks.py +++ b/ymir/backend/src/ymir_app/app/libs/tasks.py @@ -1,12 +1,37 @@ +import json import itertools import asyncio -from typing import Dict, List, Tuple +from typing import Any, Dict, List, Tuple, Optional, Union import aiohttp +from dataclasses import asdict +from fastapi.logger import logger +from fastapi.encoders import jsonable_encoder +from sqlalchemy.orm import Session +from app.api.errors.errors import ( + FailedToUpdateTaskStatus, + FailedtoCreateTask, + FailedToConnectClickHouse, + ModelNotReady, + ModelNotFound, + TaskNotFound, + DatasetNotFound, + DatasetGroupNotFound, +) +from app.constants.state import ( + FinalStates, + TaskState, + TaskType, + ResultType, + ResultState, +) from app.config import settings -from app import schemas -from app.api.errors.errors import FailedToUpdateTaskStatus +from app import schemas, crud, models +from app.utils.ymir_controller import ControllerClient, gen_task_hash +from app.utils.clickhouse import YmirClickHouse +from app.utils.ymir_viz import VizClient, ModelMetaData, DatasetMetaData +from common_utils.labels import UserLabels class Retry(Exception): @@ -43,3 +68,308 @@ async def batch_update_task_status(events: List[Tuple[str, Dict]]) -> List[str]: results = await asyncio.gather(*tasks, return_exceptions=True) success_id_selectors = [not isinstance(res, Exception) for res in results] return list(itertools.compress(ids, success_id_selectors)) + + +def normalize_parameters( + db: Session, + parameters: schemas.TaskParameter, + docker_image_config: Optional[Dict], + user_labels: UserLabels, +) -> Dict: + normalized = parameters.dict() # type: Dict[str, Any] + + # training, mining and inference task has docker_config + normalized["docker_config"] = docker_image_config + + dataset = crud.dataset.get(db, id=parameters.dataset_id) + if not dataset: + logger.error("[create task] main dataset(%s) not exists", parameters.dataset_id) + raise DatasetNotFound() + normalized["dataset_hash"] = dataset.hash + normalized["dataset_group_id"] = dataset.dataset_group_id + # label task uses dataset name as task name for LabelStudio + normalized["dataset_name"] = dataset.name + + if parameters.validation_dataset_id: + validation_dataset = crud.dataset.get(db, id=parameters.validation_dataset_id) + if not validation_dataset: + logger.error("[create task] validation dataset(%s) not exists", parameters.validation_dataset_id) + raise DatasetNotFound() + normalized["validation_dataset_hash"] = validation_dataset.hash + + if parameters.model_id: + model = crud.model.get(db, id=parameters.model_id) + if model: + normalized["model_hash"] = model.hash + + if 
parameters.keywords: + normalized["class_ids"] = user_labels.get_class_ids(names_or_aliases=parameters.keywords) + return normalized + + +def write_clickhouse_metrics( + task_info: schemas.TaskInternal, + dataset_group_id: int, + dataset_id: int, + model_id: Optional[int], + keywords: List[str], +) -> None: + # for task stats + clickhouse = YmirClickHouse() + clickhouse.save_task_parameter( + dt=task_info.create_datetime, + user_id=task_info.user_id, + project_id=task_info.project_id, + name=task_info.name, + hash_=task_info.hash, + type_=TaskType(task_info.type).name, + dataset_ids=[dataset_id], + model_ids=[model_id] if model_id else [], + keywords=keywords, + ) + # for keywords recommendation + clickhouse.save_dataset_keyword( + dt=task_info.create_datetime, + user_id=task_info.user_id, + project_id=task_info.project_id, + group_id=dataset_group_id, + dataset_id=dataset_id, + keywords=keywords, + ) + + +def create_single_task(db: Session, user_id: int, user_labels: UserLabels, task_in: schemas.TaskCreate) -> models.Task: + args = normalize_parameters(db, task_in.parameters, task_in.docker_image_config, user_labels) + task_hash = gen_task_hash(user_id, task_in.project_id) + try: + controller_client = ControllerClient() + resp = controller_client.create_task( + user_id=user_id, + project_id=task_in.project_id, + task_id=task_hash, + task_type=task_in.type, + args=args, + task_parameters=task_in.parameters.json() if task_in.parameters else None, + ) + logger.info("[create task] controller response: %s", resp) + except ValueError: + raise FailedtoCreateTask() + + task = crud.task.create_task(db, obj_in=task_in, task_hash=task_hash, user_id=user_id) + task_info = schemas.TaskInternal.from_orm(task) + + task_result = TaskResult(db=db, task_in_db=task) + task_result.create(task_in.parameters.dataset_id) + + try: + write_clickhouse_metrics( + task_info, + args["dataset_group_id"], + args["dataset_id"], + task_in.parameters.model_id, + task_in.parameters.keywords or [], + ) + except FailedToConnectClickHouse: + # clickhouse metric shouldn't block create task process + logger.exception( + "[create task] failed to write task(%s) stats to clickhouse, continue anyway", + task.hash, + ) + logger.info("[create task] created task name: %s", task_info.name) + return task + + +class TaskResult: + def __init__( + self, + db: Session, + task_in_db: models.Task, + ): + self.db = db + self.task_in_db = task_in_db + self.task = schemas.TaskInternal.from_orm(task_in_db) + + self.result_type = ResultType(self.task.result_type) + self.user_id = self.task.user_id + self.project_id = self.task.project_id + self.task_hash = self.task.hash + self.controller = ControllerClient() + self.viz = VizClient() + self.viz.initialize( + user_id=self.user_id, + project_id=self.project_id, + branch_id=self.task_hash, + ) + + self._result: Optional[Union[DatasetMetaData, ModelMetaData]] = None + self._user_labels: Optional[Dict] = None + + @property + def user_labels(self) -> Dict: + """ + Lazy evaluate labels from controller + """ + if self._user_labels is None: + self._user_labels = self.controller.get_labels_of_user(self.user_id) + return self._user_labels + + @property + def model_info(self) -> Optional[ModelMetaData]: + try: + result = self.viz.get_model() + except (ModelNotReady, ModelNotFound): + logger.exception("[update task] failed to get model from task") + return None + else: + return result + + @property + def dataset_info(self) -> DatasetMetaData: + return self.viz.get_dataset(user_labels=self.user_labels) + + 
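# --- illustrative sketch, not part of the patch above ---------------------
# Two patterns in the relocated TaskResult are worth calling out: remote
# lookups (user labels, the task result) are fetched once and then cached on
# the instance, and model_info degrades to None when the model is not ready
# instead of raising. FakeVizClient and NotReadyError are stand-ins for
# VizClient and ModelNotReady/ModelNotFound; they are not the app's classes.
from typing import Optional


class NotReadyError(Exception):
    pass


class FakeVizClient:
    def __init__(self, ready: bool) -> None:
        self.ready = ready
        self.calls = 0

    def get_model(self) -> dict:
        self.calls += 1
        if not self.ready:
            raise NotReadyError()
        return {"hash": "abc123", "map": 0.42}


class LazyTaskResult:
    def __init__(self, viz: FakeVizClient) -> None:
        self.viz = viz
        self._result: Optional[dict] = None

    @property
    def model_info(self) -> Optional[dict]:
        try:
            return self.viz.get_model()
        except NotReadyError:
            return None  # caller decides whether to mark the result as error

    @property
    def result_info(self) -> Optional[dict]:
        if self._result is None:  # fetch once, then serve from the cache
            self._result = self.model_info
        return self._result


if __name__ == "__main__":
    result = LazyTaskResult(FakeVizClient(ready=True))
    print(result.result_info, result.result_info, result.viz.calls)  # second access is cached
    print(LazyTaskResult(FakeVizClient(ready=False)).model_info)     # -> None
# ---------------------------------------------------------------------------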
@property + def result_info(self) -> Union[DatasetMetaData, ModelMetaData, None]: + if self._result is None: + self._result = self.model_info if self.result_type is ResultType.model else self.dataset_info + return self._result + + def save_model_stats(self, result: ModelMetaData) -> None: + model_in_db = crud.model.get_by_task_id(self.db, task_id=self.task.id) + if not model_in_db: + logger.warning("[update task] found no model to save model stats(%s)", result) + return + project_in_db = crud.project.get(self.db, id=self.project_id) + keywords = schemas.Project.from_orm(project_in_db).training_keywords + clickhouse = YmirClickHouse() + clickhouse.save_model_result( + model_in_db.create_datetime, + self.user_id, + model_in_db.project_id, + model_in_db.model_group_id, + model_in_db.id, + model_in_db.name, + result.hash, + result.map, + keywords, + ) + + def get_dest_group_info(self, dataset_id: int) -> Tuple[int, str]: + if self.result_type is ResultType.dataset: + dataset = crud.dataset.get(self.db, id=dataset_id) + if not dataset: + logger.error( + "Failed to predict dest dataset_group_id from non-existing dataset(%s)", + dataset_id, + ) + raise DatasetNotFound() + dataset_group = crud.dataset_group.get(self.db, id=dataset.dataset_group_id) + if not dataset_group: + raise DatasetGroupNotFound() + return dataset_group.id, dataset_group.name + else: + model_group = crud.model_group.get_from_training_dataset(self.db, training_dataset_id=dataset_id) + if not model_group: + model_group = crud.model_group.create_model_group( + self.db, + user_id=self.user_id, + project_id=self.project_id, + training_dataset_id=dataset_id, + ) + logger.info( + "[create task] created model_group(%s) for dataset(%s)", + model_group.id, + dataset_id, + ) + return model_group.id, model_group.name + + def create(self, dataset_id: int) -> Dict[str, Dict]: + dest_group_id, dest_group_name = self.get_dest_group_info(dataset_id) + if self.result_type is ResultType.dataset: + dataset = crud.dataset.create_as_task_result(self.db, self.task, dest_group_id, dest_group_name) + logger.info("[create task] created new dataset(%s) as task result", dataset.name) + return {"dataset": jsonable_encoder(dataset)} + elif self.result_type is ResultType.model: + model = crud.model.create_as_task_result(self.db, self.task, dest_group_id, dest_group_name) + logger.info("[create task] created new model(%s) as task result", model.name) + return {"model": jsonable_encoder(model)} + else: + logger.info("[create task] no task result record needed") + return {} + + def update( + self, + task_result: schemas.TaskUpdateStatus, + ) -> models.Task: + task_in_db = crud.task.get(self.db, id=self.task.id) + if not task_in_db: + logger.error( + "[update task] could not find target task (%s) to update, ignore", + self.task.id, + ) + raise TaskNotFound() + + if task_result.state in FinalStates: + logger.info( + "[update task] task reached final state(%s), handling result: %s", + task_result.state, + task_result, + ) + self.update_task_result(task_result, task_in_db) + + logger.info( + "[update task] updating task state %s and percent %s", + task_result.state, + task_result.percent, + ) + return crud.task.update_state_and_percent( + self.db, + task=task_in_db, + new_state=task_result.state, + state_code=task_result.state_code, + percent=task_result.percent, + ) + + def update_task_result(self, task_result: schemas.TaskUpdateStatus, task_in_db: models.Task) -> None: + if self.result_type is ResultType.dataset: + crud_func = crud.dataset + elif 
self.result_type is ResultType.model: + crud_func = crud.model # type: ignore + else: + logger.info("[update task] no task result to update") + return + + result_record = crud_func.get_by_task_id(self.db, task_id=self.task.id) + if not result_record: + logger.error("[update task] task result record not found, skip") + return + + if self.result_type is ResultType.model and self.model_info: + # special path for model + # as long as we can get model_info, set model as ready and + # save related task parameters and config accordingly + crud.task.update_parameters_and_config( + self.db, + task=task_in_db, + parameters=self.model_info.task_parameters, + config=json.dumps(self.model_info.executor_config), + ) + crud.model.finish(self.db, result_record.id, result_state=ResultState.ready, result=asdict(self.model_info)) + try: + self.save_model_stats(self.model_info) + except FailedToConnectClickHouse: + logger.exception("Failed to write model stats to clickhouse, continue anyway") + return + + if task_result.state is TaskState.done: + crud_func.finish( + self.db, + result_record.id, + result_state=ResultState.ready, + result=asdict(self.result_info), + ) + else: + crud_func.finish( + self.db, + result_record.id, + result_state=ResultState.error, + ) diff --git a/ymir/backend/src/ymir_app/app/main.py b/ymir/backend/src/ymir_app/app/main.py index ce24599c52..349ae33ca3 100644 --- a/ymir/backend/src/ymir_app/app/main.py +++ b/ymir/backend/src/ymir_app/app/main.py @@ -70,6 +70,8 @@ async def swagger_ui_redirect() -> HTMLResponse: @app.on_event("startup") async def startup() -> None: + if settings.REDIS_TESTING: + return redis = aioredis.from_url(settings.BACKEND_REDIS_URL, encoding="utf8", decode_responses=True) FastAPICache.init(RedisBackend(redis), prefix="ymir-app-cache") asyncio.create_task(redis_stream.consume(batch_update_task_status)) @@ -77,6 +79,8 @@ async def startup() -> None: @app.on_event("shutdown") async def shutdown() -> None: + if settings.REDIS_TESTING: + return asyncio.create_task(redis_stream.disconnect()) diff --git a/ymir/backend/src/ymir_app/app/models/dataset.py b/ymir/backend/src/ymir_app/app/models/dataset.py index 83ab9029fb..fd2fd6a812 100644 --- a/ymir/backend/src/ymir_app/app/models/dataset.py +++ b/ymir/backend/src/ymir_app/app/models/dataset.py @@ -36,6 +36,7 @@ class Dataset(Base): viewonly=True, ) + is_visible = Column(Boolean, default=True, nullable=False) is_deleted = Column(Boolean, default=False, nullable=False) create_datetime = Column(DateTime, default=datetime.utcnow, nullable=False) update_datetime = Column( diff --git a/ymir/backend/src/ymir_app/app/models/dataset_group.py b/ymir/backend/src/ymir_app/app/models/dataset_group.py index fd2151ff23..9abf082282 100644 --- a/ymir/backend/src/ymir_app/app/models/dataset_group.py +++ b/ymir/backend/src/ymir_app/app/models/dataset_group.py @@ -24,6 +24,12 @@ class DatasetGroup(Base): uselist=True, viewonly=True, ) + visible_datasets = relationship( + "Dataset", + primaryjoin="and_(foreign(Dataset.dataset_group_id)==DatasetGroup.id, foreign(Dataset.is_visible))", + uselist=True, + viewonly=True, + ) is_deleted = Column(Boolean, default=False, nullable=False) create_datetime = Column(DateTime, default=datetime.utcnow, nullable=False) @@ -33,3 +39,7 @@ class DatasetGroup(Base): onupdate=datetime.utcnow, nullable=False, ) + + @property + def is_visible(self) -> bool: + return bool(self.visible_datasets) diff --git a/ymir/backend/src/ymir_app/app/models/image.py b/ymir/backend/src/ymir_app/app/models/image.py index 
2c8acf5636..4145821e28 100644 --- a/ymir/backend/src/ymir_app/app/models/image.py +++ b/ymir/backend/src/ymir_app/app/models/image.py @@ -15,7 +15,7 @@ class DockerImage(Base): name = Column(String(settings.STRING_LEN_LIMIT), index=True, nullable=False) url = Column(String(settings.STRING_LEN_LIMIT), index=True, nullable=False) hash = Column(String(settings.STRING_LEN_LIMIT), index=True) - description = Column(String(settings.STRING_LEN_LIMIT)) + description = Column(String(settings.LONG_STRING_LEN_LIMIT)) state = Column(Integer, index=True, nullable=False) related = relationship( "DockerImage", diff --git a/ymir/backend/src/ymir_app/app/models/iteration.py b/ymir/backend/src/ymir_app/app/models/iteration.py index 5f9164c1cc..43555608fd 100644 --- a/ymir/backend/src/ymir_app/app/models/iteration.py +++ b/ymir/backend/src/ymir_app/app/models/iteration.py @@ -1,4 +1,5 @@ from datetime import datetime +from typing import List from sqlalchemy import Boolean, Column, DateTime, Integer, SmallInteger, String @@ -33,3 +34,18 @@ class Iteration(Base): onupdate=datetime.utcnow, nullable=False, ) + + @property + def referenced_dataset_ids(self) -> List[int]: + datasets = [ + self.mining_input_dataset_id, + self.mining_output_dataset_id, + self.label_output_dataset_id, + self.training_input_dataset_id, + self.testing_dataset_id, + ] + return [dataset for dataset in datasets if dataset is not None] + + @property + def referenced_model_ids(self) -> List[int]: + return [self.training_output_model_id] if self.training_output_model_id else [] diff --git a/ymir/backend/src/ymir_app/app/models/model.py b/ymir/backend/src/ymir_app/app/models/model.py index 94a4aa99f7..5e13c4ecaf 100644 --- a/ymir/backend/src/ymir_app/app/models/model.py +++ b/ymir/backend/src/ymir_app/app/models/model.py @@ -41,6 +41,7 @@ class Model(Base): viewonly=True, ) + is_visible = Column(Boolean, default=True, nullable=False) is_deleted = Column(Boolean, default=False, nullable=False) create_datetime = Column(DateTime, default=datetime.utcnow, nullable=False) update_datetime = Column( diff --git a/ymir/backend/src/ymir_app/app/models/model_group.py b/ymir/backend/src/ymir_app/app/models/model_group.py index 644d477494..ce2641467a 100644 --- a/ymir/backend/src/ymir_app/app/models/model_group.py +++ b/ymir/backend/src/ymir_app/app/models/model_group.py @@ -25,6 +25,12 @@ class ModelGroup(Base): uselist=True, viewonly=True, ) + visible_models = relationship( + "Model", + primaryjoin="and_(foreign(Model.model_group_id)==ModelGroup.id, foreign(Model.is_visible))", + uselist=True, + viewonly=True, + ) is_deleted = Column(Boolean, default=False, nullable=False) create_datetime = Column(DateTime, default=datetime.utcnow, nullable=False) @@ -34,3 +40,7 @@ class ModelGroup(Base): onupdate=datetime.utcnow, nullable=False, ) + + @property + def is_visible(self) -> bool: + return bool(self.visible_models) diff --git a/ymir/backend/src/ymir_app/app/models/project.py b/ymir/backend/src/ymir_app/app/models/project.py index a5a455519b..6090d2080b 100644 --- a/ymir/backend/src/ymir_app/app/models/project.py +++ b/ymir/backend/src/ymir_app/app/models/project.py @@ -1,4 +1,5 @@ from datetime import datetime +from typing import List from sqlalchemy import ( Boolean, @@ -88,6 +89,12 @@ class Project(Base): uselist=False, viewonly=True, ) + iterations = relationship( + "Iteration", + primaryjoin="foreign(Iteration.project_id)==Project.id", + uselist=True, + viewonly=True, + ) is_example = Column(Boolean, default=False) is_deleted = Column(Boolean, 
default=False, nullable=False) @@ -106,3 +113,30 @@ def dataset_count(self) -> int: @property def model_count(self) -> int: return len(self.models) + + @property + def referenced_dataset_ids(self) -> List[int]: + """ + for each project, there are some resources that are required, including: + - project's testing dataset, mining dataset and initial training dataset + - datasets and models of current iteration + - all the training dataset of all the iterations + """ + project_dataset_ids = [self.testing_dataset_id, self.mining_dataset_id, self.initial_training_dataset_id] + current_iteration_dataset_ids = self.current_iteration.referenced_dataset_ids if self.current_iteration else [] + all_iterations_training_dataset_ids = [i.training_input_dataset_id for i in self.iterations] + dataset_ids = filter( + None, + project_dataset_ids + current_iteration_dataset_ids + all_iterations_training_dataset_ids, # type: ignore + ) + return list(set(dataset_ids)) + + @property + def referenced_model_ids(self) -> List[int]: + current_iteration_model_ids = self.current_iteration.referenced_model_ids if self.current_iteration else [] + all_iterations_training_model_ids = [i.training_output_model_id for i in self.iterations] + model_ids = filter( + None, + current_iteration_model_ids + [self.initial_model_id] + all_iterations_training_model_ids, # type: ignore + ) + return list(set(model_ids)) diff --git a/ymir/backend/src/ymir_app/app/schemas/__init__.py b/ymir/backend/src/ymir_app/app/schemas/__init__.py index bf4d073afc..26fa4c4d1f 100644 --- a/ymir/backend/src/ymir_app/app/schemas/__init__.py +++ b/ymir/backend/src/ymir_app/app/schemas/__init__.py @@ -84,6 +84,8 @@ TaskMonitorEvent, TaskMonitorEvents, TaskResultUpdateMessage, + BatchTasksCreate, + BatchTasksCreateResults, ) from .token import Token, TokenOut, TokenPayload from .user import ( @@ -96,4 +98,4 @@ UserState, UserUpdate, ) -from .common import RequestParameterBase +from .common import RequestParameterBase, BatchOperations diff --git a/ymir/backend/src/ymir_app/app/schemas/common.py b/ymir/backend/src/ymir_app/app/schemas/common.py index 52b319e4f8..72fbe35f43 100644 --- a/ymir/backend/src/ymir_app/app/schemas/common.py +++ b/ymir/backend/src/ymir_app/app/schemas/common.py @@ -1,5 +1,6 @@ from datetime import datetime -from typing import Optional +from enum import Enum +from typing import Optional, List from pydantic import BaseModel, Field, validator @@ -40,3 +41,18 @@ class IterationContext(BaseModel): class RequestParameterBase(BaseModel): iteration_context: Optional[IterationContext] project_id: int + + +class OperationAction(str, Enum): + hide = "hide" + unhide = "unhide" + + +class Operation(BaseModel): + action: OperationAction = Field(example="hide") + id_: int = Field(alias="id") + + +class BatchOperations(BaseModel): + project_id: int + operations: List[Operation] diff --git a/ymir/backend/src/ymir_app/app/schemas/dataset.py b/ymir/backend/src/ymir_app/app/schemas/dataset.py index 01227aee8e..534eeeb1bb 100644 --- a/ymir/backend/src/ymir_app/app/schemas/dataset.py +++ b/ymir/backend/src/ymir_app/app/schemas/dataset.py @@ -98,6 +98,7 @@ class DatasetInDBBase(IdModelMixin, DateTimeModelMixin, IsDeletedModelMixin, Dat task_id: int user_id: int related_task: Optional[TaskInternal] + is_visible: bool class Config: orm_mode = True @@ -152,3 +153,15 @@ class DatasetsFusionParameter(RequestParameterBase): exclude_labels: List[str] sampling_count: int = 0 + + +class DatasetEvaluationCreate(BaseModel): + project_id: int + gt_dataset_id: int + 
other_dataset_ids: List[int] + confidence_threshold: float + + +class DatasetEvaluationOut(Common): + # dict of dataset_id to evaluation result + result: Dict[int, Dict] diff --git a/ymir/backend/src/ymir_app/app/schemas/dataset_group.py b/ymir/backend/src/ymir_app/app/schemas/dataset_group.py index ada9c492d7..a417786e05 100644 --- a/ymir/backend/src/ymir_app/app/schemas/dataset_group.py +++ b/ymir/backend/src/ymir_app/app/schemas/dataset_group.py @@ -28,6 +28,7 @@ class DatasetGroupUpdate(BaseModel): class DatasetGroupInDBBase(IdModelMixin, DateTimeModelMixin, IsDeletedModelMixin, DatasetGroupBase): datasets: List[Dataset] + is_visible: bool class Config: orm_mode = True diff --git a/ymir/backend/src/ymir_app/app/schemas/model.py b/ymir/backend/src/ymir_app/app/schemas/model.py index 9fbaf39813..4f251c0168 100644 --- a/ymir/backend/src/ymir_app/app/schemas/model.py +++ b/ymir/backend/src/ymir_app/app/schemas/model.py @@ -63,6 +63,7 @@ class ModelInDBBase(IdModelMixin, DateTimeModelMixin, IsDeletedModelMixin, Model group_name: str version_num: int related_task: Optional[TaskInternal] + is_visible: bool class Config: orm_mode = True diff --git a/ymir/backend/src/ymir_app/app/schemas/model_group.py b/ymir/backend/src/ymir_app/app/schemas/model_group.py index df63240189..600c8daa1c 100644 --- a/ymir/backend/src/ymir_app/app/schemas/model_group.py +++ b/ymir/backend/src/ymir_app/app/schemas/model_group.py @@ -27,6 +27,7 @@ class ModelGroupUpdate(BaseModel): class ModelGroupInDBBase(IdModelMixin, DateTimeModelMixin, IsDeletedModelMixin, ModelGroupBase): models: List[Model] + is_visible: bool class Config: orm_mode = True diff --git a/ymir/backend/src/ymir_app/app/schemas/project.py b/ymir/backend/src/ymir_app/app/schemas/project.py index ad13c0d1af..a0f4436bfa 100644 --- a/ymir/backend/src/ymir_app/app/schemas/project.py +++ b/ymir/backend/src/ymir_app/app/schemas/project.py @@ -56,10 +56,21 @@ class ProjectUpdate(BaseModel): initial_model_id: Optional[int] initial_training_dataset_id: Optional[int] + training_keywords: Optional[List[str]] + class Config: use_enum_values = True validate_all = True + @validator("training_keywords") + def pack_keywords(cls, v: Optional[List[str]]) -> Optional[str]: + """ + serialize training keywords for db + """ + if v is not None: + return json.dumps(v) + return v + class ProjectInDBBase(IdModelMixin, DateTimeModelMixin, IsDeletedModelMixin, ProjectBase): training_dataset_group_id: Optional[int] @@ -73,6 +84,9 @@ class ProjectInDBBase(IdModelMixin, DateTimeModelMixin, IsDeletedModelMixin, Pro testing_dataset: Optional[Dataset] mining_dataset: Optional[Dataset] + referenced_model_ids: List[int] + referenced_dataset_ids: List[int] + class Config: orm_mode = True @@ -104,3 +118,11 @@ class ProjectPagination(BaseModel): class ProjectPaginationOut(Common): result: ProjectPagination + + +class ProjectStatus(BaseModel): + is_dirty: bool + + +class ProjectStatusOut(Common): + result: ProjectStatus diff --git a/ymir/backend/src/ymir_app/app/schemas/task.py b/ymir/backend/src/ymir_app/app/schemas/task.py index 3f26603b49..fa0fbaedc8 100644 --- a/ymir/backend/src/ymir_app/app/schemas/task.py +++ b/ymir/backend/src/ymir_app/app/schemas/task.py @@ -74,6 +74,10 @@ class Config: use_enum_values = True +class BatchTasksCreate(BaseModel): + payloads: List[TaskCreate] + + class TaskUpdate(BaseModel): name: str @@ -251,6 +255,14 @@ class TaskOut(Common): result: Task +class TasksOut(Common): + result: List[Task] + + +class BatchTasksCreateResults(Common): + result: 
List[Optional[Task]] + + class TaskPagination(BaseModel): total: int items: List[Task] diff --git a/ymir/backend/src/ymir_app/app/utils/files.py b/ymir/backend/src/ymir_app/app/utils/files.py index 6df79b7dd9..02a22cc295 100644 --- a/ymir/backend/src/ymir_app/app/utils/files.py +++ b/ymir/backend/src/ymir_app/app/utils/files.py @@ -100,9 +100,17 @@ def locate_dir(p: Union[str, Path], target: str) -> Path: Locate specifc target dirs """ for _p in Path(p).iterdir(): + if _p.is_dir() and _p.name.lower() == target: + return _p for __p in _p.iterdir(): if __p.is_dir() and __p.name.lower() == target: return __p + # Only search 3rd depth when no result was found in 2nd depth. + for _p in Path(p).iterdir(): + for __p in _p.iterdir(): + for ___p in __p.iterdir(): + if ___p.is_dir() and ___p.name.lower() == target: + return ___p raise FileNotFoundError() diff --git a/ymir/backend/src/ymir_app/app/utils/ymir_controller.py b/ymir/backend/src/ymir_app/app/utils/ymir_controller.py index d4872102c5..de438a3902 100644 --- a/ymir/backend/src/ymir_app/app/utils/ymir_controller.py +++ b/ymir/backend/src/ymir_app/app/utils/ymir_controller.py @@ -9,6 +9,7 @@ from fastapi.logger import logger from google.protobuf import json_format # type: ignore +from app.config import settings from app.constants.state import TaskType from app.schemas.dataset import ImportStrategy, MergeStrategy from common_utils.labels import UserLabels @@ -27,6 +28,9 @@ class ExtraRequestType(enum.IntEnum): pull_image = 600 get_gpu_info = 601 create_user = 602 + evaluate = 603 + check_repo = 604 + fix_repo = 605 MERGE_STRATEGY_MAPPING = { @@ -86,8 +90,6 @@ def prepare_create_user(self, request: mirsvrpb.GeneralReq, args: Dict) -> mirsv return request def prepare_create_project(self, request: mirsvrpb.GeneralReq, args: Dict) -> mirsvrpb.GeneralReq: - # project training target labels - request.in_class_ids[:] = args["training_classes"] request.req_type = mirsvrpb.REPO_CREATE return request @@ -286,9 +288,29 @@ def prepare_dataset_infer(self, request: mirsvrpb.GeneralReq, args: Dict) -> mir # need different app type for web, controller use same endpoint return self.prepare_mining(request, args) + def prepare_evaluate(self, request: mirsvrpb.GeneralReq, args: Dict) -> mirsvrpb.GeneralReq: + evaluate_config = mirsvrpb.EvaluateConfig() + evaluate_config.conf_thr = args["confidence_threshold"] + evaluate_config.iou_thrs_interval = "0.5:1:0.05" + evaluate_config.need_pr_curve = False + + request.req_type = mirsvrpb.CMD_EVALUATE + request.singleton_op = args["gt_dataset_hash"] + request.in_dataset_ids[:] = args["other_dataset_hashes"] + request.evaluate_config.CopyFrom(evaluate_config) + return request + + def prepare_check_repo(self, request: mirsvrpb.GeneralReq, args: Dict) -> mirsvrpb.GeneralReq: + request.req_type = mirsvrpb.CMD_REPO_CHECK + return request + + def prepare_fix_repo(self, request: mirsvrpb.GeneralReq, args: Dict) -> mirsvrpb.GeneralReq: + request.req_type = mirsvrpb.CMD_REPO_CLEAR + return request + class ControllerClient: - def __init__(self, channel: str) -> None: + def __init__(self, channel: str = settings.GRPC_CHANNEL) -> None: self.channel = grpc.insecure_channel(channel) self.stub = mir_grpc.mir_controller_serviceStub(self.channel) @@ -379,13 +401,12 @@ def create_user(self, user_id: int) -> Dict: req = ControllerRequest(type=ExtraRequestType.create_user, user_id=user_id) return self.send(req) - def create_project(self, user_id: int, project_id: int, task_id: str, args: Dict) -> Dict: + def create_project(self, user_id: int, 
project_id: int, task_id: str) -> Dict: req = ControllerRequest( type=ExtraRequestType.create_project, user_id=user_id, project_id=project_id, task_id=task_id, - args=args, ) return self.send(req) @@ -402,6 +423,7 @@ def import_dataset(self, user_id: int, project_id: int, task_hash: str, task_typ def call_inference( self, user_id: int, + project_id: int, model_hash: Optional[str], asset_dir: str, docker_image: Optional[str], @@ -412,6 +434,7 @@ def call_inference( req = ControllerRequest( type=ExtraRequestType.inference, user_id=user_id, + project_id=project_id, args={ "model_hash": model_hash, "asset_dir": asset_dir, @@ -443,3 +466,42 @@ def import_model(self, user_id: int, project_id: int, task_id: str, task_type: A args=args, ) return self.send(req) + + def evaluate_dataset( + self, + user_id: int, + project_id: int, + task_id: str, + confidence_threshold: float, + gt_dataset_hash: str, + other_dataset_hashes: List[str], + ) -> Dict: + req = ControllerRequest( + type=ExtraRequestType.evaluate, + user_id=user_id, + project_id=project_id, + task_id=task_id, + args={ + "confidence_threshold": confidence_threshold, + "gt_dataset_hash": gt_dataset_hash, + "other_dataset_hashes": other_dataset_hashes, + }, + ) + return self.send(req) + + def check_repo_status(self, user_id: int, project_id: int) -> bool: + req = ControllerRequest( + type=ExtraRequestType.check_repo, + user_id=user_id, + project_id=project_id, + ) + resp = self.send(req) + return resp["ops_ret"] + + def fix_repo(self, user_id: int, project_id: int) -> Dict: + req = ControllerRequest( + type=ExtraRequestType.fix_repo, + user_id=user_id, + project_id=project_id, + ) + return self.send(req) diff --git a/ymir/backend/src/ymir_app/app/utils/ymir_viz.py b/ymir/backend/src/ymir_app/app/utils/ymir_viz.py index e9219e1d8f..a3352cd0dd 100644 --- a/ymir/backend/src/ymir_app/app/utils/ymir_viz.py +++ b/ymir/backend/src/ymir_app/app/utils/ymir_viz.py @@ -5,7 +5,7 @@ from fastapi.logger import logger from pydantic import BaseModel -from app.api.errors.errors import ModelNotFound, ModelNotReady +from app.api.errors.errors import DatasetEvaluationNotFound, ModelNotFound, ModelNotReady from app.config import settings from common_utils.labels import UserLabels from id_definition.error_codes import VizErrorCode @@ -79,7 +79,7 @@ def from_viz_res(cls, res: Dict) -> "ModelMetaData": class VizDataset(BaseModel): """ Interface dataclass of VIZ output, defined as DatasetResult in doc: - https://github.com/IndustryEssentials/ymir/blob/master/ymir/backend/src/ymir-viz/doc/ymir_viz_API.yaml + https://github.com/IndustryEssentials/ymir/blob/master/ymir/backend/src/ymir_viz/doc/ymir_viz_API.yaml """ total_images_cnt: int @@ -111,6 +111,29 @@ def from_viz_res(cls, res: Dict, user_labels: UserLabels) -> "DatasetMetaData": ) +class EvaluationScore(BaseModel): + ap: float + ar: float + fn: int + fp: int + tp: int + + +class VizDatasetEvaluation(BaseModel): + ci_evaluations: Dict[int, EvaluationScore] # class_id -> scores + ci_averaged_evaluation: EvaluationScore + + +class VizDatasetEvaluationResult(BaseModel): + """ + Interface dataclass of VIZ output, defined as DatasetEvaluationResult in doc: + https://github.com/IndustryEssentials/ymir/blob/master/ymir/backend/src/ymir_viz/doc/ymir_viz_API.yaml + """ + + iou_evaluations: Dict[float, VizDatasetEvaluation] # iou -> evaluation + iou_averaged_evaluation: VizDatasetEvaluation + + class VizClient: def __init__(self, *, host: str = settings.VIZ_HOST): self.host = host @@ -118,6 +141,7 @@ def __init__(self, *, 
host: str = settings.VIZ_HOST): self._user_id = None # type: Optional[str] self._project_id = None # type: Optional[str] self._branch_id = None # type: Optional[str] + self._url_prefix = None # type: Optional[str] def initialize( self, @@ -129,6 +153,7 @@ def initialize( self._user_id = f"{user_id:0>4}" self._project_id = f"{project_id:0>6}" self._branch_id = branch_id + self._url_prefix = f"http://{self.host}/v1/users/{self._user_id}/repositories/{self._project_id}/branches/{self._branch_id}" # noqa: E501 def get_assets( self, @@ -138,8 +163,7 @@ def get_assets( limit: int = 20, user_labels: UserLabels, ) -> Assets: - url = f"http://{self.host}/v1/users/{self._user_id}/repositories/{self._project_id}/branches/{self._branch_id}/assets" # noqa: E501 - + url = f"{self._url_prefix}/assets" payload = {"class_id": keyword_id, "limit": limit, "offset": offset} resp = self.session.get(url, params=payload, timeout=settings.VIZ_TIMEOUT) if not resp.ok: @@ -155,8 +179,7 @@ def get_asset( asset_id: str, user_labels: UserLabels, ) -> Optional[Dict]: - url = f"http://{self.host}/v1/users/{self._user_id}/repositories/{self._project_id}/branches/{self._branch_id}/assets/{asset_id}" # noqa: E501 - + url = f"{self._url_prefix}/assets/{asset_id}" resp = self.session.get(url, timeout=settings.VIZ_TIMEOUT) if not resp.ok: logger.error("[viz] failed to get asset info: %s", resp.content) @@ -165,17 +188,27 @@ def get_asset( return asdict(Asset.from_viz_res(asset_id, res, user_labels)) def get_model(self) -> ModelMetaData: - url = f"http://{self.host}/v1/users/{self._user_id}/repositories/{self._project_id}/branches/{self._branch_id}/models" # noqa: E501 + url = f"{self._url_prefix}/models" resp = self.session.get(url, timeout=settings.VIZ_TIMEOUT) res = self.parse_resp(resp) return ModelMetaData.from_viz_res(res) def get_dataset(self, user_labels: UserLabels) -> DatasetMetaData: - url = f"http://{self.host}/v1/users/{self._user_id}/repositories/{self._project_id}/branches/{self._branch_id}/datasets" # noqa: E501 + url = f"{self._url_prefix}/datasets" resp = self.session.get(url, timeout=settings.VIZ_TIMEOUT) res = self.parse_resp(resp) return DatasetMetaData.from_viz_res(res, user_labels) + def get_evaluations(self, user_labels: UserLabels) -> Dict: + url = f"{self._url_prefix}/evaluations" + resp = self.session.get(url, timeout=settings.VIZ_TIMEOUT) + res = self.parse_resp(resp) + evaluations = { + dataset_hash: VizDatasetEvaluationResult(**evaluation).dict() for dataset_hash, evaluation in res.items() + } + convert_class_id_to_keyword(evaluations, user_labels) + return evaluations + def parse_resp(self, resp: requests.Response) -> Dict: """ response falls in three categories: @@ -190,6 +223,8 @@ def parse_resp(self, resp: requests.Response) -> Dict: error_code = resp.json()["code"] if error_code == VizErrorCode.MODEL_NOT_EXISTS: raise ModelNotFound() + elif error_code == VizErrorCode.DATASET_EVALUATION_NOT_EXISTS: + raise DatasetEvaluationNotFound() raise ModelNotReady() def close(self) -> None: @@ -198,3 +233,12 @@ def close(self) -> None: def get_asset_url(asset_id: str) -> str: return f"{settings.NGINX_PREFIX}/ymir-assets/{asset_id}" + + +def convert_class_id_to_keyword(obj: Dict, user_labels: UserLabels) -> None: + if isinstance(obj, dict): + for key, value in obj.items(): + if key == "ci_evaluations": + obj[key] = {user_labels.get_main_names(k)[0]: v for k, v in value.items()} + else: + convert_class_id_to_keyword(obj[key], user_labels) diff --git a/ymir/backend/src/ymir_app/prestart.sh 
b/ymir/backend/src/ymir_app/prestart.sh index b3763e16c3..ab4784797a 100644 --- a/ymir/backend/src/ymir_app/prestart.sh +++ b/ymir/backend/src/ymir_app/prestart.sh @@ -14,3 +14,6 @@ python app/initial_data.py # Clean legacy tasks python app/clean_tasks.py + +# Fix dirty repos +python app/fix_dirty_repos.py diff --git a/ymir/backend/src/ymir_app/tests/api/test_dataset.py b/ymir/backend/src/ymir_app/tests/api/test_dataset.py index 55fe6b545b..5cd5ce0cc5 100644 --- a/ymir/backend/src/ymir_app/tests/api/test_dataset.py +++ b/ymir/backend/src/ymir_app/tests/api/test_dataset.py @@ -90,6 +90,7 @@ def test_list_datasets_given_ids( class TestCreateDataset: def test_create_dataset_succeed(self, client: TestClient, normal_user_token_headers, mocker): + mocker.patch.object(m, "import_dataset_in_background") j = { "group_name": random_lower_string(), "version_num": random.randint(100, 200), diff --git a/ymir/backend/src/ymir_app/tests/api/test_dataset_group.py b/ymir/backend/src/ymir_app/tests/api/test_dataset_group.py index bdc6a5b48c..11780841e3 100644 --- a/ymir/backend/src/ymir_app/tests/api/test_dataset_group.py +++ b/ymir/backend/src/ymir_app/tests/api/test_dataset_group.py @@ -5,7 +5,7 @@ from sqlalchemy.orm import Session from app.config import settings -from tests.utils.datasets import create_dataset_group_record +from tests.utils.datasets import create_dataset_group_record, create_dataset_record from tests.utils.utils import random_lower_string @@ -19,16 +19,20 @@ def test_list_dataset_groups_with_results( mocker, ): project_id = randint(1000, 2000) - for _ in range(3): - create_dataset_group_record(db, user_id, project_id) + for idx in range(3): + grp = create_dataset_group_record(db, user_id, project_id) + if idx == 2: + create_dataset_record(db, user_id, project_id, grp.id) r = client.get( f"{settings.API_V1_STR}/dataset_groups/", headers=normal_user_token_headers, params={"project_id": project_id}, ) items = r.json()["result"]["items"] + for item in items: + assert item["is_visible"] total = r.json()["result"]["total"] - assert len(items) == total == 3 + assert len(items) == total == 1 class TestDeleteDatasetGroup: diff --git a/ymir/backend/src/ymir_app/tests/api/test_image.py b/ymir/backend/src/ymir_app/tests/api/test_image.py index e721428613..9dbfbcf8da 100644 --- a/ymir/backend/src/ymir_app/tests/api/test_image.py +++ b/ymir/backend/src/ymir_app/tests/api/test_image.py @@ -1,7 +1,10 @@ +from typing import Any from fastapi.testclient import TestClient +from app.api.api_v1.endpoints import images as m from app.api.errors.errors import error_codes from app.config import settings +from app.constants.state import DockerImageType from tests.utils.utils import random_lower_string @@ -110,3 +113,12 @@ def test_get_image_relationship_success(self, client: TestClient, admin_token_he for relationship, dest_image_id in zip(result, dest_image_ids): assert relationship["src_image_id"] == src_image_id assert relationship["dest_image_id"] == dest_image_id + + +def test_parse(mocker: Any) -> None: + config = { + DockerImageType.mining: {"A": 1}, + DockerImageType.infer: {"B": 2}, + } + res = list(m.parse_docker_image_config(config)) + assert len(res) == 2 diff --git a/ymir/backend/src/ymir_app/tests/api/test_model_group.py b/ymir/backend/src/ymir_app/tests/api/test_model_group.py index a4748831d7..c85b06f289 100644 --- a/ymir/backend/src/ymir_app/tests/api/test_model_group.py +++ b/ymir/backend/src/ymir_app/tests/api/test_model_group.py @@ -5,7 +5,7 @@ from sqlalchemy.orm import Session from 
app.config import settings -from tests.utils.models import create_model_group_record +from tests.utils.models import create_model_group_record, create_model from tests.utils.utils import random_lower_string @@ -20,7 +20,8 @@ def test_list_model_groups_with_results( ): project_id = randint(1000, 2000) for _ in range(3): - create_model_group_record(db, user_id, project_id) + grp = create_model_group_record(db, user_id, project_id) + create_model(db, user_id, grp.id) r = client.get( f"{settings.API_V1_STR}/model_groups/", headers=normal_user_token_headers, params={"project_id": project_id} ) diff --git a/ymir/backend/src/ymir_app/tests/api/test_task.py b/ymir/backend/src/ymir_app/tests/api/test_task.py index 77ac60ed51..994701f2a8 100644 --- a/ymir/backend/src/ymir_app/tests/api/test_task.py +++ b/ymir/backend/src/ymir_app/tests/api/test_task.py @@ -7,7 +7,6 @@ from app.api.api_v1.api import tasks as m from app.config import settings -from common_utils.labels import UserLabels from tests.utils.tasks import create_task from tests.utils.utils import random_lower_string @@ -61,56 +60,6 @@ def mock_clickhouse(mocker): return mocker.Mock() -def test_get_default_dataset_name(): - task_hash = random_lower_string(32) - task_name = random_lower_string(10) - assert m.get_default_record_name(task_hash, task_name) == task_name + "_" + task_hash[-6:] - - -class TestNormalizeParameters: - def test_normalize_task_parameters_succeed(self, mocker): - mocker.patch.object(m, "crud") - params = { - "keywords": "cat,dog,boy".split(","), - "dataset_id": 1, - "model_id": 233, - "name": random_lower_string(5), - "else": None, - } - user_labels = UserLabels.parse_obj( - dict( - labels=[ - { - "name": "cat", - "aliases": [], - "create_time": 1647075205.0, - "update_time": 1647075206.0, - "id": 0, - }, - { - "id": 1, - "name": "dog", - "aliases": [], - "create_time": 1647076207.0, - "update_time": 1647076408.0, - }, - { - "id": 2, - "name": "boy", - "aliases": [], - "create_time": 1647076209.0, - "update_time": 1647076410.0, - }, - ] - ) - ) - params = m.schemas.TaskParameter(**params) - res = m.normalize_parameters(mocker.Mock(), params, None, user_labels) - assert res["class_ids"] == [0, 1, 2] - assert "dataset_hash" in res - assert "model_hash" in res - - class TestListTasks: def test_list_tasks_succeed( self, diff --git a/ymir/backend/src/ymir_app/tests/crud/test_dataset.py b/ymir/backend/src/ymir_app/tests/crud/test_dataset.py index 7074a25309..5a138fbc23 100644 --- a/ymir/backend/src/ymir_app/tests/crud/test_dataset.py +++ b/ymir/backend/src/ymir_app/tests/crud/test_dataset.py @@ -4,8 +4,10 @@ from app import crud from app.schemas.dataset import DatasetCreate +from app.constants.state import ResultState from tests.utils.utils import random_hash from tests.utils.dataset_groups import create_dataset_group_record +from tests.utils.datasets import create_dataset_record def test_create_dataset(db: Session) -> None: @@ -49,3 +51,45 @@ def test_get_dataset(db: Session) -> None: assert dataset.hash == stored_dataset.hash assert dataset.name == stored_dataset.name + + +class TestToggleVisibility: + def test_toggle_dataset(self, db: Session) -> None: + user_id = randint(100, 200) + project_id = randint(1000, 2000) + dataset = create_dataset_record(db, user_id, project_id) + assert dataset.is_visible + + crud.dataset.batch_toggle_visibility(db, ids=[dataset.id], action="hide") + dataset = crud.dataset.get(db, id=dataset.id) + assert not dataset.is_visible + + crud.dataset.batch_toggle_visibility(db, ids=[dataset.id], 
action="unhide") + dataset = crud.dataset.get(db, id=dataset.id) + assert dataset.is_visible + + +class TestDeleteGroupResources: + def test_delete_group_resources(self, db: Session) -> None: + user_id = randint(100, 200) + project_id = randint(1000, 2000) + group = create_dataset_group_record(db, user_id, project_id) + dataset1 = create_dataset_record(db, user_id, project_id, group.id) + dataset2 = create_dataset_record(db, user_id, project_id, group.id) + assert not dataset1.is_deleted + assert not dataset2.is_deleted + + crud.dataset.remove_group_resources(db, group_id=group.id) + assert dataset1.is_deleted + assert dataset2.is_deleted + + +class TestUpdateDatasetState: + def test_update_dataset_state(self, db: Session) -> None: + user_id = randint(100, 200) + project_id = randint(1000, 2000) + dataset = create_dataset_record(db, user_id, project_id, state_=ResultState.processing) + assert dataset.result_state == ResultState.processing + + crud.dataset.update_state(db, dataset_id=dataset.id, new_state=ResultState.ready) + assert dataset.result_state == ResultState.ready diff --git a/ymir/backend/src/ymir_app/tests/crud/test_model.py b/ymir/backend/src/ymir_app/tests/crud/test_model.py index 3d6d88458d..071b19766b 100644 --- a/ymir/backend/src/ymir_app/tests/crud/test_model.py +++ b/ymir/backend/src/ymir_app/tests/crud/test_model.py @@ -4,8 +4,9 @@ from app import crud from app.schemas.model import ModelCreate +from app.constants.state import ResultState from tests.utils.utils import random_hash -from tests.utils.models import create_model_group_record +from tests.utils.models import create_model_group_record, create_model def test_create_model(db: Session) -> None: @@ -13,7 +14,7 @@ def test_create_model(db: Session) -> None: project_id = randint(1000, 2000) group = create_model_group_record(db, user_id, project_id) - model_hash = random_hash("dataset") + model_hash = random_hash("model") model_in = ModelCreate( db=db, hash=model_hash, @@ -72,3 +73,45 @@ def test_delete_model(db: Session): model = crud.model.soft_remove(db, id=model.id) deleted_model = crud.model.get(db=db, id=model.id) assert deleted_model.is_deleted + + +class TestToggleModelVisibility: + def test_toggle_model(self, db: Session) -> None: + user_id = randint(100, 200) + project_id = randint(1000, 2000) + model = create_model(db, user_id, project_id) + assert model.is_visible + + crud.model.batch_toggle_visibility(db, ids=[model.id], action="hide") + model = crud.model.get(db, id=model.id) + assert not model.is_visible + + crud.model.batch_toggle_visibility(db, ids=[model.id], action="unhide") + model = crud.model.get(db, id=model.id) + assert model.is_visible + + +class TestDeleteModelGroupResources: + def test_delete_model_group_resources(self, db: Session) -> None: + user_id = randint(100, 200) + project_id = randint(1000, 2000) + group = create_model_group_record(db, user_id, project_id) + model1 = create_model(db, user_id, group.id, project_id) + model2 = create_model(db, user_id, group.id, project_id) + assert not model1.is_deleted + assert not model2.is_deleted + + crud.model.remove_group_resources(db, group_id=group.id) + assert model1.is_deleted + assert model2.is_deleted + + +class TestUpdateModelState: + def test_update_model_state(self, db: Session) -> None: + user_id = randint(100, 200) + project_id = randint(1000, 2000) + model = create_model(db, user_id, project_id) + assert model.result_state == ResultState.processing + + crud.model.update_state(db, model_id=model.id, new_state=ResultState.ready) + 
assert model.result_state == ResultState.ready diff --git a/ymir/backend/src/ymir_app/tests/crud/test_project.py b/ymir/backend/src/ymir_app/tests/crud/test_project.py new file mode 100644 index 0000000000..06f3b2def8 --- /dev/null +++ b/ymir/backend/src/ymir_app/tests/crud/test_project.py @@ -0,0 +1,32 @@ +from typing import Any +from random import randint + +from sqlalchemy.orm import Session + +from app import crud, schemas +from tests.utils.projects import create_project_record +from tests.utils.utils import random_lower_string + + +def test_get_all_projects(db: Session, mocker: Any) -> None: + user_id = randint(100, 200) + project = create_project_record(db, user_id) + projects = crud.project.get_all_projects(db, limit=100) + assert project.id in [p.id for p in projects] + + +def test_create_project(db: Session, mocker: Any) -> None: + user_id = randint(100, 200) + j = {"name": random_lower_string(), "training_keywords": [random_lower_string() for _ in range(3)]} + in_ = schemas.ProjectCreate(**j) + record = crud.project.create_project(db, obj_in=in_, user_id=user_id) + fetched_record = crud.project.get(db, id=record.id) + assert record.name == fetched_record.name + + +def test_get_multiple_projects(db: Session, mocker: Any) -> None: + user_id = randint(1000, 2000) + for i in range(3): + create_project_record(db, user_id, name=f"prefix_{i}") + _, count = crud.project.get_multi_projects(db, user_id=user_id, name="prefix_") + assert count == 3 diff --git a/ymir/backend/src/ymir_app/tests/libs/test_datasets.py b/ymir/backend/src/ymir_app/tests/libs/test_datasets.py new file mode 100644 index 0000000000..6f9875f55f --- /dev/null +++ b/ymir/backend/src/ymir_app/tests/libs/test_datasets.py @@ -0,0 +1,32 @@ +from typing import Any +from random import randint +from app.libs import datasets as m +from tests.utils.utils import random_lower_string + + +class TestImportDatasetPaths: + def test_import_dataset_paths(self, mocker: Any) -> None: + mocker.patch.object(m, "verify_import_path", return_value=True) + input_path = random_lower_string() + p = m.ImportDatasetPaths(input_path, random_lower_string()) + assert p.annotation_dir == f"{input_path}/annotations" + assert p.asset_dir == f"{input_path}/images" + + +class TestEvaluateDataset: + def test_evaluate_dataset(self, mocker: Any) -> None: + user_id = randint(100, 200) + project_id = randint(1000, 2000) + confidence_threshold = 0.233 + ctrl = mocker.Mock() + viz = mocker.Mock() + viz.get_evaluations.return_value = {} + user_labels = mocker.Mock() + gt_dataset = mocker.Mock() + other_datasets = [mocker.Mock()] + m.evaluate_dataset( + ctrl, viz, user_id, project_id, user_labels, confidence_threshold, gt_dataset, other_datasets + ) + + ctrl.evaluate_dataset.assert_called() + viz.get_evaluations.assert_called() diff --git a/ymir/backend/src/ymir_app/tests/libs/test_models.py b/ymir/backend/src/ymir_app/tests/libs/test_models.py new file mode 100644 index 0000000000..02793aa7dd --- /dev/null +++ b/ymir/backend/src/ymir_app/tests/libs/test_models.py @@ -0,0 +1,24 @@ +from random import randint +from typing import Any + +from sqlalchemy.orm import Session + +from app.libs import models as m +from app.constants.state import ResultState +from tests.utils.utils import random_lower_string +from tests.utils.models import create_model + + +class TestImportModel: + def test_import_model_in_background(self, db: Session, mocker: Any) -> None: + ctrl = mocker.Mock() + model_import = mocker.Mock() + user_id = randint(100, 200) + model = create_model(db, user_id) 
+ assert model.result_state == ResultState.processing + task_hash = random_lower_string() + mock_import = mocker.Mock(side_effect=ValueError) + m._import_model = mock_import + m.import_model_in_background(db, ctrl, model_import, user_id, task_hash, model.id) + assert model.result_state == ResultState.error + mock_import.assert_called() diff --git a/ymir/backend/src/ymir_app/tests/libs/test_projects.py b/ymir/backend/src/ymir_app/tests/libs/test_projects.py new file mode 100644 index 0000000000..9732419147 --- /dev/null +++ b/ymir/backend/src/ymir_app/tests/libs/test_projects.py @@ -0,0 +1,32 @@ +from random import randint +from typing import Any + +from sqlalchemy.orm import Session + +from app.libs import projects as m +from app.constants.state import ResultState, TaskType +from tests.utils.utils import random_lower_string + + +class TestSetupDatasetAndGroup: + def test_setup_dataset_and_group(self, db: Session, mocker: Any) -> None: + ctrl = mocker.Mock() + group_name = random_lower_string() + user_id = randint(100, 200) + project_id = randint(1000, 2000) + task_type = TaskType.import_data + + dataset = m.setup_dataset_and_group(db, ctrl, group_name, project_id, user_id, task_type) + assert dataset.result_state == ResultState.processing + + +class TestSetupModelAndGroup: + def test_setup_model_and_group(self, db: Session, mocker: Any) -> None: + ctrl = mocker.Mock() + group_name = random_lower_string() + user_id = randint(100, 200) + project_id = randint(1000, 2000) + task_type = TaskType.import_data + + model = m.setup_model_and_group(db, ctrl, group_name, project_id, user_id, task_type) + assert model.result_state == ResultState.processing diff --git a/ymir/backend/src/ymir_app/tests/libs/test_tasks.py b/ymir/backend/src/ymir_app/tests/libs/test_tasks.py new file mode 100644 index 0000000000..7e7a0c6e37 --- /dev/null +++ b/ymir/backend/src/ymir_app/tests/libs/test_tasks.py @@ -0,0 +1,166 @@ +import pytest +from random import randint +from typing import Any + +from sqlalchemy.orm import Session + +from app.libs import tasks as m +from tests.utils.utils import random_lower_string +from common_utils.labels import UserLabels +from app.constants.state import TaskType +from tests.utils.tasks import create_task +from tests.utils.datasets import create_dataset_record, create_dataset_group_record +from tests.utils.models import create_model_group_record + + +class TestNormalizeParameters: + def test_normalize_task_parameters_succeed(self, mocker: Any) -> Any: + mocker.patch.object(m, "crud") + params = { + "keywords": "cat,dog,boy".split(","), + "dataset_id": 1, + "model_id": 233, + "name": random_lower_string(5), + "else": None, + } + user_labels = UserLabels.parse_obj( + dict( + labels=[ + { + "name": "cat", + "aliases": [], + "create_time": 1647075205.0, + "update_time": 1647075206.0, + "id": 0, + }, + { + "id": 1, + "name": "dog", + "aliases": [], + "create_time": 1647076207.0, + "update_time": 1647076408.0, + }, + { + "id": 2, + "name": "boy", + "aliases": [], + "create_time": 1647076209.0, + "update_time": 1647076410.0, + }, + ] + ) + ) + params = m.schemas.TaskParameter(**params) + res = m.normalize_parameters(mocker.Mock(), params, None, user_labels) + assert res["class_ids"] == [0, 1, 2] + assert "dataset_hash" in res + assert "model_hash" in res + + +class TestWriteClickhouseMetrics: + def test_write_clickhouse_metrics(self, mocker: Any) -> None: + ch = mocker.Mock() + mocker.patch.object(m, "YmirClickHouse", return_value=ch) + task_info = mocker.Mock(type=TaskType.training.value) + 
dataset_id = randint(100, 200) + dataset_group_id = randint(1000, 2000) + model_id = randint(10000, 20000) + keywords = [random_lower_string() for _ in range(3)] + + m.write_clickhouse_metrics(task_info, dataset_group_id, dataset_id, model_id, keywords) + ch.save_task_parameter.assert_called() + ch.save_dataset_keyword.assert_called() + + +class TestCreateSingleTask: + def test_create_single_task(self, db: Session, mocker: Any) -> None: + mocker.patch.object(m, "normalize_parameters") + ctrl = mocker.Mock() + mocker.patch.object(m, "ControllerClient", return_value=ctrl) + mocker.patch.object(m, "YmirClickHouse") + user_id = randint(100, 200) + project_id = randint(1000, 2000) + user_labels = mocker.Mock() + j = { + "name": random_lower_string(), + "type": TaskType.training.value, + "project_id": project_id, + "parameters": {"dataset_id": randint(100, 200)}, + } + task_in = m.schemas.TaskCreate(**j) + task = m.create_single_task(db, user_id, user_labels, task_in) + assert task.type == TaskType.training + assert task.project_id == project_id + ctrl.create_task.assert_called() + + +class TestTaskResult: + def test_task_result_propriety(self, db: Session, mocker: Any) -> None: + user_id = randint(100, 200) + project_id = randint(1000, 2000) + task_in_db = create_task(db, user_id, project_id) + + ctrl = mocker.Mock() + mocker.patch.object(m, "ControllerClient", return_value=ctrl) + viz = mocker.Mock() + mocker.patch.object(m, "VizClient", return_value=viz) + + tr = m.TaskResult(db, task_in_db) + ctrl.get_labels_of_user.assert_not_called() + viz.get_model.assert_not_called() + viz.get_dataset.assert_not_called() + + tr.user_labels + ctrl.get_labels_of_user.assert_called() + + tr.model_info + viz.get_model.assert_called() + tr.dataset_info + viz.get_dataset.assert_called() + + def test_get_dest_group_info_is_dataset(self, db: Session, mocker: Any) -> None: + user_id = randint(100, 200) + project_id = randint(1000, 2000) + task_in_db = create_task(db, user_id, project_id) + + ctrl = mocker.Mock() + mocker.patch.object(m, "ControllerClient", return_value=ctrl) + viz = mocker.Mock() + mocker.patch.object(m, "VizClient", return_value=viz) + + tr = m.TaskResult(db, task_in_db) + group = create_dataset_group_record(db, user_id, project_id) + dataset = create_dataset_record(db, user_id, project_id, dataset_group_id=group.id) + + result_group_id, result_group_name = tr.get_dest_group_info(dataset.id) + assert group.id == result_group_id + assert group.name == result_group_name + + def test_get_dest_group_info_is_model(self, db: Session, mocker: Any) -> None: + user_id = randint(100, 200) + project_id = randint(1000, 2000) + task_in_db = create_task(db, user_id, project_id, type_=TaskType.training) + + ctrl = mocker.Mock() + mocker.patch.object(m, "ControllerClient", return_value=ctrl) + viz = mocker.Mock() + mocker.patch.object(m, "VizClient", return_value=viz) + + tr = m.TaskResult(db, task_in_db) + dataset_group = create_dataset_group_record(db, user_id, project_id) + dataset = create_dataset_record(db, user_id, project_id, dataset_group_id=dataset_group.id) + model_group = create_model_group_record(db, user_id, project_id, dataset.id) + + result_group_id, result_group_name = tr.get_dest_group_info(dataset.id) + assert model_group.id == result_group_id + assert model_group.name == result_group_name + + +class TestShouldRetry: + @pytest.mark.asyncio() + async def test_should_retry(self, mocker: Any) -> None: + resp = mocker.Mock(ok=False) + assert await m.should_retry(resp) + + resp = 
mocker.Mock(ok=True) + assert not await m.should_retry(resp) diff --git a/ymir/backend/src/ymir_app/tests/scripts/test_clean_tasks.py b/ymir/backend/src/ymir_app/tests/scripts/test_clean_tasks.py new file mode 100644 index 0000000000..3e1774d74c --- /dev/null +++ b/ymir/backend/src/ymir_app/tests/scripts/test_clean_tasks.py @@ -0,0 +1,15 @@ +from random import randint +from typing import Any + +from sqlalchemy.orm import Session + +from app import clean_tasks as m +from app.constants.state import TaskState +from tests.utils.tasks import create_task + + +def test_list_unfinished_tasks(db: Session, mocker: Any) -> None: + user_id = randint(100, 200) + task = create_task(db, user_id, state=TaskState.running) + tasks = m.list_unfinished_tasks(db) + assert task.id in [t.id for t in tasks] diff --git a/ymir/backend/src/ymir_app/tests/scripts/test_fix_dirty_repos.py b/ymir/backend/src/ymir_app/tests/scripts/test_fix_dirty_repos.py new file mode 100644 index 0000000000..7ddfdba7df --- /dev/null +++ b/ymir/backend/src/ymir_app/tests/scripts/test_fix_dirty_repos.py @@ -0,0 +1,19 @@ +from typing import Any +from sqlalchemy.orm import Session + +from app import fix_dirty_repos as m +from tests.utils.projects import create_project_record + + +def test_iter_all_repos(db: Session) -> None: + project = create_project_record(db) + projects = list(m.iter_all_projects(db)) + assert project.id in [p.id for ps in projects for p in ps] + + +def test_fix_repo(db: Session, mocker: Any) -> None: + ctrl = mocker.Mock() + mocker.patch.object(m, "ControllerClient", return_value=ctrl) + project = create_project_record(db) + m.fix_repo(ctrl, project) + ctrl.fix_repo.assert_called_with(user_id=project.user_id, project_id=project.id) diff --git a/ymir/backend/src/ymir_app/tests/utils/models.py b/ymir/backend/src/ymir_app/tests/utils/models.py index 88dbb6e0d9..8d9b0454c3 100644 --- a/ymir/backend/src/ymir_app/tests/utils/models.py +++ b/ymir/backend/src/ymir_app/tests/utils/models.py @@ -12,22 +12,28 @@ def create_model_group_record( db: Session, user_id: int, project_id: Optional[int] = None, + training_dataset_id: Optional[int] = None, ): - project_id = project_id or randint(1000, 2000) + project_id = project_id or randint(100, 200) + training_dataset_id = training_dataset_id or randint(1000, 2000) j = { "name": random_lower_string(), "user_id": user_id, "project_id": project_id, + "training_dataset_id": training_dataset_id, } in_ = schemas.ModelGroupCreate(**j) record = crud.model_group.create_with_user_id(db, obj_in=in_, user_id=user_id) return record -def create_model(db: Session, user_id: int) -> models.Model: - project_id = randint(100, 200) - group = create_model_group_record(db, user_id, project_id) - group_id = group.id +def create_model( + db: Session, user_id: int, group_id: Optional[int] = None, project_id: Optional[int] = None +) -> models.Model: + project_id = project_id or randint(100, 200) + if not group_id: + group = create_model_group_record(db, user_id, project_id) + group_id = group.id task = crud.task.create_placeholder(db, type_=TaskType.training, user_id=user_id, project_id=project_id) model_in = schemas.ModelCreate( diff --git a/ymir/backend/src/ymir_app/tests/utils/projects.py b/ymir/backend/src/ymir_app/tests/utils/projects.py index c30cc96308..b439ea8d26 100644 --- a/ymir/backend/src/ymir_app/tests/utils/projects.py +++ b/ymir/backend/src/ymir_app/tests/utils/projects.py @@ -11,9 +11,11 @@ def create_project_record( db: Session, user_id: Optional[int] = None, + name: Optional[str] = None, ): + 
name = name or random_lower_string() user_id = user_id or randint(1, 20) - j = {"name": random_lower_string(), "training_keywords": [random_lower_string() for _ in range(3)]} + j = {"name": name, "training_keywords": [random_lower_string() for _ in range(3)]} in_ = schemas.ProjectCreate(**j) record = crud.project.create_project(db, obj_in=in_, user_id=user_id) diff --git a/ymir/backend/src/ymir_app/tests/utils/tasks.py b/ymir/backend/src/ymir_app/tests/utils/tasks.py index ac11618621..b2ade340e9 100644 --- a/ymir/backend/src/ymir_app/tests/utils/tasks.py +++ b/ymir/backend/src/ymir_app/tests/utils/tasks.py @@ -4,7 +4,7 @@ from sqlalchemy.orm import Session from app import crud, schemas -from app.constants.state import TaskType +from app.constants.state import TaskType, TaskState from tests.utils.utils import random_lower_string from tests.utils.datasets import create_dataset_record @@ -14,6 +14,7 @@ def create_task( user_id: int, project_id: Optional[int] = None, type_: TaskType = TaskType.mining, + state: TaskState = TaskState.done, ): project_id = project_id or randint(100, 200) j = { @@ -21,6 +22,7 @@ def create_task( "type": type_, "project_id": project_id, "parameters": {"dataset_id": randint(100, 200)}, + "state": state, } task_in = schemas.TaskCreate(**j) task = crud.task.create_task(db, obj_in=task_in, task_hash=random_lower_string(), user_id=user_id) diff --git a/ymir/backend/src/ymir_app/tests/utils/test_controller.py b/ymir/backend/src/ymir_app/tests/utils/test_controller.py index e0d2334735..f32f7c2d75 100644 --- a/ymir/backend/src/ymir_app/tests/utils/test_controller.py +++ b/ymir/backend/src/ymir_app/tests/utils/test_controller.py @@ -146,6 +146,7 @@ def test_send(self, mocker): def test_inference(self, mocker): user_id = random.randint(1000, 9000) + project_id = random.randint(1000, 9000) model_hash = random_lower_string() asset_dir = random_lower_string() channel_str = random_lower_string() @@ -153,7 +154,7 @@ def test_inference(self, mocker): docker_config = random_lower_string() cc = m.ControllerClient(channel_str) cc.send = mock_send = mocker.Mock() - cc.call_inference(user_id, model_hash, asset_dir, docker_image, docker_config) + cc.call_inference(user_id, project_id, model_hash, asset_dir, docker_image, docker_config) mock_send.assert_called() generated_req = mock_send.call_args[0][0].req assert generated_req.user_id == str(user_id) diff --git a/ymir/backend/src/ymir_app/tests/utils/test_err.py b/ymir/backend/src/ymir_app/tests/utils/test_err.py new file mode 100644 index 0000000000..a781db9529 --- /dev/null +++ b/ymir/backend/src/ymir_app/tests/utils/test_err.py @@ -0,0 +1,10 @@ +import pytest +from typing import Any +from app.utils import err as m + + +def test_retry(mocker: Any) -> None: + f_raise = mocker.Mock(side_effect=ValueError) + with pytest.raises(ValueError): + m.retry(f_raise, n_times=3) + assert f_raise.call_count == 3 diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_base.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_base.py index 2584fc3a46..524c38f2a5 100644 --- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_base.py +++ b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_base.py @@ -100,6 +100,7 @@ def prepare_work_dir(self) -> str: # Only create work_dir for specific tasks. 
if self._request.req_type not in [ backend_pb2.RequestType.TASK_CREATE, + backend_pb2.RequestType.CMD_EVALUATE, backend_pb2.RequestType.CMD_FILTER, backend_pb2.RequestType.CMD_MERGE, backend_pb2.RequestType.CMD_INFERENCE, diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_evaluate.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_evaluate.py new file mode 100644 index 0000000000..973898134a --- /dev/null +++ b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_evaluate.py @@ -0,0 +1,75 @@ +from controller.invoker.invoker_cmd_base import BaseMirControllerInvoker +from controller.utils import checker, revs, utils +from id_definition.error_codes import CTLResponseCode +from proto import backend_pb2 + + +class EvaluateInvoker(BaseMirControllerInvoker): + """ + invoker for command evaluate + request.in_dataset_ids: predictions + request.singleton_op: ground truth + request.task_id: task hash for this evaluate command + request.evaluate_config.conf_thr: confidence threshold + request.evaluate_config.iou_thrs_interval: from:to:step, default is '0.5:1.0:0.05', end point excluded + """ + def pre_invoke(self) -> backend_pb2.GeneralResp: + checker_resp = checker.check_request(request=self._request, + prerequisites=[ + checker.Prerequisites.CHECK_USER_ID, + checker.Prerequisites.CHECK_REPO_ID, + checker.Prerequisites.CHECK_REPO_ROOT_EXIST, + checker.Prerequisites.CHECK_TASK_ID, + checker.Prerequisites.CHECK_SINGLETON_OP, + checker.Prerequisites.CHECK_IN_DATASET_IDS, + ], + mir_root=self._repo_root) + if checker_resp.code != CTLResponseCode.CTR_OK: + return checker_resp + + conf_thr = self._request.evaluate_config.conf_thr + if conf_thr < 0 or conf_thr >= 1: + return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, + f"invalid evaluate conf thr: {conf_thr:.2f}") + + iou_thrs_interval: str = self._request.evaluate_config.iou_thrs_interval or '0.5:1.0:0.05' + iou_thrs_interval_list = [float(v) for v in iou_thrs_interval.split(':')] + if len(iou_thrs_interval_list) != 3: + return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, + "invalid evaluate iou thrs interval: {}".format(iou_thrs_interval)) + for v in iou_thrs_interval_list: + if v < 0 or v > 1: + return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, + "invalid evaluate iou thrs interval: {}".format(iou_thrs_interval)) + + return utils.make_general_response(CTLResponseCode.CTR_OK, "") + + def invoke(self) -> backend_pb2.GeneralResp: + expected_type = backend_pb2.RequestType.CMD_EVALUATE + if self._request.req_type != expected_type: + return utils.make_general_response(CTLResponseCode.MIS_MATCHED_INVOKER_TYPE, + f"expected: {expected_type} vs actual: {self._request.req_type}") + + ec = self._request.evaluate_config + command = [ + utils.mir_executable(), + 'evaluate', + '--root', + self._repo_root, + '--dst-rev', + revs.join_tvt_branch_tid(branch_id=self._request.task_id, tid=self._request.task_id), + '--src-revs', + revs.build_src_revs(in_src_revs=self._request.in_dataset_ids, his_tid=self._request.his_task_id), + '--gt-rev', + revs.join_tvt_branch_tid(branch_id=self._request.singleton_op, tid=self._request.singleton_op), + '-w', + self._work_dir, + '--conf-thr', + f"{ec.conf_thr:.2f}", + '--iou-thrs', + ec.iou_thrs_interval, + ] + if ec.need_pr_curve: + command.append('--need-pr-curve') + + return utils.run_command(command) diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_inference.py 
b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_inference.py index 0af9131cc9..18fc78b795 100644 --- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_inference.py +++ b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_inference.py @@ -87,6 +87,7 @@ def invoke(self) -> backend_pb2.GeneralResp: config_file = self.gen_inference_config(self._request.docker_image_config, self._work_dir) self.inference_cmd( + repo_root=self._repo_root, work_dir=self._work_dir, config_file=config_file, model_location=self._assets_config["modelskvlocation"], @@ -99,10 +100,11 @@ def invoke(self) -> backend_pb2.GeneralResp: return self.generate_inference_response(inference_result) @classmethod - def inference_cmd(cls, work_dir: str, model_location: str, config_file: str, model_hash: str, index_file: str, - executor: str) -> backend_pb2.GeneralResp: + def inference_cmd(cls, repo_root: str, work_dir: str, model_location: str, config_file: str, model_hash: str, + index_file: str, executor: str) -> backend_pb2.GeneralResp: infer_cmd = [ - utils.mir_executable(), 'infer', '-w', work_dir, '--model-location', model_location, '--index-file', - index_file, '--model-hash', model_hash, '--task-config-file', config_file, "--executor", executor + utils.mir_executable(), 'infer', '--root', repo_root, '-w', work_dir, '--model-location', model_location, + '--index-file', index_file, '--model-hash', model_hash, '--task-config-file', config_file, "--executor", + executor ] return utils.run_command(infer_cmd) diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_init.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_init.py index ec649928eb..dbc3b0f821 100644 --- a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_init.py +++ b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_init.py @@ -35,11 +35,6 @@ def invoke(self) -> backend_pb2.GeneralResp: os.link(self._label_storage_file, link_dst_file) command = [utils.mir_executable(), 'init', '--root', self._repo_root] - if self._request.in_class_ids: - command.extend([ - '--project-class-names', - ';'.join(self._user_labels.get_main_names(class_ids=list(self._request.in_class_ids))) - ]) command.extend( ['--with-empty-rev', revs.join_tvt_branch_tid(branch_id=self._request.task_id, tid=self._request.task_id)]) diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_repo_check.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_repo_check.py new file mode 100644 index 0000000000..60b4f4d20d --- /dev/null +++ b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_repo_check.py @@ -0,0 +1,36 @@ +import subprocess +from controller.invoker.invoker_cmd_base import BaseMirControllerInvoker +from controller.utils import checker, utils +from id_definition.error_codes import CTLResponseCode +from proto import backend_pb2 + + +class RepoCheckInvoker(BaseMirControllerInvoker): + def pre_invoke(self) -> backend_pb2.GeneralResp: + return checker.check_request( + request=self._request, + prerequisites=[ + checker.Prerequisites.CHECK_USER_ID, + checker.Prerequisites.CHECK_REPO_ID, + checker.Prerequisites.CHECK_REPO_ROOT_EXIST, + ], + mir_root=self._repo_root, + ) + + def invoke(self) -> backend_pb2.GeneralResp: + expected_type = backend_pb2.RequestType.CMD_REPO_CHECK + if self._request.req_type != expected_type: + return utils.make_general_response(CTLResponseCode.MIS_MATCHED_INVOKER_TYPE, + f"expected: {expected_type} vs actual: 
{self._request.req_type}") + + response = backend_pb2.GeneralResp() + response.code = CTLResponseCode.CTR_OK + command = [utils.mir_executable(), 'status', '--root', self._repo_root] + result = subprocess.run(command, capture_output=True, text=True) + if 'clean' in result.stdout: + response.ops_ret = True + elif 'dirty' in result.stdout: + response.ops_ret = False + else: + raise RuntimeError(f"Cannot check status for mir_root {self._repo_root}\nresult: {result}") + return response diff --git a/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_repo_clear.py b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_repo_clear.py new file mode 100644 index 0000000000..b2172ec2c7 --- /dev/null +++ b/ymir/backend/src/ymir_controller/controller/invoker/invoker_cmd_repo_clear.py @@ -0,0 +1,48 @@ +from controller.invoker.invoker_cmd_base import BaseMirControllerInvoker +from controller.utils import checker, invoker_call, utils +from id_definition.error_codes import CTLResponseCode +from controller.invoker.invoker_cmd_branch_commit import BranchCommitInvoker +from controller.invoker.invoker_cmd_repo_check import RepoCheckInvoker +from proto import backend_pb2 + + +class RepoClearInvoker(BaseMirControllerInvoker): + def pre_invoke(self) -> backend_pb2.GeneralResp: + return checker.check_request( + request=self._request, + prerequisites=[ + checker.Prerequisites.CHECK_USER_ID, + checker.Prerequisites.CHECK_REPO_ID, + checker.Prerequisites.CHECK_REPO_ROOT_EXIST, + ], + mir_root=self._repo_root, + ) + + def invoke(self) -> backend_pb2.GeneralResp: + expected_type = backend_pb2.RequestType.CMD_REPO_CLEAR + if self._request.req_type != expected_type: + return utils.make_general_response(CTLResponseCode.MIS_MATCHED_INVOKER_TYPE, + f"expected: {expected_type} vs actual: {self._request.req_type}") + + request = self._request + check_ret = invoker_call.make_invoker_cmd_call( + invoker=RepoCheckInvoker, + sandbox_root=self._sandbox_root, + req_type=backend_pb2.RequestType.CMD_REPO_CHECK, + user_id=request.user_id, + repo_id=request.repo_id, + task_id=request.task_id, + ) + # check failed, or repo is clean.
+ if check_ret.code != CTLResponseCode.CTR_OK or check_ret.ops_ret: + return check_ret + + return invoker_call.make_invoker_cmd_call( + invoker=BranchCommitInvoker, + sandbox_root=self._sandbox_root, + req_type=backend_pb2.RequestType.CMD_COMMIT, + user_id=request.user_id, + repo_id=request.repo_id, + task_id=request.task_id, + commit_message="Manually clear mir repo.", + ) diff --git a/ymir/backend/src/ymir_controller/controller/label_project_monitor.py b/ymir/backend/src/ymir_controller/controller/label_project_monitor.py index 49f15da0af..22c904e056 100644 --- a/ymir/backend/src/ymir_controller/controller/label_project_monitor.py +++ b/ymir/backend/src/ymir_controller/controller/label_project_monitor.py @@ -3,7 +3,7 @@ import os import sys -import requests +from requests.exceptions import ConnectionError, HTTPError, Timeout import sentry_sdk from apscheduler.schedulers.blocking import BlockingScheduler @@ -24,6 +24,10 @@ def trigger_mir_import( def remove_json_file(des_annotation_path: str) -> None: + if not os.path.isdir(des_annotation_path): + logging.error(f"des_annotation_path not exist: {des_annotation_path}") + return + for one_file in os.listdir(des_annotation_path): if one_file.endswith(".json"): os.remove(os.path.join(des_annotation_path, one_file)) @@ -40,10 +44,11 @@ def _gen_index_file(des_annotation_path: str) -> str: pic_path = json_content["task"]["data"]["image"].replace("data/local-files/?d=", "") media_files.append(pic_path) elif label_task_config.LABEL_FREE == label_task_config.LABEL_TOOL: - des_annotation_path = os.path.join(des_annotation_path, "images") - for one_file in os.listdir(des_annotation_path): - if one_file.endswith(".jpeg") or one_file.endswith(".jpg") or one_file.endswith(".png"): - media_files.append(os.path.join(des_annotation_path, one_file)) + des_annotation_media_path = os.path.join(des_annotation_path, "images") + if os.path.isdir(des_annotation_media_path): + for one_file in os.listdir(des_annotation_media_path): + if os.path.splitext(one_file)[1].lower() in [".jpeg", ".jpg", ".png"]: + media_files.append(os.path.join(des_annotation_media_path, one_file)) else: raise ValueError("LABEL_TOOL Error") @@ -71,7 +76,7 @@ def lable_task_monitor() -> None: label_instance.convert_annotation_to_voc( project_info["project_id"], project_info["des_annotation_path"] ) - except requests.HTTPError as e: + except (ConnectionError, HTTPError, Timeout) as e: sentry_sdk.capture_exception(e) logging.error(f"get label task {task_id} error: {e}, set task_id:{task_id} error") state = LogState.ERROR diff --git a/ymir/backend/src/ymir_controller/controller/server.py b/ymir/backend/src/ymir_controller/controller/server.py index 26c959faa0..005ac76c34 100644 --- a/ymir/backend/src/ymir_controller/controller/server.py +++ b/ymir/backend/src/ymir_controller/controller/server.py @@ -8,6 +8,7 @@ from typing import Any, Dict import grpc +from requests.exceptions import ConnectionError, HTTPError, Timeout import sentry_sdk import yaml @@ -48,8 +49,14 @@ def data_manage_request(self, request: backend_pb2.GeneralReq, context: Any) -> try: invoker_result = invoker.server_invoke() except errors.MirCtrError as e: - logging.exception(f"task {task_id} error: {e}") + logging.exception(f"task {task_id} MirCtrError error: {e}") return utils.make_general_response(e.error_code, e.error_message) + except (ConnectionError, HTTPError, Timeout) as e: + logging.exception(f"task {task_id} HTTPError error: {e}") + return utils.make_general_response(CTLResponseCode.INVOKER_HTTP_ERROR, str(e)) + 
except Exception as e: + logging.exception(f"task {task_id} general error: {e}") + return utils.make_general_response(CTLResponseCode.INVOKER_UNKNOWN_ERROR, str(e)) logging.info(f"task {task_id} result: {invoker_result}") if isinstance(invoker_result, backend_pb2.GeneralResp): diff --git a/ymir/backend/src/ymir_controller/controller/utils/checker.py b/ymir/backend/src/ymir_controller/controller/utils/checker.py index 36bac7de4c..b7fc7bbb77 100644 --- a/ymir/backend/src/ymir_controller/controller/utils/checker.py +++ b/ymir/backend/src/ymir_controller/controller/utils/checker.py @@ -26,6 +26,8 @@ class Prerequisites(IntEnum): CHECK_COMMIT_MESSAGE = auto() CHECK_TASKINFO_IDS = auto() CHECK_SINGLE_IN_DATASET_ID = auto() + CHECK_IN_DATASET_IDS = auto() + CHECK_HIS_TASK_ID = auto() # check controller request @@ -162,6 +164,15 @@ def _check_user_root_not_exist(request: backend_pb2.GeneralReq, mir_root: str) - return utils.make_general_response(CTLResponseCode.CTR_OK, "") +def _check_in_dataset_ids(request: backend_pb2.GeneralReq, mir_root: str) -> backend_pb2.GeneralResp: + in_dataset_ids = request.in_dataset_ids + if not in_dataset_ids: + return utils.make_general_response(CTLResponseCode.ARG_VALIDATION_FAILED, + "invalid in_dataset ids: {}".format(in_dataset_ids)) + + return utils.make_general_response(CTLResponseCode.CTR_OK, "") + + def _check_single_in_dataset_id(request: backend_pb2.GeneralReq, mir_root: str) -> backend_pb2.GeneralResp: in_dataset_ids = request.in_dataset_ids if not in_dataset_ids or len(in_dataset_ids) > 1: diff --git a/ymir/backend/src/ymir_controller/controller/utils/invoker_call.py b/ymir/backend/src/ymir_controller/controller/utils/invoker_call.py index 9d5430c4dd..b483b58bb2 100644 --- a/ymir/backend/src/ymir_controller/controller/utils/invoker_call.py +++ b/ymir/backend/src/ymir_controller/controller/utils/invoker_call.py @@ -27,7 +27,8 @@ def make_cmd_request(user_id: str = None, terminated_task_type: str = None, sampling_count: int = None, sampling_rate: float = None, - task_parameters: str = None) -> backend_pb2.GeneralReq: + task_parameters: str = None, + evaluate_config: backend_pb2.EvaluateConfig = None) -> backend_pb2.GeneralReq: request = backend_pb2.GeneralReq() if user_id is not None: request.user_id = user_id @@ -79,6 +80,8 @@ def make_cmd_request(user_id: str = None, request.sampling_rate = sampling_rate if task_parameters: request.task_parameters = task_parameters + if evaluate_config: + request.evaluate_config.CopyFrom(evaluate_config) return request @@ -108,7 +111,8 @@ def make_invoker_cmd_call(invoker: Any, terminated_task_type: str = None, sampling_count: int = None, sampling_rate: float = None, - work_dir: str = '') -> backend_pb2.GeneralReq: + work_dir: str = '', + evaluate_config: backend_pb2.EvaluateConfig = None) -> backend_pb2.GeneralReq: request = make_cmd_request(req_type=req_type, user_id=user_id, repo_id=repo_id, @@ -130,7 +134,8 @@ def make_invoker_cmd_call(invoker: Any, docker_image_config=docker_image_config, terminated_task_type=terminated_task_type, sampling_count=sampling_count, - sampling_rate=sampling_rate) + sampling_rate=sampling_rate, + evaluate_config=evaluate_config) invoker = invoker(sandbox_root=sandbox_root, request=request, assets_config=assets_config, diff --git a/ymir/backend/src/ymir_controller/controller/utils/invoker_mapping.py b/ymir/backend/src/ymir_controller/controller/utils/invoker_mapping.py index dcad36e73e..13e067146d 100644 --- a/ymir/backend/src/ymir_controller/controller/utils/invoker_mapping.py +++ 
b/ymir/backend/src/ymir_controller/controller/utils/invoker_mapping.py @@ -4,6 +4,7 @@ invoker_cmd_branch_create, invoker_cmd_branch_delete, invoker_cmd_branch_list, + invoker_cmd_evaluate, invoker_cmd_filter, invoker_cmd_gpu_info, invoker_cmd_inference, @@ -13,6 +14,8 @@ invoker_cmd_log, invoker_cmd_merge, invoker_cmd_pull_image, + invoker_cmd_repo_check, + invoker_cmd_repo_clear, invoker_cmd_sampling, invoker_cmd_terminate, invoker_cmd_user_create, @@ -27,6 +30,7 @@ backend_pb2.CMD_BRANCH_DEL: invoker_cmd_branch_delete.BranchDeleteInvoker, backend_pb2.CMD_BRANCH_LIST: invoker_cmd_branch_list.BranchListInvoker, backend_pb2.CMD_COMMIT: invoker_cmd_branch_commit.BranchCommitInvoker, + backend_pb2.CMD_EVALUATE: invoker_cmd_evaluate.EvaluateInvoker, backend_pb2.CMD_FILTER: invoker_cmd_filter.FilterBranchInvoker, backend_pb2.CMD_GPU_INFO_GET: invoker_cmd_gpu_info.GPUInfoInvoker, backend_pb2.CMD_INFERENCE: invoker_cmd_inference.InferenceCMDInvoker, @@ -37,6 +41,8 @@ backend_pb2.CMD_MERGE: invoker_cmd_merge.MergeInvoker, backend_pb2.CMD_PULL_IMAGE: invoker_cmd_pull_image.ImageHandler, backend_pb2.CMD_TERMINATE: invoker_cmd_terminate.CMDTerminateInvoker, + backend_pb2.CMD_REPO_CHECK: invoker_cmd_repo_check.RepoCheckInvoker, + backend_pb2.CMD_REPO_CLEAR: invoker_cmd_repo_clear.RepoClearInvoker, backend_pb2.REPO_CREATE: invoker_cmd_init.InitInvoker, backend_pb2.TASK_CREATE: invoker_task_factory.CreateTaskInvokerFactory, backend_pb2.USER_CREATE: invoker_cmd_user_create.UserCreateInvoker, diff --git a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_evaluate.py b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_evaluate.py new file mode 100644 index 0000000000..9ee4e0f6b6 --- /dev/null +++ b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_evaluate.py @@ -0,0 +1,95 @@ +import os +import shutil +import unittest +from unittest import mock + +from controller.utils.invoker_call import make_invoker_cmd_call +from controller.utils.invoker_mapping import RequestTypeToInvoker +from proto import backend_pb2 + +import tests.utils as test_utils + + +class TestInvokerCmdEvaluate(unittest.TestCase): + # life cycle + def __init__(self, methodName: str) -> None: + # dir structure: + # test_involer_CLSNAME_sandbox_root + # ├── media_storage_root + # └── test_user + # └── ymir-dvc-test + super().__init__(methodName=methodName) + self._user_name = "user" + self._mir_repo_name = "repoid" + self._storage_name = "media_storage_root" + self._in_dataset_ids = ['t000aaaabbbbbbzzzzzzzzzzzzzzz1'] + self._gt_dataset_id = 't000aaaabbbbbbzzzzzzzzzzzzzzz2' + self._task_id = 't000aaaabbbbbbzzzzzzzzzzzzzzz4' + self._dst_dataset_id = 't000aaaabbbbbbzzzzzzzzzzzzzzz4' + + self._conf_thr = 0.3 + self._iou_thrs_interval = '0.5:1.0:0.05' + + self._sandbox_root = test_utils.dir_test_root(self.id().split(".")[-3:]) + self._user_root = os.path.join(self._sandbox_root, self._user_name) + self._mir_repo_root = os.path.join(self._user_root, self._mir_repo_name) + self._storage_root = os.path.join(self._sandbox_root, self._storage_name) + + def setUp(self) -> None: + test_utils.check_commands() + self._prepare_dirs() + self._prepare_mir_repo() + return super().setUp() + + def tearDown(self) -> None: + if os.path.isdir(self._sandbox_root): + shutil.rmtree(self._sandbox_root) + return super().tearDown() + + # protected: setup and teardown + def _prepare_dirs(self): + if os.path.isdir(self._sandbox_root): + shutil.rmtree(self._sandbox_root) + os.makedirs(self._sandbox_root) + os.mkdir(self._user_root) + 
os.mkdir(self._mir_repo_root) + os.mkdir(self._storage_root) + + def _prepare_mir_repo(self): + # init repo + test_utils.mir_repo_init(self._mir_repo_root) + + # protected: mocked + def _mock_run_func(*args, **kwargs): + ret = type('', (), {})() + ret.returncode = 0 + ret.stdout = 'done' + return ret + + # public: test cases + @mock.patch("subprocess.run", side_effect=_mock_run_func) + def test_evaluate_00(self, mock_run): + evaluate_config = backend_pb2.EvaluateConfig() + evaluate_config.conf_thr = self._conf_thr + evaluate_config.iou_thrs_interval = self._iou_thrs_interval + + response = make_invoker_cmd_call(invoker=RequestTypeToInvoker[backend_pb2.CMD_EVALUATE], + sandbox_root=self._sandbox_root, + req_type=backend_pb2.CMD_EVALUATE, + user_id=self._user_name, + repo_id=self._mir_repo_name, + task_id=self._task_id, + in_dataset_ids=self._in_dataset_ids, + singleton_op=self._gt_dataset_id, + evaluate_config=evaluate_config) + self.assertEqual(response.code, 0) + self.assertEqual(response.message, 'done') + + work_dir = os.path.join(self._sandbox_root, "work_dir", backend_pb2.RequestType.Name(backend_pb2.CMD_EVALUATE), + self._task_id) + expected_cmd = f"mir evaluate --root {self._mir_repo_root} --dst-rev {self._task_id}@{self._task_id}" + expected_cmd += f" --src-revs {self._in_dataset_ids[0]}" + expected_cmd += f" --gt-rev {self._gt_dataset_id}@{self._gt_dataset_id}" + expected_cmd += f" -w {work_dir} --conf-thr {self._conf_thr:.2f}" + expected_cmd += f" --iou-thrs {self._iou_thrs_interval}" + mock_run.assert_called_once_with(expected_cmd.split(' '), capture_output=True, text=True) diff --git a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_inference.py b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_inference.py index d42644a535..557f280d24 100644 --- a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_inference.py +++ b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_inference.py @@ -117,7 +117,7 @@ def test_invoker_00(self, mock_run): index_file = os.path.join(working_dir, "inference_pic_index.txt") - cmd = (f"mir infer -w {working_dir} --model-location {self._storage_root} " + cmd = (f"mir infer --root {self._mir_repo_root} -w {working_dir} --model-location {self._storage_root} " f"--index-file {index_file} --model-hash {model_hash} " f"--task-config-file {config_file} --executor {inference_image}") diff --git a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_init.py b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_init.py index 3947e6ddbc..297d27a6a0 100644 --- a/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_init.py +++ b/ymir/backend/src/ymir_controller/tests/unit/test_invoker_cmd_init.py @@ -92,7 +92,6 @@ def test_invoker_init_01(self, mock_run): print(MessageToDict(response)) expected_cmd = f"mir init --root {os.path.join(self._user_root, self._mir_repo_name)}" - expected_cmd += ' --project-class-names person;cat' expected_cmd += f" --with-empty-rev {self._task_id}@{self._task_id}" mock_run.assert_called_once_with(expected_cmd.split(' '), capture_output=True, text=True) diff --git a/ymir/backend/src/ymir_monitor/monitor/utils/crontab_job.py b/ymir/backend/src/ymir_monitor/monitor/utils/crontab_job.py index f2ee04a4bf..1965667bd8 100644 --- a/ymir/backend/src/ymir_monitor/monitor/utils/crontab_job.py +++ b/ymir/backend/src/ymir_monitor/monitor/utils/crontab_job.py @@ -1,3 +1,4 @@ +from datetime import datetime import json import logging import sys @@ -7,6 +8,7 @@ from 
apscheduler.schedulers.blocking import BlockingScheduler from common_utils.percent_log_util import PercentLogHandler, PercentResult, LogState +from id_definition.error_codes import MonitorErrorCode from monitor.config import settings from monitor.libs import redis_handler from monitor.libs.redis_handler import RedisHandler @@ -53,8 +55,13 @@ def update_monitor_percent_log() -> None: runtime_log_content = PercentLogHandler.parse_percent_log(log_path) except ValueError as e: sentry_sdk.capture_exception(e) - logging.warning(e) - runtime_log_content = PercentResult(task_id=task_id, timestamp="123", percent=0.0, state=LogState.ERROR) + logging.exception(e) + runtime_log_content = PercentResult(task_id=task_id, + timestamp=f"{datetime.now().timestamp():.6f}", + percent=1.0, + state=LogState.ERROR, + state_code=MonitorErrorCode.PERCENT_LOG_PARSE_ERROR, + state_message=f"logfile parse error: {log_path}") runtime_log_contents[log_path] = runtime_log_content if runtime_log_content.timestamp != previous_log_content["timestamp"]: diff --git a/ymir/backend/src/ymir_postman/__init__.py b/ymir/backend/src/ymir_postman/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/ymir/backend/src/ymir_postman/pm_app_server.py b/ymir/backend/src/ymir_postman/pm_app_server.py deleted file mode 100644 index c0db4a70b2..0000000000 --- a/ymir/backend/src/ymir_postman/pm_app_server.py +++ /dev/null @@ -1,54 +0,0 @@ -""" emtry point for postman service """ - -import asyncio -import json -import logging -from typing import List - -from fastapi import FastAPI -from fastapi.encoders import jsonable_encoder -from fastapi_socketio import SocketManager -from starlette.middleware.cors import CORSMiddleware - -from postman import entities -from postman.event_dispatcher import EventDispatcher -from postman.settings import constants, settings - -uvicorn_logger = logging.getLogger("uvicorn") - -# main service and api implememtations -app = FastAPI(title=constants.PROJECT_NAME) -if settings.BACKEND_CORS_ORIGINS: - app.add_middleware( - CORSMiddleware, - allow_origins=settings.BACKEND_CORS_ORIGINS, - allow_credentials=False, - allow_methods=["*"], - allow_headers=["*"], - ) - # binded to /ws by default - # if use with fastapi and cors settings - # cors_allowed_origins set to []: https://github.com/pyropy/fastapi-socketio/issues/28 - socket_manager = SocketManager(app=app, cors_allowed_origins=[]) -else: - socket_manager = SocketManager(app=app) - - -# fastapi handlers -@app.post('/events/taskstates', response_model=entities.EventResp) -def post_task_states(tid_to_taskstates: entities.TaskStateDict) -> entities.EventResp: - uvicorn_logger.info(f"/events/taskstates: {tid_to_taskstates}") - EventDispatcher.add_event(event_name='/events/taskstates', - event_topic=constants.EVENT_TOPIC_RAW, - event_body=json.dumps(jsonable_encoder(tid_to_taskstates))) - - return entities.EventResp(return_code=0, return_msg=f"done, received: {len(tid_to_taskstates)} tasks") - - -@app.post('/events/push', response_model=entities.EventResp) -def post_events_push(event_payloads: List[entities.EventPayload]) -> entities.EventResp: - uvicorn_logger.info(f"/events/push: {event_payloads}") - - for payload in event_payloads: - asyncio.run(app.sio.emit(event=payload.event, data=payload.data, namespace=payload.namespace)) # type: ignore - return entities.EventResp(return_code=0, return_msg='done') diff --git a/ymir/backend/src/ymir_postman/pm_server.py b/ymir/backend/src/ymir_postman/pm_server.py deleted file mode 100644 index 
5eb234a4cb..0000000000 --- a/ymir/backend/src/ymir_postman/pm_server.py +++ /dev/null @@ -1,25 +0,0 @@ -import logging -import sys - -from postman.event_dispatcher import EventDispatcher -from postman.handlers import task_state_handler -from postman.settings import settings - - -def main() -> int: - is_debug_mode = '-d' in sys.argv - - # for test: debug logs - log_level = logging.DEBUG if is_debug_mode else logging.INFO - logging.basicConfig(stream=sys.stdout, format='%(levelname)-8s: [%(asctime)s] %(message)s', level=log_level) - - logging.info(f"postman event dispatcher start with:\n debug: {is_debug_mode} \n settings: {settings}") - - # event dispatcher - EventDispatcher(event_name='/events/taskstates', handler=task_state_handler.on_task_state).start() - - return 0 - - -if __name__ == '__main__': - sys.exit(main()) diff --git a/ymir/backend/src/ymir_postman/postman/entities.py b/ymir/backend/src/ymir_postman/postman/entities.py deleted file mode 100644 index 3f171c6525..0000000000 --- a/ymir/backend/src/ymir_postman/postman/entities.py +++ /dev/null @@ -1,63 +0,0 @@ -from enum import IntEnum -from typing import Dict, List, Optional - -from pydantic import BaseModel, Field - - -class ResultType(IntEnum): - no_result = 0 - dataset = 1 - model = 2 - - -class TaskStateExtra(BaseModel): - user_id: str - - -class TaskStatePercent(BaseModel): - task_id: str - timestamp: float = Field(gt=0) - percent: float = Field(ge=0, le=1) - state: int - state_code: int - state_message: Optional[str] - stack_error_info: Optional[str] - - result_type: Optional[int] - result_id: Optional[int] - result_state: Optional[int] - - def update_with_app_response(self, result: Dict) -> None: - result_type = ResultType(result["result_type"]) - if result_type is ResultType.dataset: - result_record = result["result_dataset"] - elif result_type is ResultType.model: - result_record = result["result_model"] - else: - return - self.result_type = result["result_type"] - self.result_id = result_record["id"] - self.result_state = result_record["result_state"] - - -class TaskState(BaseModel): - task_extra_info: TaskStateExtra - percent_result: TaskStatePercent - - -TaskStateDict = Dict[str, TaskState] - - -class EventPayload(BaseModel): - event: str - namespace: Optional[str] = None - data: dict - - -EventPayloadList = List[EventPayload] - - -# data models: resp -class EventResp(BaseModel): - return_code: int - return_msg: str diff --git a/ymir/backend/src/ymir_postman/postman/event_dispatcher.py b/ymir/backend/src/ymir_postman/postman/event_dispatcher.py deleted file mode 100644 index fbc294229d..0000000000 --- a/ymir/backend/src/ymir_postman/postman/event_dispatcher.py +++ /dev/null @@ -1,67 +0,0 @@ -""" event dispatcher """ - -import logging -from typing import Callable - -import redis - -from postman.settings import constants, settings - - -# public: class EventDispatcher -class EventDispatcher: - def __init__(self, event_name: str, handler: Callable) -> None: - if not event_name: - raise ValueError('empty event name') - - if not handler: - raise ValueError('empty handler') - - self._event_handler = handler - self._event_name = event_name - self._group_name = f"group:{event_name}" - self._redis_connect: redis.Redis = self.get_redis_connect() - - # creates stream and consumer group - self.add_event(event_name=self._event_name, event_topic=constants.EVENT_TOPIC_INNER, event_body='') - try: - self._redis_connect.xgroup_create(name=self._event_name, groupname=self._group_name, id='$') - except redis.ResponseError as e: - 
logging.debug(f"xgroup_create: {e}") - - # public: general - def start(self) -> None: - """ start to listen """ - logging.debug(f"ed start. event: {self._event_name}, group: {self._group_name}") - kvs = self._redis_connect.xreadgroup(groupname=self._group_name, - consumername='default', - streams={self._event_name: '0'}) - while True: - for _, stream_msgs in kvs: - if not stream_msgs: - continue - - self._event_handler(ed=self, mid_and_msgs=stream_msgs) - - msg_ids, *_ = zip(*stream_msgs) - self._redis_connect.xack(self._event_name, self._group_name, *msg_ids) - self._redis_connect.xdel(self._event_name, *msg_ids) - - kvs = self._redis_connect.xreadgroup(groupname=self._group_name, - consumername='default', - streams={self._event_name: '>'}, - block=0) - - @classmethod - def get_redis_connect(cls) -> redis.Redis: - return redis.StrictRedis.from_url(settings.PM_REDIS_URI, encoding="utf8", decode_responses=True) - - @classmethod - def add_event(cls, event_name: str, event_topic: str, event_body: str) -> None: - cls.get_redis_connect().xadd(name=event_name, - fields={ - 'topic': event_topic, - 'body': event_body - }, - maxlen=settings.MAX_REDIS_STREAM_LENGTH, - approximate=True) diff --git a/ymir/backend/src/ymir_postman/postman/handlers/task_state_handler.py b/ymir/backend/src/ymir_postman/postman/handlers/task_state_handler.py deleted file mode 100644 index a436cbb4f8..0000000000 --- a/ymir/backend/src/ymir_postman/postman/handlers/task_state_handler.py +++ /dev/null @@ -1,200 +0,0 @@ -from collections import defaultdict -from enum import IntEnum -import json -import logging -import requests -import time -from typing import Any, Dict, List, Set, Tuple - -from fastapi.encoders import jsonable_encoder -from pydantic import parse_raw_as - -from postman import entities, event_dispatcher # type: ignore -from postman.settings import constants, settings - - -class _UpdateDbConclusion(IntEnum): - SUCCESS = 0 # update success - RETRY = 1 # update failed, need retry - DROP = 2 # update failed, need drop - - -def _conclusion_from_return_code(return_code: int) -> _UpdateDbConclusion: - if return_code == constants.RC_OK: - return _UpdateDbConclusion.SUCCESS - elif return_code == constants.RC_FAILED_TO_UPDATE_TASK_STATUS: - return _UpdateDbConclusion.RETRY - else: - return _UpdateDbConclusion.DROP - - -class _UpdateDbResult: - def __init__(self) -> None: - self.success_tids: Set[str] = set() - self.retry_tids: Set[str] = set() - self.drop_tids: Set[str] = set() - - def __repr__(self) -> str: - return f"success: {self.success_tids}, retry: {self.retry_tids}, drop: {self.drop_tids}" - - -redis_connect = event_dispatcher.EventDispatcher.get_redis_connect() - - -def on_task_state(ed: event_dispatcher.EventDispatcher, mid_and_msgs: list, **kwargs: Any) -> None: - tid_to_taskstates_latest = _aggregate_msgs(mid_and_msgs) - if not tid_to_taskstates_latest: - return - - # update db, save failed - update_db_result = _update_db(tid_to_tasks=tid_to_taskstates_latest) - logging.info(f"update db result: {update_db_result}") - _update_sio(tids=update_db_result.success_tids, tid_to_taskstates=tid_to_taskstates_latest) - _update_retry(retry_tids=update_db_result.retry_tids, tid_to_taskstates_latest=tid_to_taskstates_latest) - # delay and retry - if update_db_result.retry_tids: - time.sleep(settings.RETRY_SECONDS) - ed.add_event(event_name=ed._event_name, event_topic=constants.EVENT_TOPIC_INNER, event_body="") - - -def _aggregate_msgs(mid_and_msgs: List[Tuple[str, dict]]) -> entities.TaskStateDict: - """ - for all redis 
stream msgs, deserialize them to entities, select the latest for each tid - """ - tid_to_taskstates_latest: entities.TaskStateDict = _load_retry() - if mid_and_msgs: - for _, msg in mid_and_msgs: - msg_topic = msg["topic"] - if msg_topic != constants.EVENT_TOPIC_RAW: - continue - - tid_to_taskstates = parse_raw_as(entities.TaskStateDict, msg["body"]) - for tid, taskstate in tid_to_taskstates.items(): - if ( - tid not in tid_to_taskstates_latest - or tid_to_taskstates_latest[tid].percent_result.timestamp < taskstate.percent_result.timestamp - ): - tid_to_taskstates_latest[tid] = taskstate - return tid_to_taskstates_latest - - -# private: update db -def _update_retry(retry_tids: Set[str], tid_to_taskstates_latest: entities.TaskStateDict) -> None: - """ - save failed taskstates to redis cache - - Args: - retry_tids (Set[str]) - tid_to_taskstates_latest (entities.TaskStateDict) - """ - retry_tid_to_tasks = {tid: tid_to_taskstates_latest[tid] for tid in retry_tids if tid in tid_to_taskstates_latest} - json_str = json.dumps(jsonable_encoder(retry_tid_to_tasks)) - redis_connect.set(name=settings.RETRY_CACHE_KEY, value=json_str) - - -def _load_retry() -> entities.TaskStateDict: - """ - load failed taskstates from redis cache - - Returns: - entities.TaskStateDict - """ - json_str = redis_connect.get(name=settings.RETRY_CACHE_KEY) - if not json_str: - return {} - - return parse_raw_as(entities.TaskStateDict, json_str) or {} - - -def _update_db(tid_to_tasks: entities.TaskStateDict) -> _UpdateDbResult: - """ - update db for all tasks in tid_to_tasks - - Args: - tid_to_tasks (entities.TaskStateDict): key: tid, value: TaskState - - Returns: - _UpdateDbResult: update db result (success, retry and drop tids) - - Side Effects: - update task in tid_to_tasks in place - """ - update_db_result = _UpdateDbResult() - custom_headers = {"api-key": settings.APP_API_KEY} - for task_id, task in tid_to_tasks.items(): - try: - resp = _update_db_single_task(task_id, task, custom_headers) - except requests.exceptions.RequestException: - logging.exception("update db single task error ignored: {tid}", task_id) - update_db_result.retry_tids.add(task_id) - continue - - code = _conclusion_from_return_code(int(resp["code"])) - if code == _UpdateDbConclusion.SUCCESS: - # fixme - # adhoc append dataset or model info into task - task.percent_result.update_with_app_response(resp["result"]) - update_db_result.success_tids.add(task_id) - else: - update_db_result.drop_tids.add(task_id) - - return update_db_result - - -def _update_db_single_task(tid: str, task: entities.TaskState, custom_headers: dict) -> Dict: - """ - update db for single task - - Args: - tid (str): task id - task (entities.TaskState): task state - custom_headers (dict) - - Returns: - Tuple[str, _UpdateDbConclusion]: error_message, result conclusion (success, retry or drop) - """ - url = f"http://{settings.APP_API_HOST}/api/v1/tasks/status" - - # task_data: see api: /api/v1/tasks/status - task_data = { - "hash": tid, - "timestamp": task.percent_result.timestamp, - "state": task.percent_result.state, - "percent": task.percent_result.percent, - "state_code": task.percent_result.state_code, - "state_message": task.percent_result.state_message, - } - - logging.debug(f"update db single task request: {task_data}") - response = requests.post(url=url, headers=custom_headers, json=task_data) - response.raise_for_status() - return response.json() - - -# private: socketio -def _update_sio(tids: Set[str], tid_to_taskstates: entities.TaskStateDict) -> None: - if not tids: - 
return - - event_payloads = _remap_payloads_by_uid({tid: tid_to_taskstates[tid] for tid in tids if tid in tid_to_taskstates}) - - url = f"{settings.PM_URL}/events/push" - try: - requests.post(url=url, json=jsonable_encoder(event_payloads)) - except requests.exceptions.RequestException: - logging.exception("update sio error ignored") - - -def _remap_payloads_by_uid(tid_to_taskstates: entities.TaskStateDict) -> entities.EventPayloadList: - # sort by user - uid_to_taskdatas: Dict[str, Dict[str, entities.TaskStatePercent]] = defaultdict(dict) - for tid, taskstate in tid_to_taskstates.items(): - uid = taskstate.task_extra_info.user_id - uid_to_taskdatas[uid][tid] = taskstate.percent_result - - # get event payloads - event_payloads = [ - entities.EventPayload(event="update_taskstate", namespace=f"/{uid}", data=tid_to_taskdatas) - for uid, tid_to_taskdatas in uid_to_taskdatas.items() - ] - return event_payloads diff --git a/ymir/backend/src/ymir_postman/postman/settings.py b/ymir/backend/src/ymir_postman/postman/settings.py deleted file mode 100644 index b7bad64113..0000000000 --- a/ymir/backend/src/ymir_postman/postman/settings.py +++ /dev/null @@ -1,30 +0,0 @@ -import os -from typing import List - -from pydantic import BaseSettings - - -class Settings(BaseSettings): - APP_API_HOST: str = os.environ['APP_API_HOST'] - APP_API_KEY: str = os.environ['APP_API_KEY'] - PM_REDIS_URI: str = os.environ['BACKEND_REDIS_URL'] - PM_URL: str = os.environ['POSTMAN_URL'] - - BACKEND_CORS_ORIGINS: List[str] = [] - - RETRY_CACHE_KEY = 'retryhash:/events/taskstates' - MAX_REDIS_STREAM_LENGTH = 10 * 3600 * 24 * 2 # two days, with 10 messages for each second - RETRY_SECONDS = 60 - - -class Constants(BaseSettings): - PROJECT_NAME: str = "ymir postman" - EVENT_TOPIC_RAW = 'raw' - EVENT_TOPIC_INNER = '_inner_' - - RC_OK = 0 - RC_FAILED_TO_UPDATE_TASK_STATUS = 110706 - - -settings = Settings() -constants = Constants() diff --git a/ymir/backend/src/ymir_postman/tests/sio_client.py b/ymir/backend/src/ymir_postman/tests/sio_client.py deleted file mode 100644 index cc496cb543..0000000000 --- a/ymir/backend/src/ymir_postman/tests/sio_client.py +++ /dev/null @@ -1,27 +0,0 @@ -""" for test """ -import sys -from typing import Dict -import socketio - - -def update_taskstate(*args: tuple, **kwargs: Dict) -> None: - print(f"update_taskstate: {args}, {kwargs}") - - -def main() -> int: - url = sys.argv[1] - namespace = sys.argv[2] - - print(f"connecting to url: {url}, namespace: {namespace}") - - sio = socketio.Client(logger=True) - sio.connect(url, namespaces=[namespace], socketio_path='/ws/socket.io') - sio.event(namespace=namespace)(update_taskstate) - sio.wait() - - return 0 - - -if __name__ == '__main__': - # usage: python sio_client.py - sys.exit(main()) diff --git a/ymir/backend/src/ymir_postman/tests/test_events.py b/ymir/backend/src/ymir_postman/tests/test_events.py deleted file mode 100644 index bf06065694..0000000000 --- a/ymir/backend/src/ymir_postman/tests/test_events.py +++ /dev/null @@ -1,5 +0,0 @@ -import unittest - - -class TestEvents(unittest.TestCase): - pass diff --git a/ymir/backend/src/ymir_postman/uvicorn_log_config.json b/ymir/backend/src/ymir_postman/uvicorn_log_config.json deleted file mode 100644 index 98045cf8de..0000000000 --- a/ymir/backend/src/ymir_postman/uvicorn_log_config.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "version": 1, - "disable_existing_loggers": false, - "formatters": { - "default": { - "()": "uvicorn.logging.DefaultFormatter", - "fmt": "%(asctime)s - %(levelprefix)s %(message)s", - 
"use_colors": null - }, - "access": { - "()": "uvicorn.logging.AccessFormatter", - "fmt": "%(asctime)s - %(levelprefix)s %(client_addr)s - \"%(request_line)s\" %(status_code)s" - } - }, - "handlers": { - "default": { - "formatter": "default", - "class": "logging.StreamHandler", - "stream": "ext://sys.stdout" - }, - "access": { - "formatter": "access", - "class": "logging.StreamHandler", - "stream": "ext://sys.stdout" - } - }, - "loggers": { - "uvicorn": { - "handlers": [ - "default" - ], - "level": "INFO" - }, - "uvicorn.error": { - "level": "INFO" - }, - "uvicorn.access": { - "handlers": [ - "access" - ], - "level": "INFO", - "propagate": false - } - } - } diff --git a/ymir/backend/src/ymir_viz/README.md b/ymir/backend/src/ymir_viz/README.md index 53134a5096..5f94d48401 100644 --- a/ymir/backend/src/ymir_viz/README.md +++ b/ymir/backend/src/ymir_viz/README.md @@ -4,36 +4,45 @@ ## Features -- Load ymir-cmd's persistence files,provide RESTful API for query based on Redis +- Load ymir-cmd's persistence files, provide RESTful API for query based on Redis -## Externale dependency -- ymir-command's persistenc file path,called sanbox path +## External dependency + +- ymir-command's persistence file path (sandbox path) ## Development + To install dev dependencies you can use the following command: -```shell script -pip3 install -r requirements.txt&&pip3 install -r requirements-dev.txt + +```bash +pip3 install -r requirements.txt -r requirements-dev.txt ``` To contribute to the framework + - Contribute to definition of API: - Edit `./doc/ymir_viz_API.yaml` - run `sh codegen.sh` to generate code -- Do not edit the folder of `./src/swagger_models` and `./src/swagger`,because it is generated by `swagger-codegen` +- Do not edit `./src/swagger_models` and `./src/swagger`,which are generated by `swagger-codegen` -To run server local you can use the following command: -``` +Run local server with the following command: + +```bash python wsgi.py ``` + Then you can see the API definition by Swagger in browser: -```shell script + +``` http://localhost:9099/v1/ui/ ``` -For more information about swagger-codegen,have a look [here](https://github.com/swagger-api/swagger-codegen). +For more information about swagger-codegen, have a look [here](https://github.com/swagger-api/swagger-codegen). + +### Tests -### Running Tests Unit tests are within the tests folder and we recommend to run them using `tox`. 
-``` + +```bash tox -``` \ No newline at end of file +``` diff --git a/ymir/backend/src/ymir_viz/doc/ymir_viz_API.yaml b/ymir/backend/src/ymir_viz/doc/ymir_viz_API.yaml index 378146e0c6..a917a1f0ed 100644 --- a/ymir/backend/src/ymir_viz/doc/ymir_viz_API.yaml +++ b/ymir/backend/src/ymir_viz/doc/ymir_viz_API.yaml @@ -187,6 +187,41 @@ paths: $ref: "#/components/schemas/ModelResult" "400": description: Task not exists + "/users/{user_id}/repositories/{repo_id}/branches/{branch_id}/evaluations": + get: + tags: + - evaluation + summary: "get dataset evaluation result" + description: "get dataset evaluation result" + operationId: get_dataset_evaluations + parameters: + - name: user_id + in: path + description: user_id + required: true + schema: + type: string + - name: repo_id + in: path + description: repo_id + required: true + schema: + type: string + - name: branch_id + in: path + description: branch_id + required: true + schema: + type: string + responses: + "200": + description: successful operation + content: + application/json: + schema: + $ref: "#/components/schemas/DatasetEvaluationResult" + "400": + description: DatasetEvaluation not exists components: schemas: AssetInfo: @@ -315,6 +350,24 @@ components: type: integer project_negative_images_cnt: type: integer + DatasetEvaluationResult: + allOf: + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + result: + type: object + additionalProperties: + type: object + properties: + conf_thr: + type: number + iou_evaluations: + type: object + additionalProperties: + $ref: '#/components/schemas/DatasetEvaluation' + iou_averaged_evaluation: + $ref: '#/components/schemas/DatasetEvaluation' ApiResponse: type: object @@ -346,3 +399,29 @@ components: type: integer score: type: integer + DatasetEvaluation: + type: object + properties: + ci_evaluations: + type: object + additionalProperties: + $ref: "#/components/schemas/DatasetEvaluationElement" + topic_evaluations: + type: object + additionalProperties: + $ref: "#/components/schemas/DatasetEvaluationElement" + ci_averaged_evaluation: + $ref: "#/components/schemas/DatasetEvaluationElement" + DatasetEvaluationElement: + type: object + properties: + ap: + type: number + ar: + type: number + tp: + type: integer + fp: + type: integer + fn: + type: integer diff --git a/ymir/backend/src/ymir_viz/src/controllers/evaluation_controller.py b/ymir/backend/src/ymir_viz/src/controllers/evaluation_controller.py new file mode 100644 index 0000000000..a1ff05d7ce --- /dev/null +++ b/ymir/backend/src/ymir_viz/src/controllers/evaluation_controller.py @@ -0,0 +1,34 @@ +import logging + +from src.config import viz_settings +from src.libs import utils +from src.swagger_models import DatasetEvaluationResult +from src.viz_models import pb_reader + + +def get_dataset_evaluations(user_id: str, repo_id: str, branch_id: str) -> DatasetEvaluationResult: + """ + get dataset evaluations result + + :param user_id: user_id + :type user_id: str + :param repo_id: repo_id + :type repo_id: str + :param branch_id: branch_id + :type branch_id: str + + :rtype: DatasetEvaluationResult + """ + evaluations = pb_reader.MirStorageLoader( + sandbox_root=viz_settings.BACKEND_SANDBOX_ROOT, + user_id=user_id, + repo_id=repo_id, + branch_id=branch_id, + task_id=branch_id, + ).get_dataset_evaluations() + + resp = utils.suss_resp() + resp["result"] = evaluations + logging.info("successfully get_dataset_evaluations from branch %s", branch_id) + + return DatasetEvaluationResult(**resp) diff --git 
a/ymir/backend/src/ymir_viz/src/libs/exceptions.py b/ymir/backend/src/ymir_viz/src/libs/exceptions.py index 8252d9d9d8..5a84bd5c90 100644 --- a/ymir/backend/src/ymir_viz/src/libs/exceptions.py +++ b/ymir/backend/src/ymir_viz/src/libs/exceptions.py @@ -9,7 +9,10 @@ class VizException(Exception): message = "Exception Occured" def __init__( - self, message: Optional[str] = None, status_code: Optional[int] = None, code: Optional[int] = None, + self, + message: Optional[str] = None, + status_code: Optional[int] = None, + code: Optional[int] = None, ): super().__init__() self.status_code = status_code or self.status_code @@ -35,3 +38,8 @@ class BranchNotExists(VizException): class ModelNotExists(VizException): code = VizErrorCode.MODEL_NOT_EXISTS message = "model not found" + + +class DatasetEvaluationNotExists(VizException): + code = VizErrorCode.DATASET_EVALUATION_NOT_EXISTS + message = "dataset evaluation not found" diff --git a/ymir/backend/src/ymir_viz/src/swagger/swagger.yaml b/ymir/backend/src/ymir_viz/src/swagger/swagger.yaml index df105dfb42..5a2e630020 100644 --- a/ymir/backend/src/ymir_viz/src/swagger/swagger.yaml +++ b/ymir/backend/src/ymir_viz/src/swagger/swagger.yaml @@ -223,6 +223,48 @@ paths: "400": description: Task not exists x-openapi-router-controller: src.controllers.model_controller + /users/{user_id}/repositories/{repo_id}/branches/{branch_id}/evaluations: + get: + tags: + - evaluation + summary: get dataset evaluation result + description: get dataset evaluation result + operationId: get_dataset_evaluations + parameters: + - name: user_id + in: path + description: user_id + required: true + style: simple + explode: false + schema: + type: string + - name: repo_id + in: path + description: repo_id + required: true + style: simple + explode: false + schema: + type: string + - name: branch_id + in: path + description: branch_id + required: true + style: simple + explode: false + schema: + type: string + responses: + "200": + description: successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/DatasetEvaluationResult' + "400": + description: DatasetEvaluation not exists + x-openapi-router-controller: src.controllers.evaluation_controller components: schemas: AssetInfo: @@ -292,6 +334,15 @@ components: properties: result: $ref: '#/components/schemas/DatasetResult_result' + DatasetEvaluationResult: + allOf: + - $ref: '#/components/schemas/ApiResponse' + - type: object + properties: + result: + type: object + additionalProperties: + $ref: '#/components/schemas/DatasetEvaluationResult_result' ApiResponse: type: object properties: @@ -306,6 +357,32 @@ components: type: array items: $ref: '#/components/schemas/Annotations_inner' + DatasetEvaluation: + type: object + properties: + ci_evaluations: + type: object + additionalProperties: + $ref: '#/components/schemas/DatasetEvaluationElement' + topic_evaluations: + type: object + additionalProperties: + $ref: '#/components/schemas/DatasetEvaluationElement' + ci_averaged_evaluation: + $ref: '#/components/schemas/DatasetEvaluationElement' + DatasetEvaluationElement: + type: object + properties: + ap: + type: number + ar: + type: number + tp: + type: integer + fp: + type: integer + fn: + type: integer AssetsResult_result: type: object properties: @@ -319,11 +396,13 @@ components: type: integer tatal: type: integer + example: null AssetsMetaInfo_metadata_timestamp: type: object properties: start: type: integer + example: null AssetsMetaInfo_metadata: type: object properties: @@ -337,6 +416,7 @@ 
components: type: integer timestamp: $ref: '#/components/schemas/AssetsMetaInfo_metadata_timestamp' + example: null ModelResult_result: type: object properties: @@ -351,6 +431,7 @@ components: type: string executor_config: type: string + example: null DatasetResult_result_negative_info: type: object properties: @@ -358,6 +439,7 @@ components: type: integer project_negative_images_cnt: type: integer + example: null DatasetResult_result: type: object properties: @@ -374,6 +456,19 @@ components: example: "{'cat':8}" negative_info: $ref: '#/components/schemas/DatasetResult_result_negative_info' + example: null + DatasetEvaluationResult_result: + type: object + properties: + conf_thr: + type: number + iou_evaluations: + type: object + additionalProperties: + $ref: '#/components/schemas/DatasetEvaluation' + iou_averaged_evaluation: + $ref: '#/components/schemas/DatasetEvaluation' + example: null Annotations_inner: type: object properties: diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/__init__.py b/ymir/backend/src/ymir_viz/src/swagger_models/__init__.py index ba1d075164..0156addaa7 100644 --- a/ymir/backend/src/ymir_viz/src/swagger_models/__init__.py +++ b/ymir/backend/src/ymir_viz/src/swagger_models/__init__.py @@ -2,7 +2,6 @@ # flake8: noqa from __future__ import absolute_import - # import models into model package from src.swagger_models.annotations import Annotations from src.swagger_models.annotations_inner import AnnotationsInner @@ -14,6 +13,10 @@ from src.swagger_models.assets_meta_info_metadata_timestamp import AssetsMetaInfoMetadataTimestamp from src.swagger_models.assets_result import AssetsResult from src.swagger_models.assets_result_result import AssetsResultResult +from src.swagger_models.dataset_evaluation import DatasetEvaluation +from src.swagger_models.dataset_evaluation_element import DatasetEvaluationElement +from src.swagger_models.dataset_evaluation_result import DatasetEvaluationResult +from src.swagger_models.dataset_evaluation_result_result import DatasetEvaluationResultResult from src.swagger_models.dataset_result import DatasetResult from src.swagger_models.dataset_result_result import DatasetResultResult from src.swagger_models.dataset_result_result_negative_info import DatasetResultResultNegativeInfo diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/annotations.py b/ymir/backend/src/ymir_viz/src/swagger_models/annotations.py index 22cbaca307..aa4ad614f2 100644 --- a/ymir/backend/src/ymir_viz/src/swagger_models/annotations.py +++ b/ymir/backend/src/ymir_viz/src/swagger_models/annotations.py @@ -1,13 +1,13 @@ # coding: utf-8 from __future__ import absolute_import - from datetime import date, datetime # noqa: F401 + from typing import List, Dict # noqa: F401 -from src import util -from src.swagger_models.annotations_inner import AnnotationsInner # noqa: F401,E501 from src.swagger_models.base_model_ import Model +from src.swagger_models.annotations_inner import AnnotationsInner # noqa: F401,E501 +from src import util class Annotations(Model): diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/annotations_inner.py b/ymir/backend/src/ymir_viz/src/swagger_models/annotations_inner.py index 75dfe8462e..3fad26815c 100644 --- a/ymir/backend/src/ymir_viz/src/swagger_models/annotations_inner.py +++ b/ymir/backend/src/ymir_viz/src/swagger_models/annotations_inner.py @@ -1,12 +1,12 @@ # coding: utf-8 from __future__ import absolute_import - from datetime import date, datetime # noqa: F401 + from typing import List, Dict # noqa: F401 -from src import util 
from src.swagger_models.base_model_ import Model +from src import util class AnnotationsInner(Model): diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/api_response.py b/ymir/backend/src/ymir_viz/src/swagger_models/api_response.py index 356bad5ffe..6093786f0d 100644 --- a/ymir/backend/src/ymir_viz/src/swagger_models/api_response.py +++ b/ymir/backend/src/ymir_viz/src/swagger_models/api_response.py @@ -1,12 +1,12 @@ # coding: utf-8 from __future__ import absolute_import - from datetime import date, datetime # noqa: F401 + from typing import List, Dict # noqa: F401 -from src import util from src.swagger_models.base_model_ import Model +from src import util class ApiResponse(Model): diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/asset_info.py b/ymir/backend/src/ymir_viz/src/swagger_models/asset_info.py index 38df0cf8df..5c5ec1c360 100644 --- a/ymir/backend/src/ymir_viz/src/swagger_models/asset_info.py +++ b/ymir/backend/src/ymir_viz/src/swagger_models/asset_info.py @@ -1,12 +1,12 @@ # coding: utf-8 from __future__ import absolute_import - from datetime import date, datetime # noqa: F401 + from typing import List, Dict # noqa: F401 -from src import util from src.swagger_models.base_model_ import Model +from src import util class AssetInfo(Model): diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/asset_meta_result.py b/ymir/backend/src/ymir_viz/src/swagger_models/asset_meta_result.py index cb87949d37..1976a127a5 100644 --- a/ymir/backend/src/ymir_viz/src/swagger_models/asset_meta_result.py +++ b/ymir/backend/src/ymir_viz/src/swagger_models/asset_meta_result.py @@ -1,14 +1,14 @@ # coding: utf-8 from __future__ import absolute_import - from datetime import date, datetime # noqa: F401 + from typing import List, Dict # noqa: F401 -from src import util +from src.swagger_models.base_model_ import Model from src.swagger_models.api_response import ApiResponse # noqa: F401,E501 from src.swagger_models.assets_meta_info import AssetsMetaInfo # noqa: F401,E501 -from src.swagger_models.base_model_ import Model +from src import util class AssetMetaResult(Model): diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/assets_meta_info.py b/ymir/backend/src/ymir_viz/src/swagger_models/assets_meta_info.py index 09fa3cd9ae..95ed3d1f08 100644 --- a/ymir/backend/src/ymir_viz/src/swagger_models/assets_meta_info.py +++ b/ymir/backend/src/ymir_viz/src/swagger_models/assets_meta_info.py @@ -1,14 +1,14 @@ # coding: utf-8 from __future__ import absolute_import - from datetime import date, datetime # noqa: F401 + from typing import List, Dict # noqa: F401 -from src import util +from src.swagger_models.base_model_ import Model from src.swagger_models.annotations import Annotations # noqa: F401,E501 from src.swagger_models.assets_meta_info_metadata import AssetsMetaInfoMetadata # noqa: F401,E501 -from src.swagger_models.base_model_ import Model +from src import util class AssetsMetaInfo(Model): diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/assets_meta_info_metadata.py b/ymir/backend/src/ymir_viz/src/swagger_models/assets_meta_info_metadata.py index 0e3944fdbc..f8fc21bc16 100644 --- a/ymir/backend/src/ymir_viz/src/swagger_models/assets_meta_info_metadata.py +++ b/ymir/backend/src/ymir_viz/src/swagger_models/assets_meta_info_metadata.py @@ -1,13 +1,13 @@ # coding: utf-8 from __future__ import absolute_import - from datetime import date, datetime # noqa: F401 + from typing import List, Dict # noqa: F401 -from src import util -from src.swagger_models.assets_meta_info_metadata_timestamp 
import AssetsMetaInfoMetadataTimestamp # noqa: F401,E501 from src.swagger_models.base_model_ import Model +from src.swagger_models.assets_meta_info_metadata_timestamp import AssetsMetaInfoMetadataTimestamp # noqa: F401,E501 +from src import util class AssetsMetaInfoMetadata(Model): diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/assets_meta_info_metadata_timestamp.py b/ymir/backend/src/ymir_viz/src/swagger_models/assets_meta_info_metadata_timestamp.py index 533169ac4f..1353e18775 100644 --- a/ymir/backend/src/ymir_viz/src/swagger_models/assets_meta_info_metadata_timestamp.py +++ b/ymir/backend/src/ymir_viz/src/swagger_models/assets_meta_info_metadata_timestamp.py @@ -1,12 +1,12 @@ # coding: utf-8 from __future__ import absolute_import - from datetime import date, datetime # noqa: F401 + from typing import List, Dict # noqa: F401 -from src import util from src.swagger_models.base_model_ import Model +from src import util class AssetsMetaInfoMetadataTimestamp(Model): diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/assets_result.py b/ymir/backend/src/ymir_viz/src/swagger_models/assets_result.py index d3fb152257..affa53503d 100644 --- a/ymir/backend/src/ymir_viz/src/swagger_models/assets_result.py +++ b/ymir/backend/src/ymir_viz/src/swagger_models/assets_result.py @@ -1,14 +1,14 @@ # coding: utf-8 from __future__ import absolute_import - from datetime import date, datetime # noqa: F401 + from typing import List, Dict # noqa: F401 -from src import util +from src.swagger_models.base_model_ import Model from src.swagger_models.api_response import ApiResponse # noqa: F401,E501 from src.swagger_models.assets_result_result import AssetsResultResult # noqa: F401,E501 -from src.swagger_models.base_model_ import Model +from src import util class AssetsResult(Model): diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/assets_result_result.py b/ymir/backend/src/ymir_viz/src/swagger_models/assets_result_result.py index f39dfae022..9d3879320d 100644 --- a/ymir/backend/src/ymir_viz/src/swagger_models/assets_result_result.py +++ b/ymir/backend/src/ymir_viz/src/swagger_models/assets_result_result.py @@ -1,13 +1,13 @@ # coding: utf-8 from __future__ import absolute_import - from datetime import date, datetime # noqa: F401 + from typing import List, Dict # noqa: F401 -from src import util -from src.swagger_models.asset_info import AssetInfo # noqa: F401,E501 from src.swagger_models.base_model_ import Model +from src.swagger_models.asset_info import AssetInfo # noqa: F401,E501 +from src import util class AssetsResultResult(Model): diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/base_model_.py b/ymir/backend/src/ymir_viz/src/swagger_models/base_model_.py index c17c808284..f168c28658 100644 --- a/ymir/backend/src/ymir_viz/src/swagger_models/base_model_.py +++ b/ymir/backend/src/ymir_viz/src/swagger_models/base_model_.py @@ -1,7 +1,7 @@ import pprint -import typing import six +import typing from src import util diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/dataset_evaluation.py b/ymir/backend/src/ymir_viz/src/swagger_models/dataset_evaluation.py new file mode 100644 index 0000000000..8e03515da8 --- /dev/null +++ b/ymir/backend/src/ymir_viz/src/swagger_models/dataset_evaluation.py @@ -0,0 +1,115 @@ +# coding: utf-8 + +from __future__ import absolute_import +from datetime import date, datetime # noqa: F401 + +from typing import List, Dict # noqa: F401 + +from src.swagger_models.base_model_ import Model +from src.swagger_models.dataset_evaluation_element import 
DatasetEvaluationElement # noqa: F401,E501 +from src import util + + +class DatasetEvaluation(Model): + """NOTE: This class is auto generated by the swagger code generator program. + + Do not edit the class manually. + """ + def __init__(self, ci_evaluations: Dict[str, DatasetEvaluationElement]=None, topic_evaluations: Dict[str, DatasetEvaluationElement]=None, ci_averaged_evaluation: DatasetEvaluationElement=None): # noqa: E501 + """DatasetEvaluation - a model defined in Swagger + + :param ci_evaluations: The ci_evaluations of this DatasetEvaluation. # noqa: E501 + :type ci_evaluations: Dict[str, DatasetEvaluationElement] + :param topic_evaluations: The topic_evaluations of this DatasetEvaluation. # noqa: E501 + :type topic_evaluations: Dict[str, DatasetEvaluationElement] + :param ci_averaged_evaluation: The ci_averaged_evaluation of this DatasetEvaluation. # noqa: E501 + :type ci_averaged_evaluation: DatasetEvaluationElement + """ + self.swagger_types = { + 'ci_evaluations': Dict[str, DatasetEvaluationElement], + 'topic_evaluations': Dict[str, DatasetEvaluationElement], + 'ci_averaged_evaluation': DatasetEvaluationElement + } + + self.attribute_map = { + 'ci_evaluations': 'ci_evaluations', + 'topic_evaluations': 'topic_evaluations', + 'ci_averaged_evaluation': 'ci_averaged_evaluation' + } + self._ci_evaluations = ci_evaluations + self._topic_evaluations = topic_evaluations + self._ci_averaged_evaluation = ci_averaged_evaluation + + @classmethod + def from_dict(cls, dikt) -> 'DatasetEvaluation': + """Returns the dict as a model + + :param dikt: A dict. + :type: dict + :return: The DatasetEvaluation of this DatasetEvaluation. # noqa: E501 + :rtype: DatasetEvaluation + """ + return util.deserialize_model(dikt, cls) + + @property + def ci_evaluations(self) -> Dict[str, DatasetEvaluationElement]: + """Gets the ci_evaluations of this DatasetEvaluation. + + + :return: The ci_evaluations of this DatasetEvaluation. + :rtype: Dict[str, DatasetEvaluationElement] + """ + return self._ci_evaluations + + @ci_evaluations.setter + def ci_evaluations(self, ci_evaluations: Dict[str, DatasetEvaluationElement]): + """Sets the ci_evaluations of this DatasetEvaluation. + + + :param ci_evaluations: The ci_evaluations of this DatasetEvaluation. + :type ci_evaluations: Dict[str, DatasetEvaluationElement] + """ + + self._ci_evaluations = ci_evaluations + + @property + def topic_evaluations(self) -> Dict[str, DatasetEvaluationElement]: + """Gets the topic_evaluations of this DatasetEvaluation. + + + :return: The topic_evaluations of this DatasetEvaluation. + :rtype: Dict[str, DatasetEvaluationElement] + """ + return self._topic_evaluations + + @topic_evaluations.setter + def topic_evaluations(self, topic_evaluations: Dict[str, DatasetEvaluationElement]): + """Sets the topic_evaluations of this DatasetEvaluation. + + + :param topic_evaluations: The topic_evaluations of this DatasetEvaluation. + :type topic_evaluations: Dict[str, DatasetEvaluationElement] + """ + + self._topic_evaluations = topic_evaluations + + @property + def ci_averaged_evaluation(self) -> DatasetEvaluationElement: + """Gets the ci_averaged_evaluation of this DatasetEvaluation. + + + :return: The ci_averaged_evaluation of this DatasetEvaluation. + :rtype: DatasetEvaluationElement + """ + return self._ci_averaged_evaluation + + @ci_averaged_evaluation.setter + def ci_averaged_evaluation(self, ci_averaged_evaluation: DatasetEvaluationElement): + """Sets the ci_averaged_evaluation of this DatasetEvaluation. 
+ + + :param ci_averaged_evaluation: The ci_averaged_evaluation of this DatasetEvaluation. + :type ci_averaged_evaluation: DatasetEvaluationElement + """ + + self._ci_averaged_evaluation = ci_averaged_evaluation diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/dataset_evaluation_element.py b/ymir/backend/src/ymir_viz/src/swagger_models/dataset_evaluation_element.py new file mode 100644 index 0000000000..c03a697f2f --- /dev/null +++ b/ymir/backend/src/ymir_viz/src/swagger_models/dataset_evaluation_element.py @@ -0,0 +1,166 @@ +# coding: utf-8 + +from __future__ import absolute_import +from datetime import date, datetime # noqa: F401 + +from typing import List, Dict # noqa: F401 + +from src.swagger_models.base_model_ import Model +from src import util + + +class DatasetEvaluationElement(Model): + """NOTE: This class is auto generated by the swagger code generator program. + + Do not edit the class manually. + """ + def __init__(self, ap: float=None, ar: float=None, tp: int=None, fp: int=None, fn: int=None): # noqa: E501 + """DatasetEvaluationElement - a model defined in Swagger + + :param ap: The ap of this DatasetEvaluationElement. # noqa: E501 + :type ap: float + :param ar: The ar of this DatasetEvaluationElement. # noqa: E501 + :type ar: float + :param tp: The tp of this DatasetEvaluationElement. # noqa: E501 + :type tp: int + :param fp: The fp of this DatasetEvaluationElement. # noqa: E501 + :type fp: int + :param fn: The fn of this DatasetEvaluationElement. # noqa: E501 + :type fn: int + """ + self.swagger_types = { + 'ap': float, + 'ar': float, + 'tp': int, + 'fp': int, + 'fn': int + } + + self.attribute_map = { + 'ap': 'ap', + 'ar': 'ar', + 'tp': 'tp', + 'fp': 'fp', + 'fn': 'fn' + } + self._ap = ap + self._ar = ar + self._tp = tp + self._fp = fp + self._fn = fn + + @classmethod + def from_dict(cls, dikt) -> 'DatasetEvaluationElement': + """Returns the dict as a model + + :param dikt: A dict. + :type: dict + :return: The DatasetEvaluationElement of this DatasetEvaluationElement. # noqa: E501 + :rtype: DatasetEvaluationElement + """ + return util.deserialize_model(dikt, cls) + + @property + def ap(self) -> float: + """Gets the ap of this DatasetEvaluationElement. + + + :return: The ap of this DatasetEvaluationElement. + :rtype: float + """ + return self._ap + + @ap.setter + def ap(self, ap: float): + """Sets the ap of this DatasetEvaluationElement. + + + :param ap: The ap of this DatasetEvaluationElement. + :type ap: float + """ + + self._ap = ap + + @property + def ar(self) -> float: + """Gets the ar of this DatasetEvaluationElement. + + + :return: The ar of this DatasetEvaluationElement. + :rtype: float + """ + return self._ar + + @ar.setter + def ar(self, ar: float): + """Sets the ar of this DatasetEvaluationElement. + + + :param ar: The ar of this DatasetEvaluationElement. + :type ar: float + """ + + self._ar = ar + + @property + def tp(self) -> int: + """Gets the tp of this DatasetEvaluationElement. + + + :return: The tp of this DatasetEvaluationElement. + :rtype: int + """ + return self._tp + + @tp.setter + def tp(self, tp: int): + """Sets the tp of this DatasetEvaluationElement. + + + :param tp: The tp of this DatasetEvaluationElement. + :type tp: int + """ + + self._tp = tp + + @property + def fp(self) -> int: + """Gets the fp of this DatasetEvaluationElement. + + + :return: The fp of this DatasetEvaluationElement. + :rtype: int + """ + return self._fp + + @fp.setter + def fp(self, fp: int): + """Sets the fp of this DatasetEvaluationElement. 
+ + + :param fp: The fp of this DatasetEvaluationElement. + :type fp: int + """ + + self._fp = fp + + @property + def fn(self) -> int: + """Gets the fn of this DatasetEvaluationElement. + + + :return: The fn of this DatasetEvaluationElement. + :rtype: int + """ + return self._fn + + @fn.setter + def fn(self, fn: int): + """Sets the fn of this DatasetEvaluationElement. + + + :param fn: The fn of this DatasetEvaluationElement. + :type fn: int + """ + + self._fn = fn diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/dataset_evaluation_result.py b/ymir/backend/src/ymir_viz/src/swagger_models/dataset_evaluation_result.py new file mode 100644 index 0000000000..50cf8b4b58 --- /dev/null +++ b/ymir/backend/src/ymir_viz/src/swagger_models/dataset_evaluation_result.py @@ -0,0 +1,142 @@ +# coding: utf-8 + +from __future__ import absolute_import +from datetime import date, datetime # noqa: F401 + +from typing import List, Dict # noqa: F401 + +from src.swagger_models.base_model_ import Model +from src.swagger_models.api_response import ApiResponse # noqa: F401,E501 +from src.swagger_models.dataset_evaluation_result_result import DatasetEvaluationResultResult # noqa: F401,E501 +from src import util + + +class DatasetEvaluationResult(Model): + """NOTE: This class is auto generated by the swagger code generator program. + + Do not edit the class manually. + """ + def __init__(self, code: int=None, request_id: str=None, message: str=None, result: Dict[str, DatasetEvaluationResultResult]=None): # noqa: E501 + """DatasetEvaluationResult - a model defined in Swagger + + :param code: The code of this DatasetEvaluationResult. # noqa: E501 + :type code: int + :param request_id: The request_id of this DatasetEvaluationResult. # noqa: E501 + :type request_id: str + :param message: The message of this DatasetEvaluationResult. # noqa: E501 + :type message: str + :param result: The result of this DatasetEvaluationResult. # noqa: E501 + :type result: Dict[str, DatasetEvaluationResultResult] + """ + self.swagger_types = { + 'code': int, + 'request_id': str, + 'message': str, + 'result': Dict[str, DatasetEvaluationResultResult] + } + + self.attribute_map = { + 'code': 'code', + 'request_id': 'request_id', + 'message': 'message', + 'result': 'result' + } + self._code = code + self._request_id = request_id + self._message = message + self._result = result + + @classmethod + def from_dict(cls, dikt) -> 'DatasetEvaluationResult': + """Returns the dict as a model + + :param dikt: A dict. + :type: dict + :return: The DatasetEvaluationResult of this DatasetEvaluationResult. # noqa: E501 + :rtype: DatasetEvaluationResult + """ + return util.deserialize_model(dikt, cls) + + @property + def code(self) -> int: + """Gets the code of this DatasetEvaluationResult. + + + :return: The code of this DatasetEvaluationResult. + :rtype: int + """ + return self._code + + @code.setter + def code(self, code: int): + """Sets the code of this DatasetEvaluationResult. + + + :param code: The code of this DatasetEvaluationResult. + :type code: int + """ + + self._code = code + + @property + def request_id(self) -> str: + """Gets the request_id of this DatasetEvaluationResult. + + + :return: The request_id of this DatasetEvaluationResult. + :rtype: str + """ + return self._request_id + + @request_id.setter + def request_id(self, request_id: str): + """Sets the request_id of this DatasetEvaluationResult. + + + :param request_id: The request_id of this DatasetEvaluationResult. 
+ :type request_id: str + """ + + self._request_id = request_id + + @property + def message(self) -> str: + """Gets the message of this DatasetEvaluationResult. + + + :return: The message of this DatasetEvaluationResult. + :rtype: str + """ + return self._message + + @message.setter + def message(self, message: str): + """Sets the message of this DatasetEvaluationResult. + + + :param message: The message of this DatasetEvaluationResult. + :type message: str + """ + + self._message = message + + @property + def result(self) -> Dict[str, DatasetEvaluationResultResult]: + """Gets the result of this DatasetEvaluationResult. + + + :return: The result of this DatasetEvaluationResult. + :rtype: Dict[str, DatasetEvaluationResultResult] + """ + return self._result + + @result.setter + def result(self, result: Dict[str, DatasetEvaluationResultResult]): + """Sets the result of this DatasetEvaluationResult. + + + :param result: The result of this DatasetEvaluationResult. + :type result: Dict[str, DatasetEvaluationResultResult] + """ + + self._result = result diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/dataset_evaluation_result_result.py b/ymir/backend/src/ymir_viz/src/swagger_models/dataset_evaluation_result_result.py new file mode 100644 index 0000000000..48cff51cf7 --- /dev/null +++ b/ymir/backend/src/ymir_viz/src/swagger_models/dataset_evaluation_result_result.py @@ -0,0 +1,115 @@ +# coding: utf-8 + +from __future__ import absolute_import +from datetime import date, datetime # noqa: F401 + +from typing import List, Dict # noqa: F401 + +from src.swagger_models.base_model_ import Model +from src.swagger_models.dataset_evaluation import DatasetEvaluation # noqa: F401,E501 +from src import util + + +class DatasetEvaluationResultResult(Model): + """NOTE: This class is auto generated by the swagger code generator program. + + Do not edit the class manually. + """ + def __init__(self, conf_thr: float=None, iou_evaluations: Dict[str, DatasetEvaluation]=None, iou_averaged_evaluation: DatasetEvaluation=None): # noqa: E501 + """DatasetEvaluationResultResult - a model defined in Swagger + + :param conf_thr: The conf_thr of this DatasetEvaluationResultResult. # noqa: E501 + :type conf_thr: float + :param iou_evaluations: The iou_evaluations of this DatasetEvaluationResultResult. # noqa: E501 + :type iou_evaluations: Dict[str, DatasetEvaluation] + :param iou_averaged_evaluation: The iou_averaged_evaluation of this DatasetEvaluationResultResult. # noqa: E501 + :type iou_averaged_evaluation: DatasetEvaluation + """ + self.swagger_types = { + 'conf_thr': float, + 'iou_evaluations': Dict[str, DatasetEvaluation], + 'iou_averaged_evaluation': DatasetEvaluation + } + + self.attribute_map = { + 'conf_thr': 'conf_thr', + 'iou_evaluations': 'iou_evaluations', + 'iou_averaged_evaluation': 'iou_averaged_evaluation' + } + self._conf_thr = conf_thr + self._iou_evaluations = iou_evaluations + self._iou_averaged_evaluation = iou_averaged_evaluation + + @classmethod + def from_dict(cls, dikt) -> 'DatasetEvaluationResultResult': + """Returns the dict as a model + + :param dikt: A dict. + :type: dict + :return: The DatasetEvaluationResult_result of this DatasetEvaluationResultResult. # noqa: E501 + :rtype: DatasetEvaluationResultResult + """ + return util.deserialize_model(dikt, cls) + + @property + def conf_thr(self) -> float: + """Gets the conf_thr of this DatasetEvaluationResultResult. + + + :return: The conf_thr of this DatasetEvaluationResultResult. 
+ :rtype: float + """ + return self._conf_thr + + @conf_thr.setter + def conf_thr(self, conf_thr: float): + """Sets the conf_thr of this DatasetEvaluationResultResult. + + + :param conf_thr: The conf_thr of this DatasetEvaluationResultResult. + :type conf_thr: float + """ + + self._conf_thr = conf_thr + + @property + def iou_evaluations(self) -> Dict[str, DatasetEvaluation]: + """Gets the iou_evaluations of this DatasetEvaluationResultResult. + + + :return: The iou_evaluations of this DatasetEvaluationResultResult. + :rtype: Dict[str, DatasetEvaluation] + """ + return self._iou_evaluations + + @iou_evaluations.setter + def iou_evaluations(self, iou_evaluations: Dict[str, DatasetEvaluation]): + """Sets the iou_evaluations of this DatasetEvaluationResultResult. + + + :param iou_evaluations: The iou_evaluations of this DatasetEvaluationResultResult. + :type iou_evaluations: Dict[str, DatasetEvaluation] + """ + + self._iou_evaluations = iou_evaluations + + @property + def iou_averaged_evaluation(self) -> DatasetEvaluation: + """Gets the iou_averaged_evaluation of this DatasetEvaluationResultResult. + + + :return: The iou_averaged_evaluation of this DatasetEvaluationResultResult. + :rtype: DatasetEvaluation + """ + return self._iou_averaged_evaluation + + @iou_averaged_evaluation.setter + def iou_averaged_evaluation(self, iou_averaged_evaluation: DatasetEvaluation): + """Sets the iou_averaged_evaluation of this DatasetEvaluationResultResult. + + + :param iou_averaged_evaluation: The iou_averaged_evaluation of this DatasetEvaluationResultResult. + :type iou_averaged_evaluation: DatasetEvaluation + """ + + self._iou_averaged_evaluation = iou_averaged_evaluation diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/dataset_result.py b/ymir/backend/src/ymir_viz/src/swagger_models/dataset_result.py index 6dd34ee452..aa8a36798f 100644 --- a/ymir/backend/src/ymir_viz/src/swagger_models/dataset_result.py +++ b/ymir/backend/src/ymir_viz/src/swagger_models/dataset_result.py @@ -1,14 +1,14 @@ # coding: utf-8 from __future__ import absolute_import - from datetime import date, datetime # noqa: F401 + from typing import List, Dict # noqa: F401 -from src import util -from src.swagger_models.api_response import ApiResponse # noqa: F401,E501 from src.swagger_models.base_model_ import Model +from src.swagger_models.api_response import ApiResponse # noqa: F401,E501 from src.swagger_models.dataset_result_result import DatasetResultResult # noqa: F401,E501 +from src import util class DatasetResult(Model): diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/dataset_result_result.py b/ymir/backend/src/ymir_viz/src/swagger_models/dataset_result_result.py index 430a154c7a..e808429957 100644 --- a/ymir/backend/src/ymir_viz/src/swagger_models/dataset_result_result.py +++ b/ymir/backend/src/ymir_viz/src/swagger_models/dataset_result_result.py @@ -1,13 +1,13 @@ # coding: utf-8 from __future__ import absolute_import - from datetime import date, datetime # noqa: F401 + from typing import List, Dict # noqa: F401 -from src import util from src.swagger_models.base_model_ import Model from src.swagger_models.dataset_result_result_negative_info import DatasetResultResultNegativeInfo # noqa: F401,E501 +from src import util class DatasetResultResult(Model): @@ -15,11 +15,13 @@ class DatasetResultResult(Model): Do not edit the class manually. 
""" - def __init__(self, total_images_cnt: int=None, class_names_count: object=None, ignored_labels: object=None, negative_info: DatasetResultResultNegativeInfo=None): # noqa: E501 + def __init__(self, total_images_cnt: int=None, class_ids_count: object=None, class_names_count: object=None, ignored_labels: object=None, negative_info: DatasetResultResultNegativeInfo=None): # noqa: E501 """DatasetResultResult - a model defined in Swagger :param total_images_cnt: The total_images_cnt of this DatasetResultResult. # noqa: E501 :type total_images_cnt: int + :param class_ids_count: The class_ids_count of this DatasetResultResult. # noqa: E501 + :type class_ids_count: object :param class_names_count: The class_names_count of this DatasetResultResult. # noqa: E501 :type class_names_count: object :param ignored_labels: The ignored_labels of this DatasetResultResult. # noqa: E501 @@ -29,6 +31,7 @@ def __init__(self, total_images_cnt: int=None, class_names_count: object=None, i """ self.swagger_types = { 'total_images_cnt': int, + 'class_ids_count': object, 'class_names_count': object, 'ignored_labels': object, 'negative_info': DatasetResultResultNegativeInfo @@ -36,11 +39,13 @@ def __init__(self, total_images_cnt: int=None, class_names_count: object=None, i self.attribute_map = { 'total_images_cnt': 'total_images_cnt', + 'class_ids_count': 'class_ids_count', 'class_names_count': 'class_names_count', 'ignored_labels': 'ignored_labels', 'negative_info': 'negative_info' } self._total_images_cnt = total_images_cnt + self._class_ids_count = class_ids_count self._class_names_count = class_names_count self._ignored_labels = ignored_labels self._negative_info = negative_info @@ -77,6 +82,27 @@ def total_images_cnt(self, total_images_cnt: int): self._total_images_cnt = total_images_cnt + @property + def class_ids_count(self) -> object: + """Gets the class_ids_count of this DatasetResultResult. + + + :return: The class_ids_count of this DatasetResultResult. + :rtype: object + """ + return self._class_ids_count + + @class_ids_count.setter + def class_ids_count(self, class_ids_count: object): + """Sets the class_ids_count of this DatasetResultResult. + + + :param class_ids_count: The class_ids_count of this DatasetResultResult. + :type class_ids_count: object + """ + + self._class_ids_count = class_ids_count + @property def class_names_count(self) -> object: """Gets the class_names_count of this DatasetResultResult. 
diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/dataset_result_result_negative_info.py b/ymir/backend/src/ymir_viz/src/swagger_models/dataset_result_result_negative_info.py index 6aa95ed31a..e5fc22e3dc 100644 --- a/ymir/backend/src/ymir_viz/src/swagger_models/dataset_result_result_negative_info.py +++ b/ymir/backend/src/ymir_viz/src/swagger_models/dataset_result_result_negative_info.py @@ -1,12 +1,12 @@ # coding: utf-8 from __future__ import absolute_import - from datetime import date, datetime # noqa: F401 + from typing import List, Dict # noqa: F401 -from src import util from src.swagger_models.base_model_ import Model +from src import util class DatasetResultResultNegativeInfo(Model): diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/model_result.py b/ymir/backend/src/ymir_viz/src/swagger_models/model_result.py index 4f70b10ea0..ae39409b13 100644 --- a/ymir/backend/src/ymir_viz/src/swagger_models/model_result.py +++ b/ymir/backend/src/ymir_viz/src/swagger_models/model_result.py @@ -1,14 +1,14 @@ # coding: utf-8 from __future__ import absolute_import - from datetime import date, datetime # noqa: F401 + from typing import List, Dict # noqa: F401 -from src import util -from src.swagger_models.api_response import ApiResponse # noqa: F401,E501 from src.swagger_models.base_model_ import Model +from src.swagger_models.api_response import ApiResponse # noqa: F401,E501 from src.swagger_models.model_result_result import ModelResultResult # noqa: F401,E501 +from src import util class ModelResult(Model): diff --git a/ymir/backend/src/ymir_viz/src/swagger_models/model_result_result.py b/ymir/backend/src/ymir_viz/src/swagger_models/model_result_result.py index 026bef16a2..486c8c55f5 100644 --- a/ymir/backend/src/ymir_viz/src/swagger_models/model_result_result.py +++ b/ymir/backend/src/ymir_viz/src/swagger_models/model_result_result.py @@ -1,12 +1,12 @@ # coding: utf-8 from __future__ import absolute_import - from datetime import date, datetime # noqa: F401 + from typing import List, Dict # noqa: F401 -from src import util from src.swagger_models.base_model_ import Model +from src import util class ModelResultResult(Model): diff --git a/ymir/backend/src/ymir_viz/src/viz_models/asset.py b/ymir/backend/src/ymir_viz/src/viz_models/asset.py index 124937abbc..d9fc985765 100644 --- a/ymir/backend/src/ymir_viz/src/viz_models/asset.py +++ b/ymir/backend/src/ymir_viz/src/viz_models/asset.py @@ -69,7 +69,8 @@ def set_asset_content_cache( with redis_cache.pipeline() as pipe: for class_id, assets_list in asset_content["class_ids_index"].items(): - pipe.rpush(f"{key_asset_index}:{class_id}", *assets_list) + if assets_list: + pipe.rpush(f"{key_asset_index}:{class_id}", *assets_list) pipe.execute() redis_cache.set(key_cache_status, {"flag": 1}) diff --git a/ymir/backend/src/ymir_viz/src/viz_models/pb_reader.py b/ymir/backend/src/ymir_viz/src/viz_models/pb_reader.py index 9cd2268410..0178a58452 100644 --- a/ymir/backend/src/ymir_viz/src/viz_models/pb_reader.py +++ b/ymir/backend/src/ymir_viz/src/viz_models/pb_reader.py @@ -30,7 +30,7 @@ def get_model_info(self) -> Dict: def get_dataset_info(self) -> Dict: """ - exampled return data: + return value example: { "class_ids_count": {3: 34}, "class_names_count": {'cat': 34}, @@ -56,7 +56,7 @@ def get_dataset_info(self) -> Dict: def get_assets_content(self) -> Dict: """ - exampled data: + return value example: { "all_asset_ids": ["asset_id"], "asset_ids_detail": { @@ -81,3 +81,64 @@ def get_assets_content(self) -> Dict: 
assets_info["class_ids_index"][viz_settings.VIZ_ALL_INDEX_CLASSIDS] = assets_info["all_asset_ids"] return assets_info + + def get_dataset_evaluations(self) -> Dict: + """ + return value example: + { + "dataset_hash":{ + "iou_averaged_evaluation":{ + "ci_averaged_evaluation":{ + "ap":1.0, + "ar":1.0, + "fn":0, + "fp":0, + "tp":4329 + }, + "ci_evaluations":{ + "4":{ + "ap":1.0, + "ar":1.0, + "fn":0, + "fp":0, + "tp":91 + } + }, + "topic_evaluations":{} + }, + "iou_evaluations":{ + "0.50":{ + "ci_averaged_evaluation":{ + "ap":1.0, + "ar":1.0, + "fn":0, + "fp":0, + "tp":4329 + }, + "ci_evaluations":{ + "2":{ + "ap":1.0, + "ar":1.0, + "fn":0, + "fp":0, + "tp":4238 + } + }, + "topic_evaluations":{} + }, + "topic_evaluations":{} + } + } + } + """ + try: + evaluation = mir_storage_ops.MirStorageOps.load_dataset_evaluations( + mir_root=self.mir_root, + mir_branch=self.branch_id, + mir_task_id=self.task_id, + ) + except errors.MirError: + logging.exception("evaluation %s not found", self.branch_id) + raise exceptions.DatasetEvaluationNotExists(f"evaluation {self.branch_id} not found") + + return evaluation diff --git a/ymir/backend/tox.ini b/ymir/backend/tox.ini index 1aa0449b4f..89182fddfa 100644 --- a/ymir/backend/tox.ini +++ b/ymir/backend/tox.ini @@ -66,5 +66,4 @@ commands = git config --global user.name 'ci' mypy src/ymir_app mypy src/ymir_controller mypy src/ymir_monitor - mypy src/ymir_postman mypy src/ymir_viz diff --git a/ymir/command/mir/cli.py b/ymir/command/mir/cli.py index 25d363a9d6..680567f711 100644 --- a/ymir/command/mir/cli.py +++ b/ymir/command/mir/cli.py @@ -6,12 +6,12 @@ from typing import Any, cast, Protocol from mir import version -from mir.commands import (init, branch, checkout, commit, copy, exporting, filter, log, merge, reset, sampling, show, - status, training, mining, importing, infer, model_importing) +from mir.commands import (init, branch, checkout, commit, copy, evaluate, exporting, filter, log, merge, reset, + sampling, show, status, training, mining, importing, infer, model_importing) _COMMANDS_ = [ - init, branch, checkout, commit, copy, exporting, filter, log, merge, reset, sampling, show, status, training, - mining, importing, infer, model_importing + init, branch, checkout, commit, copy, evaluate, exporting, filter, log, merge, reset, sampling, show, status, + training, mining, importing, infer, model_importing ] diff --git a/ymir/command/mir/commands/commit.py b/ymir/command/mir/commands/commit.py index 686b7eb228..0d9b49d898 100644 --- a/ymir/command/mir/commands/commit.py +++ b/ymir/command/mir/commands/commit.py @@ -1,10 +1,9 @@ import argparse import logging -import os from mir import scm from mir.commands import base -from mir.tools import checker, mir_repo_utils, mir_storage +from mir.tools import checker, mir_repo_utils from mir.tools.code import MirCode @@ -22,12 +21,7 @@ def run_with_args(mir_root: str, msg: str) -> int: return MirCode.RC_CMD_INVALID_MIR_REPO repo_git = scm.Scm(root_dir=mir_root, scm_executable='git') - - all_mir_names = mir_storage.get_all_mir_paths() - for f in all_mir_names: - if os.path.isfile(os.path.join(mir_root, f)): - repo_git.add(f) - + repo_git.add('.') output_str = repo_git.commit(["-m", msg]) logging.info("\n%s" % output_str) diff --git a/ymir/command/mir/commands/evaluate.py b/ymir/command/mir/commands/evaluate.py new file mode 100644 index 0000000000..8ce0aa31ac --- /dev/null +++ b/ymir/command/mir/commands/evaluate.py @@ -0,0 +1,103 @@ +import argparse +import logging + +from mir.commands import base +from mir.tools 
import checker, det_eval, mir_storage_ops, revs_parser
+from mir.tools.code import MirCode
+from mir.tools.command_run_in_out import command_run_in_out
+from mir.protos import mir_command_pb2 as mirpb
+
+
+class CmdEvaluate(base.BaseCommand):
+    def run(self) -> int:
+        logging.info(f"command evaluate: {self.args}")
+
+        return CmdEvaluate.run_with_args(work_dir=self.args.work_dir,
+                                         src_revs=self.args.src_revs,
+                                         dst_rev=self.args.dst_rev,
+                                         gt_rev=self.args.gt_rev,
+                                         mir_root=self.args.mir_root,
+                                         conf_thr=self.args.conf_thr,
+                                         iou_thrs=self.args.iou_thrs,
+                                         need_pr_curve=self.args.need_pr_curve)
+
+    @staticmethod
+    @command_run_in_out
+    def run_with_args(work_dir: str, src_revs: str, dst_rev: str, gt_rev: str, mir_root: str, conf_thr: float,
+                      iou_thrs: str, need_pr_curve: bool) -> int:
+        src_rev_tids = revs_parser.parse_arg_revs(src_revs)
+        gt_rev_tid = revs_parser.parse_single_arg_rev(gt_rev, need_tid=False)
+        dst_rev_tid = revs_parser.parse_single_arg_rev(dst_rev, need_tid=True)
+
+        return_code = checker.check(mir_root,
+                                    [checker.Prerequisites.IS_INSIDE_MIR_REPO, checker.Prerequisites.IS_CLEAN])
+        if return_code != MirCode.RC_OK:
+            return return_code
+
+        # read pred and gt
+        mir_gt = det_eval.MirCoco(mir_root=mir_root, rev_tid=gt_rev_tid, conf_thr=conf_thr)
+        mir_dts = mir_gt.load_dts_from_gt(mir_root=mir_root, rev_tids=src_rev_tids, conf_thr=conf_thr)
+
+        # eval
+        evaluate_config = mirpb.EvaluateConfig()
+        evaluate_config.conf_thr = conf_thr
+        evaluate_config.iou_thrs_interval = iou_thrs
+        evaluate_config.need_pr_curve = need_pr_curve
+        evaluate_config.gt_dataset_id = mir_gt.dataset_id
+        evaluate_config.pred_dataset_ids.extend([mir_dt.dataset_id for mir_dt in mir_dts])
+        evaluation = det_eval.det_evaluate(mir_dts=mir_dts, mir_gt=mir_gt, config=evaluate_config)
+
+        _show_evaluation(evaluation=evaluation)
+
+        # save and commit
+        task = mir_storage_ops.create_task(task_type=mirpb.TaskType.TaskTypeEvaluate,
+                                           task_id=dst_rev_tid.tid,
+                                           message='evaluate',
+                                           evaluation=evaluation,
+                                           src_revs=src_revs,
+                                           dst_rev=dst_rev)
+        mir_storage_ops.MirStorageOps.save_and_commit(mir_root=mir_root,
+                                                      mir_branch=dst_rev_tid.rev,
+                                                      his_branch=src_rev_tids[0].rev,
+                                                      mir_datas={},
+                                                      task=task)
+
+        return MirCode.RC_OK
+
+
+def _show_evaluation(evaluation: mirpb.Evaluation) -> None:
+    for dataset_id, dataset_evaluation in evaluation.dataset_evaluations.items():
+        cae = dataset_evaluation.iou_averaged_evaluation.ci_averaged_evaluation
+        logging.info(f"gt: {evaluation.config.gt_dataset_id} vs pred: {dataset_id}, mAP: {cae.ap}")
+
+
+def bind_to_subparsers(subparsers: argparse._SubParsersAction, parent_parser: argparse.ArgumentParser) -> None:
+    evaluate_arg_parser = subparsers.add_parser('evaluate',
+                                                parents=[parent_parser],
+                                                description='use this command to evaluate model with ground truth',
+                                                help='evaluate model with ground truth')
+    evaluate_arg_parser.add_argument('-w', dest='work_dir', type=str, help='work place for evaluate')
+    evaluate_arg_parser.add_argument("--src-revs", dest="src_revs", type=str, required=True, help="prediction rev@tid")
+    evaluate_arg_parser.add_argument("--gt-rev", dest="gt_rev", type=str, required=True, help="ground truth rev@tid")
+    evaluate_arg_parser.add_argument("--dst-rev",
+                                     dest="dst_rev",
+                                     type=str,
+                                     required=True,
+                                     help="rev@tid: destination branch name and task id")
+    evaluate_arg_parser.add_argument('--conf-thr',
+                                     dest='conf_thr',
+                                     type=float,
+                                     required=False,
+                                     default=0.3,
+                                     help='confidence threshold, default 0.3')
+    evaluate_arg_parser.add_argument('--iou-thrs',
+                                     dest='iou_thrs',
+                                     type=str,
+                                     required=False,
+                                     default='0.5:1.0:0.05',
+                                     help='iou thresholds, default 0.5:1.0:0.05, upper bound is excluded')
+    evaluate_arg_parser.add_argument('--need-pr-curve',
+                                     dest='need_pr_curve',
+                                     action='store_true',
+                                     help='also generates pr curve in evaluation result')
+    evaluate_arg_parser.set_defaults(func=CmdEvaluate)
diff --git a/ymir/command/mir/commands/exporting.py b/ymir/command/mir/commands/exporting.py
index d5c0d355ba..f3a97e036a 100644
--- a/ymir/command/mir/commands/exporting.py
+++ b/ymir/command/mir/commands/exporting.py
@@ -7,6 +7,7 @@
 from mir.tools import checker, class_ids, data_exporter, mir_repo_utils, mir_storage_ops, revs_parser
 from mir.tools.code import MirCode
 from mir.tools.command_run_in_out import command_run_in_out
+from mir.tools.errors import MirRuntimeError
 from mir.tools.phase_logger import PhaseLoggerCenter
 
 
@@ -68,7 +69,11 @@ def run_with_args(mir_root: str, asset_dir: str, annotation_dir: str, media_loca
             return MirCode.RC_CMD_INVALID_ARGS
 
         cls_mgr = class_ids.ClassIdManager(mir_root=mir_root)
-        type_ids_list = cls_mgr.id_for_names(in_cis.split(';')) if in_cis else []
+        class_names = in_cis.split(';') if in_cis else []
+        type_ids_list, unknown_names = cls_mgr.id_for_names(class_names)
+        if unknown_names:
+            raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS,
+                                  error_message=f"unknown class names: {unknown_names}")
 
         # export
         data_exporter.export(mir_root=mir_root,
diff --git a/ymir/command/mir/commands/filter.py b/ymir/command/mir/commands/filter.py
index 4843d32466..f37a28f249 100644
--- a/ymir/command/mir/commands/filter.py
+++ b/ymir/command/mir/commands/filter.py
@@ -21,7 +21,13 @@ def __preds_set_from_str(preds_str: str, cls_mgr: class_ids.ClassIdManager) -> S
         if not preds_str:
             return set()
 
-        return set(cls_mgr.id_for_names(preds_str.split(";")))
+        class_names = preds_str.split(";")
+        class_ids, unknown_names = cls_mgr.id_for_names(class_names)
+        if unknown_names:
+            raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS,
+                                  error_message=f"unknown class names: {unknown_names}")
+
+        return set(class_ids)
 
     @staticmethod
     def __include_match(asset_ids_set: Set[str], mir_keywords: mirpb.MirKeywords, attr_name: str,
diff --git a/ymir/command/mir/commands/importing.py b/ymir/command/mir/commands/importing.py
index a02d1b7460..d1d9508c98 100644
--- a/ymir/command/mir/commands/importing.py
+++ b/ymir/command/mir/commands/importing.py
@@ -20,7 +20,6 @@ def run(self) -> int:
 
         return CmdImport.run_with_args(mir_root=self.args.mir_root,
                                        index_file=self.args.index_file,
-                                       ck_file='',
                                        anno_abs=self.args.anno,
                                        gen_abs=self.args.gen,
                                        dataset_name=self.args.dataset_name,
@@ -31,7 +30,7 @@ def run(self) -> int:
 
     @staticmethod
     @command_run_in_out
-    def run_with_args(mir_root: str, index_file: str, ck_file: str, anno_abs: str, gen_abs: str, dataset_name: str,
+    def run_with_args(mir_root: str, index_file: str, anno_abs: str, gen_abs: str, dataset_name: str,
                       dst_rev: str, src_revs: str, work_dir: str, ignore_unknown_types: bool) -> int:
         # Step 1: check args and prepare environment.
if not index_file or not gen_abs or not os.path.isfile(index_file): @@ -76,11 +75,9 @@ def run_with_args(mir_root: str, index_file: str, ck_file: str, anno_abs: str, g return ret mir_annotation = mirpb.MirAnnotations() - mir_keywords = mirpb.MirKeywords() - ret_code, unknown_types = annotations.import_annotations(mir_annotation=mir_annotation, - mir_keywords=mir_keywords, + ret_code, unknown_types = annotations.import_annotations(mir_metadatas=mir_metadatas, + mir_annotation=mir_annotation, in_sha1_file=sha1_index_abs, - ck_file=ck_file, mir_root=mir_root, annotations_dir_path=anno_abs, task_id=dst_typ_rev_tid.tid, diff --git a/ymir/command/mir/commands/infer.py b/ymir/command/mir/commands/infer.py index 240412b154..c033c4b679 100644 --- a/ymir/command/mir/commands/infer.py +++ b/ymir/command/mir/commands/infer.py @@ -4,12 +4,12 @@ import os import subprocess import time -from typing import Any, Tuple, Optional +from typing import Any, List, Tuple, Optional import yaml from mir.commands import base -from mir.tools import settings as mir_settings, utils as mir_utils +from mir.tools import checker, class_ids, settings as mir_settings, utils as mir_utils from mir.tools.code import MirCode from mir.tools.errors import MirRuntimeError @@ -36,6 +36,7 @@ def run(self) -> int: logging.debug("command infer: %s", self.args) return CmdInfer.run_with_args(work_dir=self.args.work_dir, + mir_root=self.args.mir_root, media_path=self.args.work_dir, model_location=self.args.model_location, model_hash=self.args.model_hash, @@ -48,6 +49,7 @@ def run(self) -> int: @staticmethod def run_with_args(work_dir: str, + mir_root: str, media_path: str, model_location: str, model_hash: str, @@ -82,6 +84,8 @@ def run_with_args(work_dir: str, int: [description] """ # check args + if not mir_root: + mir_root = '.' 
if not work_dir: logging.error('empty --work-dir, abort') return MirCode.RC_CMD_INVALID_ARGS @@ -110,6 +114,10 @@ def run_with_args(work_dir: str, logging.error('empty --executor, abort') return MirCode.RC_CMD_INVALID_ARGS + return_code = checker.check(mir_root, [checker.Prerequisites.IS_INSIDE_MIR_REPO]) + if return_code != MirCode.RC_OK: + return return_code + if not executant_name: executant_name = task_id @@ -161,7 +169,7 @@ def run_with_args(work_dir: str, if run_infer: _process_infer_results(infer_result_file=os.path.join(work_out_path, 'infer-result.json'), - max_boxes=_get_max_boxes(config_file)) + max_boxes=_get_max_boxes(config_file), mir_root=mir_root) return MirCode.RC_OK @@ -238,7 +246,7 @@ def _prepare_assets(index_file: str, work_index_file: str, media_path: str) -> N needs_new_commit=False) -def _process_infer_results(infer_result_file: str, max_boxes: int) -> None: +def _process_infer_results(infer_result_file: str, max_boxes: int, mir_root: str) -> None: if not os.path.isfile(infer_result_file): raise MirRuntimeError(error_code=MirCode.RC_CMD_NO_RESULT, error_message=f"can not find result file: {infer_result_file}") @@ -246,12 +254,16 @@ def _process_infer_results(infer_result_file: str, max_boxes: int) -> None: with open(infer_result_file, 'r') as f: results = json.loads(f.read()) + class_id_mgr = class_ids.ClassIdManager(mir_root=mir_root) + if 'detection' in results: names_annotations_dict = results['detection'] for _, annotations_dict in names_annotations_dict.items(): if 'annotations' in annotations_dict and isinstance(annotations_dict['annotations'], list): - annotations_dict['annotations'].sort(key=(lambda x: x['score']), reverse=True) - annotations_dict['annotations'] = annotations_dict['annotations'][:max_boxes] + annotations_list: List[dict] = annotations_dict['annotations'] + annotations_list.sort(key=(lambda x: x['score']), reverse=True) + annotations_list = [a for a in annotations_list if class_id_mgr.has_name(a['class_name'])] + annotations_dict['annotations'] = annotations_list[:max_boxes] with open(infer_result_file, 'w') as f: f.write(json.dumps(results, indent=4)) @@ -315,6 +327,7 @@ def run_docker_cmd(asset_path: str, index_file_path: str, model_path: str, confi # public: cli bind def bind_to_subparsers(subparsers: argparse._SubParsersAction, parent_parser: argparse.ArgumentParser) -> None: infer_arg_parser = subparsers.add_parser('infer', + parents=[parent_parser], description='use this command to inference images', help='inference images') infer_arg_parser.add_argument('--index-file', dest='index_file', type=str, required=True, help='path to index file') diff --git a/ymir/command/mir/commands/init.py b/ymir/command/mir/commands/init.py index dec2233bf3..0f14fa769a 100644 --- a/ymir/command/mir/commands/init.py +++ b/ymir/command/mir/commands/init.py @@ -7,7 +7,7 @@ from mir.commands import base from mir.protos import mir_command_pb2 as mirpb from mir.scm.cmd import CmdScm -from mir.tools import checker, class_ids, context, mir_storage_ops, revs_parser +from mir.tools import checker, class_ids, mir_storage_ops, revs_parser from mir.tools.code import MirCode @@ -47,7 +47,7 @@ def __commit_empty_dataset(mir_root: str, empty_rev: str) -> None: # public: run @staticmethod - def run_with_args(mir_root: str, project_class_names: str, empty_rev: str) -> int: + def run_with_args(mir_root: str, empty_rev: str) -> int: return_code = checker.check( mir_root, [checker.Prerequisites.IS_OUTSIDE_GIT_REPO, checker.Prerequisites.IS_OUTSIDE_MIR_REPO]) if return_code != 
MirCode.RC_OK: @@ -55,12 +55,9 @@ def run_with_args(mir_root: str, project_class_names: str, empty_rev: str) -> in class_ids.create_empty_if_not_exists(mir_root=mir_root) - project_class_ids = class_ids.ClassIdManager( - mir_root=mir_root).id_for_names(project_class_names.split(';')) if project_class_names else [] - context.save(mir_root=mir_root, project_class_ids=project_class_ids) - repo_git = scm.Scm(root_dir=mir_root, scm_executable='git') repo_git.init() + repo_git.config(['core.fileMode', 'false']) CmdInit.__update_ignore(mir_root=mir_root, git=repo_git, ignored_items=['.mir_lock', '.mir']) repo_git.commit(["-m", "first commit"]) @@ -74,7 +71,6 @@ def run(self) -> int: logging.debug("command init: %s", self.args) return self.run_with_args(mir_root=self.args.mir_root, - project_class_names=self.args.project_class_names, empty_rev=self.args.empty_rev) @@ -83,12 +79,6 @@ def bind_to_subparsers(subparsers: argparse._SubParsersAction, parent_parser: ar parents=[parent_parser], description="use this command to init mir repo", help="init mir repo") - init_arg_parser.add_argument('--project-class-names', - dest='project_class_names', - required=False, - type=str, - default='', - help='project class type names, separated by semicolon') init_arg_parser.add_argument('--with-empty-rev', dest='empty_rev', required=False, diff --git a/ymir/command/mir/commands/merge.py b/ymir/command/mir/commands/merge.py index aa70c301d7..45fbd69bcd 100644 --- a/ymir/command/mir/commands/merge.py +++ b/ymir/command/mir/commands/merge.py @@ -283,8 +283,6 @@ def run_with_args(mir_root: str, src_revs: str, ex_src_revs: str, dst_rev: str, mir_datas=mir_data, task=task) - logging.debug("mir merge: write files done") - return MirCode.RC_OK diff --git a/ymir/command/mir/commands/mining.py b/ymir/command/mir/commands/mining.py index c2e21e3d52..b15ef1661a 100644 --- a/ymir/command/mir/commands/mining.py +++ b/ymir/command/mir/commands/mining.py @@ -152,6 +152,7 @@ def run_with_args(work_dir: str, return_msg = '' try: infer.CmdInfer.run_with_args(work_dir=work_dir, + mir_root=mir_root, media_path=work_asset_path, model_location=model_location, model_hash=model_hash, @@ -289,16 +290,23 @@ def _get_infer_annotations(file_path: str, asset_ids_set: Set[str], continue asset_id = os.path.splitext(os.path.basename(asset_name))[0] if asset_id not in asset_ids_set: - logging.debug(f"unknown asset name: {asset_name}, ignore") + logging.info(f"unknown asset name: {asset_name}, ignore") continue single_image_annotations = mirpb.SingleImageAnnotations() - for idx, annotation_dict in enumerate(annotations_dict['annotations']): + idx = 0 + for annotation_dict in annotations_dict['annotations']: + class_id = cls_id_mgr.id_and_main_name_for_name(name=annotation_dict['class_name'])[0] + # ignore unknown class ids + if class_id < 0: + continue + annotation = mirpb.Annotation() annotation.index = idx json_format.ParseDict(annotation_dict['box'], annotation.box) - annotation.class_id = cls_id_mgr.id_and_main_name_for_name(annotation_dict['class_name'])[0] + annotation.class_id = class_id annotation.score = float(annotation_dict.get('score', 0)) single_image_annotations.annotations.append(annotation) + idx += 1 asset_id_to_annotations[asset_id] = single_image_annotations return asset_id_to_annotations diff --git a/ymir/command/mir/commands/model_importing.py b/ymir/command/mir/commands/model_importing.py index 9f203d6313..0f82fa97a8 100644 --- a/ymir/command/mir/commands/model_importing.py +++ b/ymir/command/mir/commands/model_importing.py @@ 
-7,7 +7,7 @@ from mir.commands import base from mir.protos import mir_command_pb2 as mirpb -from mir.tools import checker, context, mir_storage_ops, revs_parser +from mir.tools import checker, mir_storage_ops, revs_parser from mir.tools import settings as mir_settings, utils as mir_utils from mir.tools.code import MirCode from mir.tools.command_run_in_out import command_run_in_out @@ -98,11 +98,6 @@ def _check_model(model_storage: mir_utils.ModelStorage, mir_root: str) -> int: raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_FILE, error_message=f"can not import model, invalid producer: {producer}") - # check class names - class_names = model_storage.class_names - if not context.check_class_names(mir_root=mir_root, current_class_names=class_names): - raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message='user class ids mismatch') - return MirCode.RC_OK diff --git a/ymir/command/mir/commands/training.py b/ymir/command/mir/commands/training.py index c5c0758eef..8fad77b36e 100644 --- a/ymir/command/mir/commands/training.py +++ b/ymir/command/mir/commands/training.py @@ -1,11 +1,13 @@ import argparse import logging import os +import time import subprocess from subprocess import CalledProcessError import traceback from typing import Any, List, Optional, Set, Tuple +from tensorboardX import SummaryWriter import yaml from mir.commands import base @@ -274,10 +276,13 @@ def run_with_args(work_dir: str, # type names to type ids # ['cat', 'person'] -> [4, 2] cls_mgr = class_ids.ClassIdManager(mir_root=mir_root) - type_ids_list = cls_mgr.id_for_names(class_names) + type_ids_list, unknown_names = cls_mgr.id_for_names(class_names) if not type_ids_list: logging.info(f"type ids empty, please check config file: {config_file}") return MirCode.RC_CMD_INVALID_ARGS + if unknown_names: + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, + error_message=f"unknown class names: {unknown_names}") if not context.check_class_ids(mir_root=mir_root, current_class_ids=type_ids_list): raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message='user class ids mismatch') @@ -355,6 +360,11 @@ def run_with_args(work_dir: str, task_code = MirCode.RC_CMD_CONTAINER_ERROR return_msg = mir_utils.collect_executor_outlog_tail(work_dir=work_dir) + # write executor tail to tensorboard + if return_msg: + with SummaryWriter(logdir=tensorboard_dir) as tb_writer: + tb_writer.add_text(tag='executor tail', text_string=f"```\n{return_msg}\n```", walltime=time.time()) + # gen task_context task_context = { 'src_revs': src_revs, diff --git a/ymir/command/mir/protos/mir_command_pb2.py b/ymir/command/mir/protos/mir_command_pb2.py index 6110d153e6..109da61112 100644 --- a/ymir/command/mir/protos/mir_command_pb2.py +++ b/ymir/command/mir/protos/mir_command_pb2.py @@ -20,7 +20,7 @@ syntax='proto3', serialized_options=None, create_key=_descriptor._internal_create_key, - serialized_pb=b'\n\x11mir_command.proto\x12\x0bmir.command\"\xa1\x01\n\x0cMirMetadatas\x12=\n\nattributes\x18\x01 \x03(\x0b\x32).mir.command.MirMetadatas.AttributesEntry\x1aR\n\x0f\x41ttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12.\n\x05value\x18\x02 \x01(\x0b\x32\x1f.mir.command.MetadataAttributes:\x02\x38\x01\"\xe0\x01\n\x12MetadataAttributes\x12\x14\n\x0c\x64\x61taset_name\x18\x01 \x01(\t\x12)\n\ttimestamp\x18\x02 \x01(\x0b\x32\x16.mir.command.Timestamp\x12&\n\x08tvt_type\x18\x03 \x01(\x0e\x32\x14.mir.command.TvtType\x12*\n\nasset_type\x18\x04 \x01(\x0e\x32\x16.mir.command.AssetType\x12\r\n\x05width\x18\x05 
\x01(\x05\x12\x0e\n\x06height\x18\x06 \x01(\x05\x12\x16\n\x0eimage_channels\x18\x07 \x01(\x05\",\n\tTimestamp\x12\r\n\x05start\x18\x01 \x01(\x03\x12\x10\n\x08\x64uration\x18\x02 \x01(\x02\"\xce\x01\n\x0eMirAnnotations\x12J\n\x10task_annotations\x18\x01 \x03(\x0b\x32\x30.mir.command.MirAnnotations.TaskAnnotationsEntry\x12\x14\n\x0chead_task_id\x18\x02 \x01(\t\x1aZ\n\x14TaskAnnotationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x31\n\x05value\x18\x02 \x01(\x0b\x32\".mir.command.SingleTaskAnnotations:\x02\x38\x01\"\xca\x01\n\x15SingleTaskAnnotations\x12S\n\x11image_annotations\x18\x01 \x03(\x0b\x32\x38.mir.command.SingleTaskAnnotations.ImageAnnotationsEntry\x1a\\\n\x15ImageAnnotationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x32\n\x05value\x18\x02 \x01(\x0b\x32#.mir.command.SingleImageAnnotations:\x02\x38\x01\"F\n\x16SingleImageAnnotations\x12,\n\x0b\x61nnotations\x18\x02 \x03(\x0b\x32\x17.mir.command.Annotation\"\\\n\nAnnotation\x12\r\n\x05index\x18\x01 \x01(\x05\x12\x1e\n\x03\x62ox\x18\x02 \x01(\x0b\x32\x11.mir.command.Rect\x12\x10\n\x08\x63lass_id\x18\x03 \x01(\x05\x12\r\n\x05score\x18\x04 \x01(\x01\"2\n\x04Rect\x12\t\n\x01x\x18\x01 \x01(\x05\x12\t\n\x01y\x18\x02 \x01(\x05\x12\t\n\x01w\x18\x03 \x01(\x05\x12\t\n\x01h\x18\x04 \x01(\x05\"\xd0\x02\n\x0bMirKeywords\x12\x38\n\x08keywords\x18\x01 \x03(\x0b\x32&.mir.command.MirKeywords.KeywordsEntry\x12T\n\x17index_predifined_keyids\x18\x06 \x03(\x0b\x32\x33.mir.command.MirKeywords.IndexPredifinedKeyidsEntry\x1a\x46\n\rKeywordsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12$\n\x05value\x18\x02 \x01(\x0b\x32\x15.mir.command.Keywords:\x02\x38\x01\x1aQ\n\x1aIndexPredifinedKeyidsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.mir.command.Assets:\x02\x38\x01J\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04J\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06\"\x1b\n\x06\x41ssets\x12\x11\n\tasset_ids\x18\x01 \x03(\t\"B\n\x08Keywords\x12\x19\n\x11predifined_keyids\x18\x01 \x03(\x05\x12\x1b\n\x13\x63ustomized_keywords\x18\x02 \x03(\t\"\x92\x01\n\x08MirTasks\x12/\n\x05tasks\x18\x01 \x03(\x0b\x32 .mir.command.MirTasks.TasksEntry\x12\x14\n\x0chead_task_id\x18\x02 \x01(\t\x1a?\n\nTasksEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12 \n\x05value\x18\x02 \x01(\x0b\x32\x11.mir.command.Task:\x02\x38\x01\"\xad\x03\n\x04Task\x12#\n\x04type\x18\x01 \x01(\x0e\x32\x15.mir.command.TaskType\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0f\n\x07task_id\x18\x03 \x01(\t\x12\x11\n\ttimestamp\x18\x05 \x01(\x03\x12%\n\x05model\x18\x06 \x01(\x0b\x32\x16.mir.command.ModelMeta\x12:\n\runknown_types\x18\x07 \x03(\x0b\x32#.mir.command.Task.UnknownTypesEntry\x12\x13\n\x0breturn_code\x18\x08 \x01(\x05\x12\x12\n\nreturn_msg\x18\t \x01(\t\x12\"\n\x1aserialized_task_parameters\x18\x66 \x01(\t\x12\"\n\x1aserialized_executor_config\x18g \x01(\t\x12\x10\n\x08src_revs\x18h \x01(\t\x12\x0f\n\x07\x64st_rev\x18i \x01(\t\x12\x10\n\x08\x65xecutor\x18j \x01(\t\x1a\x33\n\x11UnknownTypesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01J\x04\x08\x04\x10\x05J\x04\x08\x64\x10\x65J\x04\x08\x65\x10\x66\"P\n\tModelMeta\x12\x12\n\nmodel_hash\x18\x01 \x01(\t\x12\x1e\n\x16mean_average_precision\x18\x02 \x01(\x02\x12\x0f\n\x07\x63ontext\x18\x03 \x01(\t\"\xa5\x04\n\nMirContext\x12\x12\n\nimages_cnt\x18\x01 \x01(\x05\x12\x1b\n\x13negative_images_cnt\x18\x02 \x01(\x05\x12#\n\x1bproject_negative_images_cnt\x18\x03 \x01(\x05\x12O\n\x15predefined_keyids_cnt\x18\x04 \x03(\x0b\x32\x30.mir.command.MirContext.PredefinedKeyidsCntEntry\x12^\n\x1dproject_predefined_keyids_cnt\x18\x05 
\x03(\x0b\x32\x37.mir.command.MirContext.ProjectPredefinedKeyidsCntEntry\x12S\n\x17\x63ustomized_keywords_cnt\x18\x06 \x03(\x0b\x32\x32.mir.command.MirContext.CustomizedKeywordsCntEntry\x1a:\n\x18PredefinedKeyidsCntEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x41\n\x1fProjectPredefinedKeyidsCntEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a<\n\x1a\x43ustomizedKeywordsCntEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01*Z\n\x07TvtType\x12\x12\n\x0eTvtTypeUnknown\x10\x00\x12\x13\n\x0fTvtTypeTraining\x10\x01\x12\x15\n\x11TvtTypeValidation\x10\x02\x12\x0f\n\x0bTvtTypeTest\x10\x03*\x88\x02\n\tAssetType\x12\x14\n\x10\x41ssetTypeUnknown\x10\x00\x12\x16\n\x12\x41ssetTypeImageJpeg\x10\x01\x12\x15\n\x11\x41ssetTypeImagePng\x10\x02\x12\x1a\n\x16\x41ssetTypeImagePixelMat\x10\x03\x12\x19\n\x15\x41ssetTypeImageYuv420p\x10\x04\x12\x1a\n\x16\x41ssetTypeImageYuv420sp\x10\x05\x12\x19\n\x15\x41ssetTypeImageYuv422p\x10\x06\x12\x1a\n\x16\x41ssetTypeImageYuv422sp\x10\x07\x12\x15\n\x11\x41ssetTypeImageBmp\x10\x08\x12\x15\n\x11\x41ssetTypeVideoMp4\x10\x65*\xbd\x02\n\x08TaskType\x12\x13\n\x0fTaskTypeUnknown\x10\x00\x12\x14\n\x10TaskTypeTraining\x10\x01\x12\x12\n\x0eTaskTypeMining\x10\x02\x12\x11\n\rTaskTypeLabel\x10\x03\x12\x12\n\x0eTaskTypeFilter\x10\x04\x12\x16\n\x12TaskTypeImportData\x10\x05\x12\x16\n\x12TaskTypeExportData\x10\x06\x12\x14\n\x10TaskTypeCopyData\x10\x07\x12\x11\n\rTaskTypeMerge\x10\x08\x12\x11\n\rTaskTypeInfer\x10\t\x12\x14\n\x10TaskTypeSampling\x10\n\x12\x12\n\x0eTaskTypeFusion\x10\x0b\x12\x10\n\x0cTaskTypeInit\x10\x0c\x12\x17\n\x13TaskTypeImportModel\x10\r\"\x04\x08\x0e\x10\x0e\"\x04\x08\x0f\x10\x0f*\x87\x01\n\tTaskState\x12\x14\n\x10TaskStateUnknown\x10\x00\x12\x14\n\x10TaskStatePending\x10\x01\x12\x14\n\x10TaskStateRunning\x10\x02\x12\x11\n\rTaskStateDone\x10\x03\x12\x12\n\x0eTaskStateError\x10\x04\x12\x11\n\rTaskStateMiss\x10\x05*L\n\x08Sha1Type\x12\x15\n\x11SHA1_TYPE_UNKNOWN\x10\x00\x12\x13\n\x0fSHA1_TYPE_ASSET\x10\x01\x12\x14\n\x10SHA1_TYPE_COMMIT\x10\x02*f\n\nMirStorage\x12\x11\n\rMIR_METADATAS\x10\x00\x12\x13\n\x0fMIR_ANNOTATIONS\x10\x01\x12\x10\n\x0cMIR_KEYWORDS\x10\x02\x12\r\n\tMIR_TASKS\x10\x03\x12\x0f\n\x0bMIR_CONTEXT\x10\x04*<\n\x0bLabelFormat\x12\x11\n\rNO_ANNOTATION\x10\x00\x12\x0e\n\nPASCAL_VOC\x10\x01\x12\n\n\x06IF_ARK\x10\x02\x62\x06proto3' + serialized_pb=b'\n\x11mir_command.proto\x12\x0bmir.command\"\xa1\x01\n\x0cMirMetadatas\x12=\n\nattributes\x18\x01 \x03(\x0b\x32).mir.command.MirMetadatas.AttributesEntry\x1aR\n\x0f\x41ttributesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12.\n\x05value\x18\x02 \x01(\x0b\x32\x1f.mir.command.MetadataAttributes:\x02\x38\x01\"\xe0\x01\n\x12MetadataAttributes\x12\x14\n\x0c\x64\x61taset_name\x18\x01 \x01(\t\x12)\n\ttimestamp\x18\x02 \x01(\x0b\x32\x16.mir.command.Timestamp\x12&\n\x08tvt_type\x18\x03 \x01(\x0e\x32\x14.mir.command.TvtType\x12*\n\nasset_type\x18\x04 \x01(\x0e\x32\x16.mir.command.AssetType\x12\r\n\x05width\x18\x05 \x01(\x05\x12\x0e\n\x06height\x18\x06 \x01(\x05\x12\x16\n\x0eimage_channels\x18\x07 \x01(\x05\",\n\tTimestamp\x12\r\n\x05start\x18\x01 \x01(\x03\x12\x10\n\x08\x64uration\x18\x02 \x01(\x02\"\xce\x01\n\x0eMirAnnotations\x12J\n\x10task_annotations\x18\x01 \x03(\x0b\x32\x30.mir.command.MirAnnotations.TaskAnnotationsEntry\x12\x14\n\x0chead_task_id\x18\x02 \x01(\t\x1aZ\n\x14TaskAnnotationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x31\n\x05value\x18\x02 
\x01(\x0b\x32\".mir.command.SingleTaskAnnotations:\x02\x38\x01\"\xca\x01\n\x15SingleTaskAnnotations\x12S\n\x11image_annotations\x18\x01 \x03(\x0b\x32\x38.mir.command.SingleTaskAnnotations.ImageAnnotationsEntry\x1a\\\n\x15ImageAnnotationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x32\n\x05value\x18\x02 \x01(\x0b\x32#.mir.command.SingleImageAnnotations:\x02\x38\x01\"F\n\x16SingleImageAnnotations\x12,\n\x0b\x61nnotations\x18\x02 \x03(\x0b\x32\x17.mir.command.Annotation\"\\\n\nAnnotation\x12\r\n\x05index\x18\x01 \x01(\x05\x12\x1e\n\x03\x62ox\x18\x02 \x01(\x0b\x32\x11.mir.command.Rect\x12\x10\n\x08\x63lass_id\x18\x03 \x01(\x05\x12\r\n\x05score\x18\x04 \x01(\x01\"2\n\x04Rect\x12\t\n\x01x\x18\x01 \x01(\x05\x12\t\n\x01y\x18\x02 \x01(\x05\x12\t\n\x01w\x18\x03 \x01(\x05\x12\t\n\x01h\x18\x04 \x01(\x05\"\xd0\x02\n\x0bMirKeywords\x12\x38\n\x08keywords\x18\x01 \x03(\x0b\x32&.mir.command.MirKeywords.KeywordsEntry\x12T\n\x17index_predifined_keyids\x18\x06 \x03(\x0b\x32\x33.mir.command.MirKeywords.IndexPredifinedKeyidsEntry\x1a\x46\n\rKeywordsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12$\n\x05value\x18\x02 \x01(\x0b\x32\x15.mir.command.Keywords:\x02\x38\x01\x1aQ\n\x1aIndexPredifinedKeyidsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\"\n\x05value\x18\x02 \x01(\x0b\x32\x13.mir.command.Assets:\x02\x38\x01J\x04\x08\x02\x10\x03J\x04\x08\x03\x10\x04J\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06\"\x1b\n\x06\x41ssets\x12\x11\n\tasset_ids\x18\x01 \x03(\t\"B\n\x08Keywords\x12\x19\n\x11predifined_keyids\x18\x01 \x03(\x05\x12\x1b\n\x13\x63ustomized_keywords\x18\x02 \x03(\t\"\x92\x01\n\x08MirTasks\x12/\n\x05tasks\x18\x01 \x03(\x0b\x32 .mir.command.MirTasks.TasksEntry\x12\x14\n\x0chead_task_id\x18\x02 \x01(\t\x1a?\n\nTasksEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12 \n\x05value\x18\x02 \x01(\x0b\x32\x11.mir.command.Task:\x02\x38\x01\"\xda\x03\n\x04Task\x12#\n\x04type\x18\x01 \x01(\x0e\x32\x15.mir.command.TaskType\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0f\n\x07task_id\x18\x03 \x01(\t\x12\x11\n\ttimestamp\x18\x05 \x01(\x03\x12%\n\x05model\x18\x06 \x01(\x0b\x32\x16.mir.command.ModelMeta\x12:\n\runknown_types\x18\x07 \x03(\x0b\x32#.mir.command.Task.UnknownTypesEntry\x12\x13\n\x0breturn_code\x18\x08 \x01(\x05\x12\x12\n\nreturn_msg\x18\t \x01(\t\x12+\n\nevaluation\x18\n \x01(\x0b\x32\x17.mir.command.Evaluation\x12\"\n\x1aserialized_task_parameters\x18\x66 \x01(\t\x12\"\n\x1aserialized_executor_config\x18g \x01(\t\x12\x10\n\x08src_revs\x18h \x01(\t\x12\x0f\n\x07\x64st_rev\x18i \x01(\t\x12\x10\n\x08\x65xecutor\x18j \x01(\t\x1a\x33\n\x11UnknownTypesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01J\x04\x08\x04\x10\x05J\x04\x08\x64\x10\x65J\x04\x08\x65\x10\x66\"P\n\tModelMeta\x12\x12\n\nmodel_hash\x18\x01 \x01(\t\x12\x1e\n\x16mean_average_precision\x18\x02 \x01(\x02\x12\x0f\n\x07\x63ontext\x18\x03 \x01(\t\"\xe8\x01\n\nEvaluation\x12+\n\x06\x63onfig\x18\x01 \x01(\x0b\x32\x1b.mir.command.EvaluateConfig\x12L\n\x13\x64\x61taset_evaluations\x18\x02 \x03(\x0b\x32/.mir.command.Evaluation.DatasetEvaluationsEntry\x1a_\n\x17\x44\x61tasetEvaluationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x33\n\x05value\x18\x02 \x01(\x0b\x32$.mir.command.SingleDatasetEvaluation:\x02\x38\x01\"\x85\x01\n\x0e\x45valuateConfig\x12\x15\n\rgt_dataset_id\x18\x01 \x01(\t\x12\x18\n\x10pred_dataset_ids\x18\x02 \x03(\t\x12\x10\n\x08\x63onf_thr\x18\x03 \x01(\x02\x12\x19\n\x11iou_thrs_interval\x18\x04 \x01(\t\x12\x15\n\rneed_pr_curve\x18\x05 \x01(\x08\"\xca\x02\n\x17SingleDatasetEvaluation\x12\x10\n\x08\x63onf_thr\x18\x01 
\x01(\x02\x12\x15\n\rgt_dataset_id\x18\x02 \x01(\t\x12\x17\n\x0fpred_dataset_id\x18\x03 \x01(\t\x12Q\n\x0fiou_evaluations\x18\x04 \x03(\x0b\x32\x38.mir.command.SingleDatasetEvaluation.IouEvaluationsEntry\x12\x41\n\x17iou_averaged_evaluation\x18\x05 \x01(\x0b\x32 .mir.command.SingleIouEvaluation\x1aW\n\x13IouEvaluationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12/\n\x05value\x18\x02 \x01(\x0b\x32 .mir.command.SingleIouEvaluation:\x02\x38\x01\"\xb0\x03\n\x13SingleIouEvaluation\x12K\n\x0e\x63i_evaluations\x18\x01 \x03(\x0b\x32\x33.mir.command.SingleIouEvaluation.CiEvaluationsEntry\x12\x42\n\x16\x63i_averaged_evaluation\x18\x02 \x01(\x0b\x32\".mir.command.SingleTopicEvaluation\x12Q\n\x11topic_evaluations\x18\x03 \x03(\x0b\x32\x36.mir.command.SingleIouEvaluation.TopicEvaluationsEntry\x1aX\n\x12\x43iEvaluationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\x31\n\x05value\x18\x02 \x01(\x0b\x32\".mir.command.SingleTopicEvaluation:\x02\x38\x01\x1a[\n\x15TopicEvaluationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x31\n\x05value\x18\x02 \x01(\x0b\x32\".mir.command.SingleTopicEvaluation:\x02\x38\x01\"~\n\x15SingleTopicEvaluation\x12\n\n\x02\x61p\x18\x01 \x01(\x02\x12\n\n\x02\x61r\x18\x02 \x01(\x02\x12\n\n\x02tp\x18\x03 \x01(\x05\x12\n\n\x02\x66p\x18\x04 \x01(\x05\x12\n\n\x02\x66n\x18\x05 \x01(\x05\x12)\n\x08pr_curve\x18\x06 \x03(\x0b\x32\x17.mir.command.FloatPoint\"\"\n\nFloatPoint\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02\"\xa5\x04\n\nMirContext\x12\x12\n\nimages_cnt\x18\x01 \x01(\x05\x12\x1b\n\x13negative_images_cnt\x18\x02 \x01(\x05\x12#\n\x1bproject_negative_images_cnt\x18\x03 \x01(\x05\x12O\n\x15predefined_keyids_cnt\x18\x04 \x03(\x0b\x32\x30.mir.command.MirContext.PredefinedKeyidsCntEntry\x12^\n\x1dproject_predefined_keyids_cnt\x18\x05 \x03(\x0b\x32\x37.mir.command.MirContext.ProjectPredefinedKeyidsCntEntry\x12S\n\x17\x63ustomized_keywords_cnt\x18\x06 \x03(\x0b\x32\x32.mir.command.MirContext.CustomizedKeywordsCntEntry\x1a:\n\x18PredefinedKeyidsCntEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a\x41\n\x1fProjectPredefinedKeyidsCntEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\x1a<\n\x1a\x43ustomizedKeywordsCntEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\x05:\x02\x38\x01*Z\n\x07TvtType\x12\x12\n\x0eTvtTypeUnknown\x10\x00\x12\x13\n\x0fTvtTypeTraining\x10\x01\x12\x15\n\x11TvtTypeValidation\x10\x02\x12\x0f\n\x0bTvtTypeTest\x10\x03*\x88\x02\n\tAssetType\x12\x14\n\x10\x41ssetTypeUnknown\x10\x00\x12\x16\n\x12\x41ssetTypeImageJpeg\x10\x01\x12\x15\n\x11\x41ssetTypeImagePng\x10\x02\x12\x1a\n\x16\x41ssetTypeImagePixelMat\x10\x03\x12\x19\n\x15\x41ssetTypeImageYuv420p\x10\x04\x12\x1a\n\x16\x41ssetTypeImageYuv420sp\x10\x05\x12\x19\n\x15\x41ssetTypeImageYuv422p\x10\x06\x12\x1a\n\x16\x41ssetTypeImageYuv422sp\x10\x07\x12\x15\n\x11\x41ssetTypeImageBmp\x10\x08\x12\x15\n\x11\x41ssetTypeVideoMp4\x10\x65*\xd3\x02\n\x08TaskType\x12\x13\n\x0fTaskTypeUnknown\x10\x00\x12\x14\n\x10TaskTypeTraining\x10\x01\x12\x12\n\x0eTaskTypeMining\x10\x02\x12\x11\n\rTaskTypeLabel\x10\x03\x12\x12\n\x0eTaskTypeFilter\x10\x04\x12\x16\n\x12TaskTypeImportData\x10\x05\x12\x16\n\x12TaskTypeExportData\x10\x06\x12\x14\n\x10TaskTypeCopyData\x10\x07\x12\x11\n\rTaskTypeMerge\x10\x08\x12\x11\n\rTaskTypeInfer\x10\t\x12\x14\n\x10TaskTypeSampling\x10\n\x12\x12\n\x0eTaskTypeFusion\x10\x0b\x12\x10\n\x0cTaskTypeInit\x10\x0c\x12\x17\n\x13TaskTypeImportModel\x10\r\x12\x14\n\x10TaskTypeEvaluate\x10\x10\"\x04\x08\x0e\x10\x0e\"\x04\x08\x0f\x10\x0f*\x87\x01\n\tTaskState\x12\x14\n\x10TaskStateUnknown\x10\x00\x12\x14\n\x10TaskStatePending\x10\x01\x12\x14\n\x10TaskStateRunning\x10\x02\x12\x11\n\rTaskStateDone\x10\x03\x12\x12\n\x0eTaskStateError\x10\x04\x12\x11\n\rTaskStateMiss\x10\x05*L\n\x08Sha1Type\x12\x15\n\x11SHA1_TYPE_UNKNOWN\x10\x00\x12\x13\n\x0fSHA1_TYPE_ASSET\x10\x01\x12\x14\n\x10SHA1_TYPE_COMMIT\x10\x02*f\n\nMirStorage\x12\x11\n\rMIR_METADATAS\x10\x00\x12\x13\n\x0fMIR_ANNOTATIONS\x10\x01\x12\x10\n\x0cMIR_KEYWORDS\x10\x02\x12\r\n\tMIR_TASKS\x10\x03\x12\x0f\n\x0bMIR_CONTEXT\x10\x04*<\n\x0bLabelFormat\x12\x11\n\rNO_ANNOTATION\x10\x00\x12\x0e\n\nPASCAL_VOC\x10\x01\x12\n\n\x06IF_ARK\x10\x02\x62\x06proto3' ) _TVTTYPE = _descriptor.EnumDescriptor( @@ -53,8 +53,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=2754, - serialized_end=2844, + serialized_start=4102, + serialized_end=4192, ) _sym_db.RegisterEnumDescriptor(_TVTTYPE) @@ -119,8 +119,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=2847, - serialized_end=3111, + serialized_start=4195, + serialized_end=4459, ) _sym_db.RegisterEnumDescriptor(_ASSETTYPE) @@ -202,11 +202,16 @@ serialized_options=None, type=None, create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='TaskTypeEvaluate', index=14, number=16, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), ], containing_type=None, serialized_options=None, - serialized_start=3114, - serialized_end=3431, + serialized_start=4462, + serialized_end=4801, ) _sym_db.RegisterEnumDescriptor(_TASKTYPE) @@ -251,8 +256,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=3434, - serialized_end=3569, + serialized_start=4804, + serialized_end=4939, ) _sym_db.RegisterEnumDescriptor(_TASKSTATE) @@ -282,8 +287,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=3571, - serialized_end=3647, + serialized_start=4941, + serialized_end=5017, ) _sym_db.RegisterEnumDescriptor(_SHA1TYPE) @@ -323,8 +328,8 @@ ], containing_type=None, serialized_options=None, - serialized_start=3649, - serialized_end=3751, + serialized_start=5019, + serialized_end=5121, ) _sym_db.RegisterEnumDescriptor(_MIRSTORAGE) @@ -354,8 +359,8 @@ ], containing_type=None, 
serialized_options=None, - serialized_start=3753, - serialized_end=3813, + serialized_start=5123, + serialized_end=5183, ) _sym_db.RegisterEnumDescriptor(_LABELFORMAT) @@ -388,6 +393,7 @@ TaskTypeFusion = 11 TaskTypeInit = 12 TaskTypeImportModel = 13 +TaskTypeEvaluate = 16 TaskStateUnknown = 0 TaskStatePending = 1 TaskStateRunning = 2 @@ -1173,8 +1179,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2049, - serialized_end=2100, + serialized_start=2094, + serialized_end=2145, ) _TASK = _descriptor.Descriptor( @@ -1242,35 +1248,42 @@ is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='serialized_task_parameters', full_name='mir.command.Task.serialized_task_parameters', index=8, + name='evaluation', full_name='mir.command.Task.evaluation', index=8, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='serialized_task_parameters', full_name='mir.command.Task.serialized_task_parameters', index=9, number=102, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='serialized_executor_config', full_name='mir.command.Task.serialized_executor_config', index=9, + name='serialized_executor_config', full_name='mir.command.Task.serialized_executor_config', index=10, number=103, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='src_revs', full_name='mir.command.Task.src_revs', index=10, + name='src_revs', full_name='mir.command.Task.src_revs', index=11, number=104, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='dst_rev', full_name='mir.command.Task.dst_rev', index=11, + name='dst_rev', full_name='mir.command.Task.dst_rev', index=12, number=105, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor( - name='executor', full_name='mir.command.Task.executor', index=12, + name='executor', full_name='mir.command.Task.executor', index=13, number=106, type=9, cpp_type=9, label=1, has_default_value=False, default_value=b"".decode('utf-8'), message_type=None, enum_type=None, containing_type=None, @@ -1289,7 +1302,7 @@ oneofs=[ ], serialized_start=1689, - serialized_end=2118, + serialized_end=2163, ) @@ -1334,8 +1347,471 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2120, - serialized_end=2200, + 
serialized_start=2165, + serialized_end=2245, +) + + +_EVALUATION_DATASETEVALUATIONSENTRY = _descriptor.Descriptor( + name='DatasetEvaluationsEntry', + full_name='mir.command.Evaluation.DatasetEvaluationsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='mir.command.Evaluation.DatasetEvaluationsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='mir.command.Evaluation.DatasetEvaluationsEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2385, + serialized_end=2480, +) + +_EVALUATION = _descriptor.Descriptor( + name='Evaluation', + full_name='mir.command.Evaluation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='config', full_name='mir.command.Evaluation.config', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='dataset_evaluations', full_name='mir.command.Evaluation.dataset_evaluations', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_EVALUATION_DATASETEVALUATIONSENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2248, + serialized_end=2480, +) + + +_EVALUATECONFIG = _descriptor.Descriptor( + name='EvaluateConfig', + full_name='mir.command.EvaluateConfig', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='gt_dataset_id', full_name='mir.command.EvaluateConfig.gt_dataset_id', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='pred_dataset_ids', full_name='mir.command.EvaluateConfig.pred_dataset_ids', index=1, + number=2, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='conf_thr', full_name='mir.command.EvaluateConfig.conf_thr', index=2, + number=3, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='iou_thrs_interval', full_name='mir.command.EvaluateConfig.iou_thrs_interval', index=3, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='need_pr_curve', full_name='mir.command.EvaluateConfig.need_pr_curve', index=4, + number=5, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2483, + serialized_end=2616, +) + + +_SINGLEDATASETEVALUATION_IOUEVALUATIONSENTRY = _descriptor.Descriptor( + name='IouEvaluationsEntry', + full_name='mir.command.SingleDatasetEvaluation.IouEvaluationsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='mir.command.SingleDatasetEvaluation.IouEvaluationsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='mir.command.SingleDatasetEvaluation.IouEvaluationsEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2862, + serialized_end=2949, +) + +_SINGLEDATASETEVALUATION = _descriptor.Descriptor( + name='SingleDatasetEvaluation', + full_name='mir.command.SingleDatasetEvaluation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='conf_thr', full_name='mir.command.SingleDatasetEvaluation.conf_thr', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, 
file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='gt_dataset_id', full_name='mir.command.SingleDatasetEvaluation.gt_dataset_id', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='pred_dataset_id', full_name='mir.command.SingleDatasetEvaluation.pred_dataset_id', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='iou_evaluations', full_name='mir.command.SingleDatasetEvaluation.iou_evaluations', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='iou_averaged_evaluation', full_name='mir.command.SingleDatasetEvaluation.iou_averaged_evaluation', index=4, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_SINGLEDATASETEVALUATION_IOUEVALUATIONSENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2619, + serialized_end=2949, +) + + +_SINGLEIOUEVALUATION_CIEVALUATIONSENTRY = _descriptor.Descriptor( + name='CiEvaluationsEntry', + full_name='mir.command.SingleIouEvaluation.CiEvaluationsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='mir.command.SingleIouEvaluation.CiEvaluationsEntry.key', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='mir.command.SingleIouEvaluation.CiEvaluationsEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=3203, + serialized_end=3291, +) + +_SINGLEIOUEVALUATION_TOPICEVALUATIONSENTRY = _descriptor.Descriptor( + name='TopicEvaluationsEntry', + full_name='mir.command.SingleIouEvaluation.TopicEvaluationsEntry', + filename=None, + file=DESCRIPTOR, + 
containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='mir.command.SingleIouEvaluation.TopicEvaluationsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='mir.command.SingleIouEvaluation.TopicEvaluationsEntry.value', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=3293, + serialized_end=3384, +) + +_SINGLEIOUEVALUATION = _descriptor.Descriptor( + name='SingleIouEvaluation', + full_name='mir.command.SingleIouEvaluation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='ci_evaluations', full_name='mir.command.SingleIouEvaluation.ci_evaluations', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='ci_averaged_evaluation', full_name='mir.command.SingleIouEvaluation.ci_averaged_evaluation', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='topic_evaluations', full_name='mir.command.SingleIouEvaluation.topic_evaluations', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_SINGLEIOUEVALUATION_CIEVALUATIONSENTRY, _SINGLEIOUEVALUATION_TOPICEVALUATIONSENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2952, + serialized_end=3384, +) + + +_SINGLETOPICEVALUATION = _descriptor.Descriptor( + name='SingleTopicEvaluation', + full_name='mir.command.SingleTopicEvaluation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='ap', full_name='mir.command.SingleTopicEvaluation.ap', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='ar', full_name='mir.command.SingleTopicEvaluation.ar', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='tp', full_name='mir.command.SingleTopicEvaluation.tp', index=2, + number=3, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='fp', full_name='mir.command.SingleTopicEvaluation.fp', index=3, + number=4, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='fn', full_name='mir.command.SingleTopicEvaluation.fn', index=4, + number=5, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='pr_curve', full_name='mir.command.SingleTopicEvaluation.pr_curve', index=5, + number=6, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=3386, + serialized_end=3512, +) + + +_FLOATPOINT = _descriptor.Descriptor( + name='FloatPoint', + full_name='mir.command.FloatPoint', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='x', full_name='mir.command.FloatPoint.x', index=0, + number=1, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='y', full_name='mir.command.FloatPoint.y', index=1, + number=2, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=3514, + serialized_end=3548, ) @@ -1373,8 +1849,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2565, - serialized_end=2623, + serialized_start=3913, + serialized_end=3971, ) 
_MIRCONTEXT_PROJECTPREDEFINEDKEYIDSCNTENTRY = _descriptor.Descriptor( @@ -1411,8 +1887,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2625, - serialized_end=2690, + serialized_start=3973, + serialized_end=4038, ) _MIRCONTEXT_CUSTOMIZEDKEYWORDSCNTENTRY = _descriptor.Descriptor( @@ -1449,8 +1925,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2692, - serialized_end=2752, + serialized_start=4040, + serialized_end=4100, ) _MIRCONTEXT = _descriptor.Descriptor( @@ -1515,8 +1991,8 @@ extension_ranges=[], oneofs=[ ], - serialized_start=2203, - serialized_end=2752, + serialized_start=3551, + serialized_end=4100, ) _MIRMETADATAS_ATTRIBUTESENTRY.fields_by_name['value'].message_type = _METADATAATTRIBUTES @@ -1546,6 +2022,23 @@ _TASK.fields_by_name['type'].enum_type = _TASKTYPE _TASK.fields_by_name['model'].message_type = _MODELMETA _TASK.fields_by_name['unknown_types'].message_type = _TASK_UNKNOWNTYPESENTRY +_TASK.fields_by_name['evaluation'].message_type = _EVALUATION +_EVALUATION_DATASETEVALUATIONSENTRY.fields_by_name['value'].message_type = _SINGLEDATASETEVALUATION +_EVALUATION_DATASETEVALUATIONSENTRY.containing_type = _EVALUATION +_EVALUATION.fields_by_name['config'].message_type = _EVALUATECONFIG +_EVALUATION.fields_by_name['dataset_evaluations'].message_type = _EVALUATION_DATASETEVALUATIONSENTRY +_SINGLEDATASETEVALUATION_IOUEVALUATIONSENTRY.fields_by_name['value'].message_type = _SINGLEIOUEVALUATION +_SINGLEDATASETEVALUATION_IOUEVALUATIONSENTRY.containing_type = _SINGLEDATASETEVALUATION +_SINGLEDATASETEVALUATION.fields_by_name['iou_evaluations'].message_type = _SINGLEDATASETEVALUATION_IOUEVALUATIONSENTRY +_SINGLEDATASETEVALUATION.fields_by_name['iou_averaged_evaluation'].message_type = _SINGLEIOUEVALUATION +_SINGLEIOUEVALUATION_CIEVALUATIONSENTRY.fields_by_name['value'].message_type = _SINGLETOPICEVALUATION +_SINGLEIOUEVALUATION_CIEVALUATIONSENTRY.containing_type = _SINGLEIOUEVALUATION +_SINGLEIOUEVALUATION_TOPICEVALUATIONSENTRY.fields_by_name['value'].message_type = _SINGLETOPICEVALUATION +_SINGLEIOUEVALUATION_TOPICEVALUATIONSENTRY.containing_type = _SINGLEIOUEVALUATION +_SINGLEIOUEVALUATION.fields_by_name['ci_evaluations'].message_type = _SINGLEIOUEVALUATION_CIEVALUATIONSENTRY +_SINGLEIOUEVALUATION.fields_by_name['ci_averaged_evaluation'].message_type = _SINGLETOPICEVALUATION +_SINGLEIOUEVALUATION.fields_by_name['topic_evaluations'].message_type = _SINGLEIOUEVALUATION_TOPICEVALUATIONSENTRY +_SINGLETOPICEVALUATION.fields_by_name['pr_curve'].message_type = _FLOATPOINT _MIRCONTEXT_PREDEFINEDKEYIDSCNTENTRY.containing_type = _MIRCONTEXT _MIRCONTEXT_PROJECTPREDEFINEDKEYIDSCNTENTRY.containing_type = _MIRCONTEXT _MIRCONTEXT_CUSTOMIZEDKEYWORDSCNTENTRY.containing_type = _MIRCONTEXT @@ -1566,6 +2059,12 @@ DESCRIPTOR.message_types_by_name['MirTasks'] = _MIRTASKS DESCRIPTOR.message_types_by_name['Task'] = _TASK DESCRIPTOR.message_types_by_name['ModelMeta'] = _MODELMETA +DESCRIPTOR.message_types_by_name['Evaluation'] = _EVALUATION +DESCRIPTOR.message_types_by_name['EvaluateConfig'] = _EVALUATECONFIG +DESCRIPTOR.message_types_by_name['SingleDatasetEvaluation'] = _SINGLEDATASETEVALUATION +DESCRIPTOR.message_types_by_name['SingleIouEvaluation'] = _SINGLEIOUEVALUATION +DESCRIPTOR.message_types_by_name['SingleTopicEvaluation'] = _SINGLETOPICEVALUATION +DESCRIPTOR.message_types_by_name['FloatPoint'] = _FLOATPOINT DESCRIPTOR.message_types_by_name['MirContext'] = _MIRCONTEXT DESCRIPTOR.enum_types_by_name['TvtType'] = _TVTTYPE DESCRIPTOR.enum_types_by_name['AssetType'] = _ASSETTYPE @@ -1730,6 
+2229,80 @@ }) _sym_db.RegisterMessage(ModelMeta) +Evaluation = _reflection.GeneratedProtocolMessageType('Evaluation', (_message.Message,), { + + 'DatasetEvaluationsEntry' : _reflection.GeneratedProtocolMessageType('DatasetEvaluationsEntry', (_message.Message,), { + 'DESCRIPTOR' : _EVALUATION_DATASETEVALUATIONSENTRY, + '__module__' : 'mir_command_pb2' + # @@protoc_insertion_point(class_scope:mir.command.Evaluation.DatasetEvaluationsEntry) + }) + , + 'DESCRIPTOR' : _EVALUATION, + '__module__' : 'mir_command_pb2' + # @@protoc_insertion_point(class_scope:mir.command.Evaluation) + }) +_sym_db.RegisterMessage(Evaluation) +_sym_db.RegisterMessage(Evaluation.DatasetEvaluationsEntry) + +EvaluateConfig = _reflection.GeneratedProtocolMessageType('EvaluateConfig', (_message.Message,), { + 'DESCRIPTOR' : _EVALUATECONFIG, + '__module__' : 'mir_command_pb2' + # @@protoc_insertion_point(class_scope:mir.command.EvaluateConfig) + }) +_sym_db.RegisterMessage(EvaluateConfig) + +SingleDatasetEvaluation = _reflection.GeneratedProtocolMessageType('SingleDatasetEvaluation', (_message.Message,), { + + 'IouEvaluationsEntry' : _reflection.GeneratedProtocolMessageType('IouEvaluationsEntry', (_message.Message,), { + 'DESCRIPTOR' : _SINGLEDATASETEVALUATION_IOUEVALUATIONSENTRY, + '__module__' : 'mir_command_pb2' + # @@protoc_insertion_point(class_scope:mir.command.SingleDatasetEvaluation.IouEvaluationsEntry) + }) + , + 'DESCRIPTOR' : _SINGLEDATASETEVALUATION, + '__module__' : 'mir_command_pb2' + # @@protoc_insertion_point(class_scope:mir.command.SingleDatasetEvaluation) + }) +_sym_db.RegisterMessage(SingleDatasetEvaluation) +_sym_db.RegisterMessage(SingleDatasetEvaluation.IouEvaluationsEntry) + +SingleIouEvaluation = _reflection.GeneratedProtocolMessageType('SingleIouEvaluation', (_message.Message,), { + + 'CiEvaluationsEntry' : _reflection.GeneratedProtocolMessageType('CiEvaluationsEntry', (_message.Message,), { + 'DESCRIPTOR' : _SINGLEIOUEVALUATION_CIEVALUATIONSENTRY, + '__module__' : 'mir_command_pb2' + # @@protoc_insertion_point(class_scope:mir.command.SingleIouEvaluation.CiEvaluationsEntry) + }) + , + + 'TopicEvaluationsEntry' : _reflection.GeneratedProtocolMessageType('TopicEvaluationsEntry', (_message.Message,), { + 'DESCRIPTOR' : _SINGLEIOUEVALUATION_TOPICEVALUATIONSENTRY, + '__module__' : 'mir_command_pb2' + # @@protoc_insertion_point(class_scope:mir.command.SingleIouEvaluation.TopicEvaluationsEntry) + }) + , + 'DESCRIPTOR' : _SINGLEIOUEVALUATION, + '__module__' : 'mir_command_pb2' + # @@protoc_insertion_point(class_scope:mir.command.SingleIouEvaluation) + }) +_sym_db.RegisterMessage(SingleIouEvaluation) +_sym_db.RegisterMessage(SingleIouEvaluation.CiEvaluationsEntry) +_sym_db.RegisterMessage(SingleIouEvaluation.TopicEvaluationsEntry) + +SingleTopicEvaluation = _reflection.GeneratedProtocolMessageType('SingleTopicEvaluation', (_message.Message,), { + 'DESCRIPTOR' : _SINGLETOPICEVALUATION, + '__module__' : 'mir_command_pb2' + # @@protoc_insertion_point(class_scope:mir.command.SingleTopicEvaluation) + }) +_sym_db.RegisterMessage(SingleTopicEvaluation) + +FloatPoint = _reflection.GeneratedProtocolMessageType('FloatPoint', (_message.Message,), { + 'DESCRIPTOR' : _FLOATPOINT, + '__module__' : 'mir_command_pb2' + # @@protoc_insertion_point(class_scope:mir.command.FloatPoint) + }) +_sym_db.RegisterMessage(FloatPoint) + MirContext = _reflection.GeneratedProtocolMessageType('MirContext', (_message.Message,), { 'PredefinedKeyidsCntEntry' : _reflection.GeneratedProtocolMessageType('PredefinedKeyidsCntEntry', 
(_message.Message,), { @@ -1769,6 +2342,10 @@ _MIRKEYWORDS_INDEXPREDIFINEDKEYIDSENTRY._options = None _MIRTASKS_TASKSENTRY._options = None _TASK_UNKNOWNTYPESENTRY._options = None +_EVALUATION_DATASETEVALUATIONSENTRY._options = None +_SINGLEDATASETEVALUATION_IOUEVALUATIONSENTRY._options = None +_SINGLEIOUEVALUATION_CIEVALUATIONSENTRY._options = None +_SINGLEIOUEVALUATION_TOPICEVALUATIONSENTRY._options = None _MIRCONTEXT_PREDEFINEDKEYIDSCNTENTRY._options = None _MIRCONTEXT_PROJECTPREDEFINEDKEYIDSCNTENTRY._options = None _MIRCONTEXT_CUSTOMIZEDKEYWORDSCNTENTRY._options = None diff --git a/ymir/command/mir/protos/mir_command_pb2.pyi b/ymir/command/mir/protos/mir_command_pb2.pyi index 1721a8b3a5..b1874b43a4 100644 --- a/ymir/command/mir/protos/mir_command_pb2.pyi +++ b/ymir/command/mir/protos/mir_command_pb2.pyi @@ -84,6 +84,7 @@ class _TaskTypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumT TaskTypeInit = TaskType.V(12) TaskTypeImportModel = TaskType.V(13) + TaskTypeEvaluate = TaskType.V(16) TaskTypeUnknown = TaskType.V(0) TaskTypeTraining = TaskType.V(1) @@ -101,6 +102,7 @@ TaskTypeFusion = TaskType.V(11) TaskTypeInit = TaskType.V(12) TaskTypeImportModel = TaskType.V(13) +TaskTypeEvaluate = TaskType.V(16) global___TaskType = TaskType @@ -520,6 +522,7 @@ class Task(google.protobuf.message.Message): UNKNOWN_TYPES_FIELD_NUMBER: builtins.int RETURN_CODE_FIELD_NUMBER: builtins.int RETURN_MSG_FIELD_NUMBER: builtins.int + EVALUATION_FIELD_NUMBER: builtins.int SERIALIZED_TASK_PARAMETERS_FIELD_NUMBER: builtins.int SERIALIZED_EXECUTOR_CONFIG_FIELD_NUMBER: builtins.int SRC_REVS_FIELD_NUMBER: builtins.int @@ -547,6 +550,8 @@ class Task(google.protobuf.message.Message): pass return_code: builtins.int = ... return_msg: typing.Text = ... + @property + def evaluation(self) -> global___Evaluation: ... serialized_task_parameters: typing.Text = ... serialized_executor_config: typing.Text = ... src_revs: typing.Text = ... @@ -562,14 +567,15 @@ class Task(google.protobuf.message.Message): unknown_types : typing.Optional[typing.Mapping[typing.Text, builtins.int]] = ..., return_code : builtins.int = ..., return_msg : typing.Text = ..., + evaluation : typing.Optional[global___Evaluation] = ..., serialized_task_parameters : typing.Text = ..., serialized_executor_config : typing.Text = ..., src_revs : typing.Text = ..., dst_rev : typing.Text = ..., executor : typing.Text = ..., ) -> None: ... - def HasField(self, field_name: typing_extensions.Literal["model",b"model"]) -> builtins.bool: ... - def ClearField(self, field_name: typing_extensions.Literal["dst_rev",b"dst_rev","executor",b"executor","model",b"model","name",b"name","return_code",b"return_code","return_msg",b"return_msg","serialized_executor_config",b"serialized_executor_config","serialized_task_parameters",b"serialized_task_parameters","src_revs",b"src_revs","task_id",b"task_id","timestamp",b"timestamp","type",b"type","unknown_types",b"unknown_types"]) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["evaluation",b"evaluation","model",b"model"]) -> builtins.bool: ... 
+ def ClearField(self, field_name: typing_extensions.Literal["dst_rev",b"dst_rev","evaluation",b"evaluation","executor",b"executor","model",b"model","name",b"name","return_code",b"return_code","return_msg",b"return_msg","serialized_executor_config",b"serialized_executor_config","serialized_task_parameters",b"serialized_task_parameters","src_revs",b"src_revs","task_id",b"task_id","timestamp",b"timestamp","type",b"type","unknown_types",b"unknown_types"]) -> None: ... global___Task = Task class ModelMeta(google.protobuf.message.Message): @@ -595,6 +601,207 @@ class ModelMeta(google.protobuf.message.Message): def ClearField(self, field_name: typing_extensions.Literal["context",b"context","mean_average_precision",b"mean_average_precision","model_hash",b"model_hash"]) -> None: ... global___ModelMeta = ModelMeta +class Evaluation(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... + class DatasetEvaluationsEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: typing.Text = ... + @property + def value(self) -> global___SingleDatasetEvaluation: ... + def __init__(self, + *, + key : typing.Text = ..., + value : typing.Optional[global___SingleDatasetEvaluation] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... + + CONFIG_FIELD_NUMBER: builtins.int + DATASET_EVALUATIONS_FIELD_NUMBER: builtins.int + @property + def config(self) -> global___EvaluateConfig: ... + @property + def dataset_evaluations(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, global___SingleDatasetEvaluation]: + """key: prediction dataset id, value: evaluation result for ground truth and prediction dataset""" + pass + def __init__(self, + *, + config : typing.Optional[global___EvaluateConfig] = ..., + dataset_evaluations : typing.Optional[typing.Mapping[typing.Text, global___SingleDatasetEvaluation]] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["config",b"config"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["config",b"config","dataset_evaluations",b"dataset_evaluations"]) -> None: ... +global___Evaluation = Evaluation + +class EvaluateConfig(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... + GT_DATASET_ID_FIELD_NUMBER: builtins.int + PRED_DATASET_IDS_FIELD_NUMBER: builtins.int + CONF_THR_FIELD_NUMBER: builtins.int + IOU_THRS_INTERVAL_FIELD_NUMBER: builtins.int + NEED_PR_CURVE_FIELD_NUMBER: builtins.int + gt_dataset_id: typing.Text = ... + @property + def pred_dataset_ids(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[typing.Text]: ... + conf_thr: builtins.float = ... + iou_thrs_interval: typing.Text = ... + need_pr_curve: builtins.bool = ... + def __init__(self, + *, + gt_dataset_id : typing.Text = ..., + pred_dataset_ids : typing.Optional[typing.Iterable[typing.Text]] = ..., + conf_thr : builtins.float = ..., + iou_thrs_interval : typing.Text = ..., + need_pr_curve : builtins.bool = ..., + ) -> None: ... 
+ def ClearField(self, field_name: typing_extensions.Literal["conf_thr",b"conf_thr","gt_dataset_id",b"gt_dataset_id","iou_thrs_interval",b"iou_thrs_interval","need_pr_curve",b"need_pr_curve","pred_dataset_ids",b"pred_dataset_ids"]) -> None: ... +global___EvaluateConfig = EvaluateConfig + +class SingleDatasetEvaluation(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... + class IouEvaluationsEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: typing.Text = ... + @property + def value(self) -> global___SingleIouEvaluation: ... + def __init__(self, + *, + key : typing.Text = ..., + value : typing.Optional[global___SingleIouEvaluation] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... + + CONF_THR_FIELD_NUMBER: builtins.int + GT_DATASET_ID_FIELD_NUMBER: builtins.int + PRED_DATASET_ID_FIELD_NUMBER: builtins.int + IOU_EVALUATIONS_FIELD_NUMBER: builtins.int + IOU_AVERAGED_EVALUATION_FIELD_NUMBER: builtins.int + conf_thr: builtins.float = ... + gt_dataset_id: typing.Text = ... + pred_dataset_id: typing.Text = ... + @property + def iou_evaluations(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, global___SingleIouEvaluation]: + """key: string of iou threshold""" + pass + @property + def iou_averaged_evaluation(self) -> global___SingleIouEvaluation: + """average for all ious""" + pass + def __init__(self, + *, + conf_thr : builtins.float = ..., + gt_dataset_id : typing.Text = ..., + pred_dataset_id : typing.Text = ..., + iou_evaluations : typing.Optional[typing.Mapping[typing.Text, global___SingleIouEvaluation]] = ..., + iou_averaged_evaluation : typing.Optional[global___SingleIouEvaluation] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["iou_averaged_evaluation",b"iou_averaged_evaluation"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["conf_thr",b"conf_thr","gt_dataset_id",b"gt_dataset_id","iou_averaged_evaluation",b"iou_averaged_evaluation","iou_evaluations",b"iou_evaluations","pred_dataset_id",b"pred_dataset_id"]) -> None: ... +global___SingleDatasetEvaluation = SingleDatasetEvaluation + +class SingleIouEvaluation(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... + class CiEvaluationsEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: builtins.int = ... + @property + def value(self) -> global___SingleTopicEvaluation: ... + def __init__(self, + *, + key : builtins.int = ..., + value : typing.Optional[global___SingleTopicEvaluation] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... + + class TopicEvaluationsEntry(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... + KEY_FIELD_NUMBER: builtins.int + VALUE_FIELD_NUMBER: builtins.int + key: typing.Text = ... + @property + def value(self) -> global___SingleTopicEvaluation: ... 
+ def __init__(self, + *, + key : typing.Text = ..., + value : typing.Optional[global___SingleTopicEvaluation] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["value",b"value"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["key",b"key","value",b"value"]) -> None: ... + + CI_EVALUATIONS_FIELD_NUMBER: builtins.int + CI_AVERAGED_EVALUATION_FIELD_NUMBER: builtins.int + TOPIC_EVALUATIONS_FIELD_NUMBER: builtins.int + @property + def ci_evaluations(self) -> google.protobuf.internal.containers.MessageMap[builtins.int, global___SingleTopicEvaluation]: + """key: class ids""" + pass + @property + def ci_averaged_evaluation(self) -> global___SingleTopicEvaluation: + """evaluations averaged by class ids""" + pass + @property + def topic_evaluations(self) -> google.protobuf.internal.containers.MessageMap[typing.Text, global___SingleTopicEvaluation]: + """key: topic names""" + pass + def __init__(self, + *, + ci_evaluations : typing.Optional[typing.Mapping[builtins.int, global___SingleTopicEvaluation]] = ..., + ci_averaged_evaluation : typing.Optional[global___SingleTopicEvaluation] = ..., + topic_evaluations : typing.Optional[typing.Mapping[typing.Text, global___SingleTopicEvaluation]] = ..., + ) -> None: ... + def HasField(self, field_name: typing_extensions.Literal["ci_averaged_evaluation",b"ci_averaged_evaluation"]) -> builtins.bool: ... + def ClearField(self, field_name: typing_extensions.Literal["ci_averaged_evaluation",b"ci_averaged_evaluation","ci_evaluations",b"ci_evaluations","topic_evaluations",b"topic_evaluations"]) -> None: ... +global___SingleIouEvaluation = SingleIouEvaluation + +class SingleTopicEvaluation(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... + AP_FIELD_NUMBER: builtins.int + AR_FIELD_NUMBER: builtins.int + TP_FIELD_NUMBER: builtins.int + FP_FIELD_NUMBER: builtins.int + FN_FIELD_NUMBER: builtins.int + PR_CURVE_FIELD_NUMBER: builtins.int + ap: builtins.float = ... + ar: builtins.float = ... + tp: builtins.int = ... + fp: builtins.int = ... + fn: builtins.int = ... + @property + def pr_curve(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___FloatPoint]: ... + def __init__(self, + *, + ap : builtins.float = ..., + ar : builtins.float = ..., + tp : builtins.int = ..., + fp : builtins.int = ..., + fn : builtins.int = ..., + pr_curve : typing.Optional[typing.Iterable[global___FloatPoint]] = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["ap",b"ap","ar",b"ar","fn",b"fn","fp",b"fp","pr_curve",b"pr_curve","tp",b"tp"]) -> None: ... +global___SingleTopicEvaluation = SingleTopicEvaluation + +class FloatPoint(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... + X_FIELD_NUMBER: builtins.int + Y_FIELD_NUMBER: builtins.int + x: builtins.float = ... + y: builtins.float = ... + def __init__(self, + *, + x : builtins.float = ..., + y : builtins.float = ..., + ) -> None: ... + def ClearField(self, field_name: typing_extensions.Literal["x",b"x","y",b"y"]) -> None: ... +global___FloatPoint = FloatPoint + class MirContext(google.protobuf.message.Message): """/ ========== context.mir ==========""" DESCRIPTOR: google.protobuf.descriptor.Descriptor = ... 
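The stubs above define the new evaluation message family end to end: an `Evaluation` holds one `EvaluateConfig` plus a map of per-prediction-dataset results; each `SingleDatasetEvaluation` is keyed by IoU threshold into `SingleIouEvaluation` entries; and each of those breaks down into per-class `SingleTopicEvaluation` records (ap, ar, tp, fp, fn, optional PR curve). The sketch below shows how these messages compose; it is not part of the patch. It assumes the generated module is importable as `mir.protos.mir_command_pb2` (as elsewhere in this diff), and the dataset ids `a@a` / `b@b` are placeholders, not values taken from the source.

```python
from mir.protos import mir_command_pb2 as mirpb

# Describe what should be evaluated: one ground-truth dataset, one or more predictions.
config = mirpb.EvaluateConfig()
config.gt_dataset_id = "a@a"              # placeholder ground-truth rev@tid
config.pred_dataset_ids.append("b@b")     # placeholder prediction rev@tid
config.conf_thr = 0.3
config.iou_thrs_interval = "0.5:0.95:0.05"
config.need_pr_curve = False

# Top-level container: config + one entry per evaluated prediction dataset.
evaluation = mirpb.Evaluation()
evaluation.config.CopyFrom(config)

# Accessing a missing key of a message-valued map creates the entry in place.
dataset_eval = evaluation.dataset_evaluations["b@b"]
dataset_eval.conf_thr = config.conf_thr
dataset_eval.gt_dataset_id = "a@a"
dataset_eval.pred_dataset_id = "b@b"

# Per-IoU, per-class result: iou_evaluations is keyed by the stringified threshold,
# ci_evaluations by integer class id.
topic = dataset_eval.iou_evaluations["0.50"].ci_evaluations[0]
topic.ap, topic.ar = 0.75, 0.80
topic.tp, topic.fp, topic.fn = 8, 2, 2
topic.pr_curve.append(mirpb.FloatPoint(x=0.5, y=0.9))

# Reading results back: the averaged block gives one mAP / mAR per prediction dataset.
for pred_id, ds_eval in evaluation.dataset_evaluations.items():
    averaged = ds_eval.iou_averaged_evaluation.ci_averaged_evaluation
    print(pred_id, f"mAP={averaged.ap:.3f}", f"mAR={averaged.ar:.3f}")
```

In the actual flow added by this patch, `det_evaluate()` in `det_eval.py` (below) fills these messages from `MirDetEval` results, and `create_task(..., evaluation=...)` attaches them to the task record.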
diff --git a/ymir/command/mir/tools/annotations.py b/ymir/command/mir/tools/annotations.py index 1a0b11fed5..b15d5ac39b 100644 --- a/ymir/command/mir/tools/annotations.py +++ b/ymir/command/mir/tools/annotations.py @@ -1,7 +1,7 @@ from collections import defaultdict import logging import os -from typing import Dict, List, Set, Tuple +from typing import Dict, List, Optional, Tuple import xml.dom.minidom @@ -12,7 +12,7 @@ from mir.protos import mir_command_pb2 as mirpb -def _get_dom_xml_tag_node(node: xml.dom.minidom.Element, tag_name: str) -> xml.dom.minidom.Element: +def _get_dom_xml_tag_node(node: xml.dom.minidom.Element, tag_name: str) -> Optional[xml.dom.minidom.Element]: """ suppose we have the following xml: ``` @@ -27,7 +27,7 @@ def _get_dom_xml_tag_node(node: xml.dom.minidom.Element, tag_name: str) -> xml.d tag_nodes = node.getElementsByTagName(tag_name) if len(tag_nodes) > 0 and len(tag_nodes[0].childNodes) > 0: return tag_nodes[0] - raise MirRuntimeError(MirCode.RC_CMD_INVALID_FILE, f"found no element for key: {tag_name}") + return None def _get_dom_xml_tag_data(node: xml.dom.minidom.Element, tag_name: str) -> str: @@ -55,6 +55,9 @@ def _xml_obj_to_annotation(obj: xml.dom.minidom.Element, """ name = _xml_obj_to_type_name(obj) bndbox_node = _get_dom_xml_tag_node(obj, "bndbox") + if not bndbox_node: + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message='found no value for bndbox') + xmin = int(float(_get_dom_xml_tag_data(bndbox_node, "xmin"))) ymin = int(float(_get_dom_xml_tag_data(bndbox_node, "ymin"))) xmax = int(float(_get_dom_xml_tag_data(bndbox_node, "xmax"))) @@ -62,13 +65,17 @@ def _xml_obj_to_annotation(obj: xml.dom.minidom.Element, width = xmax - xmin + 1 height = ymax - ymin + 1 + # there's no `score` key in original voc format, we add it here to support box conf score + score_str = _get_dom_xml_tag_data(obj, 'score') + score = float(score_str) if score_str else 2.0 + annotation = mirpb.Annotation() annotation.class_id = class_type_manager.id_and_main_name_for_name(name)[0] annotation.box.x = xmin annotation.box.y = ymin annotation.box.w = width annotation.box.h = height - annotation.score = 0 + annotation.score = score return annotation @@ -76,45 +83,9 @@ def _xml_obj_to_type_name(obj: xml.dom.minidom.Element) -> str: return _get_dom_xml_tag_data(obj, "name").lower() -def _read_customized_Keywords(ck_file: str) -> Dict[str, Set[str]]: - """ - read customized keywords out from ck file - - Args: - ck_file (str): tsv file, \t\t... 
- - Returns: - Dict[str, Set[str]]: key: asset name, value: set of customized keywords - - Raises: - ValueError: if found dumplicat key in ck file - """ - name_cks = {} # type: Dict[str, Set[str]] - if not ck_file: - return name_cks - - with open(ck_file, 'r') as f: - lines = f.read().splitlines() - - for line in lines: - if not line: - continue - components = line.split('\t') - if len(components) == 1: - continue - - if components[0] in name_cks: - raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, - error_message=f'dumplicate asset name in ck file: {components[0]}') - - name_cks[components[0]] = set(components[1:]) - - return name_cks - - -def import_annotations(mir_annotation: mirpb.MirAnnotations, mir_keywords: mirpb.MirKeywords, in_sha1_file: str, - ck_file: str, mir_root: str, annotations_dir_path: str, task_id: str, - phase: str) -> Tuple[int, Dict[str, int]]: +def import_annotations(mir_metadatas: mirpb.MirMetadatas, mir_annotation: mirpb.MirAnnotations, + in_sha1_file: str, mir_root: str, + annotations_dir_path: str, task_id: str, phase: str) -> Tuple[int, Dict[str, int]]: """ imports annotations @@ -122,7 +93,6 @@ def import_annotations(mir_annotation: mirpb.MirAnnotations, mir_keywords: mirpb mir_annotation (mirpb.MirAnnotations): data buf for annotations.mir mir_keywords (mirpb.MirKeywords): data buf for keywords.mir in_sha1_file (str): path to sha1 file - ck_file (str): path to customized keywords file mir_root (str): path to mir repo annotations_dir_path (str): path to annotations root task_id (str): task id @@ -141,12 +111,9 @@ def import_annotations(mir_annotation: mirpb.MirAnnotations, mir_keywords: mirpb class_type_manager = class_ids.ClassIdManager(mir_root=mir_root) logging.info("loaded type id and names: %d", class_type_manager.size()) - # read customized keywords from ck_file - cks = _read_customized_Keywords(ck_file) - image_annotations = mir_annotation.task_annotations[task_id].image_annotations - assethash_filename_list = [] # type: List[Tuple[str, str, str]] + assethash_filename_list: List[Tuple[str, str]] = [] # hash id and main file name with open(in_sha1_file, "r") as in_file: for line in in_file.readlines(): line_components = line.strip().split('\t') @@ -154,15 +121,17 @@ def import_annotations(mir_annotation: mirpb.MirAnnotations, mir_keywords: mirpb logging.warning("incomplete line: %s", line) continue asset_hash, file_name = line_components[0], line_components[1] + if asset_hash not in mir_metadatas.attributes: + continue main_file_name = os.path.splitext(os.path.basename(file_name))[0] - assethash_filename_list.append((asset_hash, main_file_name, file_name)) + assethash_filename_list.append((asset_hash, main_file_name)) total_assethash_count = len(assethash_filename_list) logging.info(f"wrting {total_assethash_count} annotations") counter = 0 missing_annotations_counter = 0 - for asset_hash, main_file_name, file_path in assethash_filename_list: + for asset_hash, main_file_name in assethash_filename_list: # for each asset, import it's annotations annotation_file = os.path.join(annotations_dir_path, main_file_name + '.xml') if annotations_dir_path else None if not annotation_file or not os.path.isfile(annotation_file): @@ -185,10 +154,6 @@ def import_annotations(mir_annotation: mirpb.MirAnnotations, mir_keywords: mirpb else: unknown_types_and_count[type_name] += 1 - # import customized keywords - if file_path in cks: - mir_keywords.keywords[asset_hash].customized_keywords[:] = cks[file_path] - counter += 1 if counter % 5000 == 0: 
PhaseLoggerCenter.update_phase(phase=phase, local_percent=(counter / total_assethash_count)) diff --git a/ymir/command/mir/tools/class_ids.py b/ymir/command/mir/tools/class_ids.py index 8367c58d04..37c2593809 100644 --- a/ymir/command/mir/tools/class_ids.py +++ b/ymir/command/mir/tools/class_ids.py @@ -6,7 +6,6 @@ from mir.tools import utils as mir_utils - EXPECTED_FILE_VERSION = 1 @@ -60,10 +59,7 @@ def _generate_dicts(cls, values: dict) -> dict: label_to_ids: Dict[str, Tuple[int, Optional[str]]] = {} id_to_labels: Dict[int, str] = {} for label in labels: - _set_if_not_exists(k=label.name, - v=(label.id, None), - d=label_to_ids, - error_message_prefix='duplicated name') + _set_if_not_exists(k=label.name, v=(label.id, None), d=label_to_ids, error_message_prefix='duplicated name') # key: aliases for label_alias in label.aliases: _set_if_not_exists(k=label_alias, @@ -72,10 +68,7 @@ def _generate_dicts(cls, values: dict) -> dict: error_message_prefix='duplicated alias') # self._type_id_name_dict - _set_if_not_exists(k=label.id, - v=label.name, - d=id_to_labels, - error_message_prefix='duplicated id') + _set_if_not_exists(k=label.id, v=label.name, d=id_to_labels, error_message_prefix='duplicated id') values['_label_to_ids'] = label_to_ids values['_id_to_labels'] = id_to_labels @@ -145,10 +138,11 @@ def id_and_main_name_for_name(self, name: str) -> Tuple[int, Optional[str]]: name (str): main type name or alias Raises: - ClassIdManagerError: if not loaded, or name is empty, or can not find name + ClassIdManagerError: if not loaded, or name is empty Returns: - Tuple[int, Optional[str]]: (type id, main type name) + Tuple[int, Optional[str]]: (type id, main type name), + if name not found, returns -1, None """ name = name.strip().lower() if not self._storage_file_path: @@ -157,7 +151,7 @@ def id_and_main_name_for_name(self, name: str) -> Tuple[int, Optional[str]]: raise ClassIdManagerError("empty name") if name not in self._label_storage._label_to_ids: - raise ClassIdManagerError(f"not exists: {name}") + return -1, None return self._label_storage._label_to_ids[name] @@ -173,7 +167,7 @@ def main_name_for_id(self, type_id: int) -> Optional[str]: """ return self._label_storage._id_to_labels.get(type_id, None) - def id_for_names(self, names: List[str]) -> List[int]: + def id_for_names(self, names: List[str]) -> Tuple[List[int], List[str]]: """ return all type ids for names @@ -181,9 +175,18 @@ def id_for_names(self, names: List[str]) -> List[int]: names (List[str]): main type names or alias Returns: - List[int]: corresponding type ids + Tuple[List[int], List[str]]: corresponding type ids and unknown names """ - return [self.id_and_main_name_for_name(name=name)[0] for name in names] + class_ids = [] + unknown_names = [] + for name in names: + class_id = self.id_and_main_name_for_name(name=name)[0] + class_ids.append(class_id) + + if class_id < 0: + unknown_names.append(name) + + return class_ids, unknown_names def all_main_names(self) -> List[str]: """ diff --git a/ymir/command/mir/tools/command_run_in_out.py b/ymir/command/mir/tools/command_run_in_out.py index 849494dca4..c895374a02 100644 --- a/ymir/command/mir/tools/command_run_in_out.py +++ b/ymir/command/mir/tools/command_run_in_out.py @@ -128,7 +128,7 @@ def wrapper(mir_root: str, src_revs: str, dst_rev: str, work_dir: str, *args: tu state_content=state_message, trace_message='') - logging.info(f"command done: {dst_rev}, result: {ret}") + logging.info(f"command done: {dst_rev}, return code: {ret}") _cleanup(work_dir=work_dir) diff --git 
a/ymir/command/mir/tools/context.py b/ymir/command/mir/tools/context.py index 12a8b8a295..821f633679 100644 --- a/ymir/command/mir/tools/context.py +++ b/ymir/command/mir/tools/context.py @@ -30,19 +30,6 @@ def save(mir_root: str, project_class_ids: List[int]) -> None: # general -def check_class_names(mir_root: str, current_class_names: List[str]) -> bool: - """ - check `current_class_names` matches mir repo's project class ids settings - - if mir repo has project class ids settings, this function returns True if they are equal - - if mir repo has no project class ids settings, this function always returns True, meaning they are always matched - """ - class_id_manager = class_ids.ClassIdManager(mir_root) - current_class_ids = class_id_manager.id_for_names(current_class_names) - return check_class_ids(mir_root=mir_root, current_class_ids=current_class_ids) - - def check_class_ids(mir_root: str, current_class_ids: List[int]) -> bool: """ check `current_class_ids` matches mir repo's project class ids settings diff --git a/ymir/command/mir/tools/det_eval.py b/ymir/command/mir/tools/det_eval.py new file mode 100644 index 0000000000..b07c11237d --- /dev/null +++ b/ymir/command/mir/tools/det_eval.py @@ -0,0 +1,641 @@ +from collections import defaultdict +from typing import Any, List, Optional, Set, Union +from mir.tools.code import MirCode + +import numpy as np + +from mir.tools import mir_storage_ops, revs_parser +from mir.tools.errors import MirRuntimeError +from mir.protos import mir_command_pb2 as mirpb + + +class MirCoco: + def __init__(self, mir_root: str, rev_tid: revs_parser.TypRevTid, conf_thr: float) -> None: + m: mirpb.MirMetadatas + a: mirpb.MirAnnotations + k: mirpb.MirKeywords + m, a, k, = mir_storage_ops.MirStorageOps.load_multiple_storages(mir_root=mir_root, + mir_branch=rev_tid.rev, + mir_task_id=rev_tid.tid, + ms_list=[ + mirpb.MirStorage.MIR_METADATAS, + mirpb.MirStorage.MIR_ANNOTATIONS, + mirpb.MirStorage.MIR_KEYWORDS, + ]) + if len(m.attributes) == 0: + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, + error_message='no assets in evaluated dataset') + if len(a.task_annotations[a.head_task_id].image_annotations) == 0: + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, + error_message='no annotations in evaluated dataset') + + self._mir_metadatas = m + self._mir_annotations = a + + # ordered list of asset / image ids + self._ordered_asset_ids = sorted(list(self._mir_metadatas.attributes.keys())) + # key: asset id, value: index in `self._ordered_asset_ids` + self._asset_id_to_ordered_idxes = {asset_id: idx for idx, asset_id in enumerate(self._ordered_asset_ids)} + # ordered list of class / category ids + self._ordered_class_ids = sorted(list(k.index_predifined_keyids.keys())) + + self.img_cat_to_annotations = defaultdict(list) + annos = self._get_annotations(asset_idxes=self.get_asset_idxes(), + class_ids=self.get_class_ids(), + conf_thr=conf_thr) + for anno in annos: + self.img_cat_to_annotations[anno['asset_idx'], anno['class_id']].append(anno) + + self.dataset_id = rev_tid.rev_tid + + def load_dts_from_gt(self, mir_root: str, rev_tids: List[revs_parser.TypRevTid], + conf_thr: float) -> List['MirCoco']: + gt_asset_ids_set = set(self.get_asset_ids()) + mir_dts: List['MirCoco'] = [] + for rev_tid in rev_tids: + mir_dt = MirCoco(mir_root=mir_root, rev_tid=rev_tid, conf_thr=conf_thr) + if set(mir_dt.mir_metadatas.attributes.keys()) != gt_asset_ids_set: + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, + error_message='prediction and ground 
truth have different assets') + + mir_dts.append(mir_dt) + return mir_dts + + @property + def mir_metadatas(self) -> mirpb.MirMetadatas: + return self._mir_metadatas + + @property + def mir_annotations(self) -> mirpb.MirAnnotations: + return self._mir_annotations + + def _get_annotations(self, asset_idxes: List[int], class_ids: List[int], conf_thr: float) -> List[dict]: + """ + get all annotations list for asset ids and class ids + + if asset_idxes and class_ids provided, only returns filtered annotations + + Args: + asset_idxes (List[int]): asset ids, if not provided, returns annotations for all images + class_ids (List[int]): class ids, if not provided, returns annotations for all classe + conf_thr (float): confidence threshold of bbox + + Returns: + a list of annotations and asset ids + each element is a dict, and has following keys and values: + asset_id: str, image / asset id + asset_idx: int, position of asset id in `self.get_asset_ids()` + id: int, id for a single annotation + class_id: int, category / class id + area: int, area of bbox + bbox: List[int], bounding box, xywh + score: float, confidence of bbox + iscrowd: always 0 because mir knows nothing about it + """ + result_annotations_list: List[dict] = [] + + single_task_annotations = self._mir_annotations.task_annotations[self._mir_annotations.head_task_id] + if not asset_idxes: + asset_idxes = self.get_asset_idxes() + + annotation_idx = 1 + for asset_idx in asset_idxes: + asset_id = self._ordered_asset_ids[asset_idx] + if asset_id not in single_task_annotations.image_annotations: + continue + + single_image_annotations = single_task_annotations.image_annotations[asset_id] + for annotation in single_image_annotations.annotations: + if class_ids and annotation.class_id not in class_ids: + continue + if annotation.score < conf_thr: + continue + + annotation_dict = { + 'asset_id': asset_id, + 'asset_idx': asset_idx, + 'id': annotation_idx, + 'class_id': annotation.class_id, + 'area': annotation.box.w * annotation.box.h, + 'bbox': [annotation.box.x, annotation.box.y, annotation.box.w, annotation.box.h], + 'score': annotation.score, + 'iscrowd': 0, + 'ignore': 0, + } + result_annotations_list.append(annotation_dict) + + annotation_idx += 1 + + return result_annotations_list + + def get_asset_ids(self) -> List[str]: + return self._ordered_asset_ids + + def get_asset_idxes(self) -> List[int]: + return list(range(len(self._ordered_asset_ids))) + + def get_class_ids(self) -> List[int]: + return self._ordered_class_ids + + +class MirDetEval: + def __init__(self, coco_gt: MirCoco, coco_dt: MirCoco, params: 'Params' = None): + self.cocoGt = coco_gt # ground truth COCO API + self.cocoDt = coco_dt # detections COCO API + self.evalImgs: list = [] # per-image per-category evaluation results [KxAxI] elements + self.eval: dict = {} # accumulated evaluation results + self._gts: dict = coco_gt.img_cat_to_annotations # gt for evaluation + self._dts: dict = coco_dt.img_cat_to_annotations # dt for evaluation + self.params = params or Params() # parameters + self.stats: np.ndarray = np.zeros(1) # result summarization + self.ious: dict = { + } # key: (asset id, class id), value: ious ndarray of ith dt (sorted by score, desc) and jth gt + self.params.imgIdxes = coco_gt.get_asset_idxes() + self.params.catIds = coco_gt.get_class_ids() + + def evaluate(self) -> None: + ''' + Run per image evaluation on given images and store results (a list of dict) in self.evalImgs + + Returns: None + SideEffects: + self.params.catIds / imgIdxes: duplicated class and 
asset ids will be removed + self.params.maxDets: will be sorted + self.ious: will be cauculated + self.evalImgs: will be cauculated + ''' + self.params.maxDets.sort() + p = self.params + + # loop through images, area range, max detection number + catIds = p.catIds + + # self.ious: key: (img_idx, class_id), value: ious ndarray of len(dts) * len(gts) + self.ious = {(imgIdx, catId): self.computeIoU(imgIdx, catId) for imgIdx in p.imgIdxes for catId in catIds} + + maxDet = p.maxDets[-1] + self.evalImgs = [ + self.evaluateImg(imgIdx, catId, areaRng, maxDet) for catId in catIds for areaRng in p.areaRng + for imgIdx in p.imgIdxes + ] + + def computeIoU(self, imgIdx: int, catId: int) -> Union[np.ndarray, list]: + """ + compute ious of detections and ground truth boxes of single image and class /category + + Args: + imgIdx (int): asset / image ordered idx + catId (int): category / class id + + Returns: + ious ndarray of detections and ground truth boxes of single image and category + ious[i][j] means the iou i-th detection (sorted by score, desc) and j-th ground truth box + """ + gt = self._gts[imgIdx, catId] + dt = self._dts[imgIdx, catId] + if len(gt) == 0 and len(dt) == 0: + return [] + + # sort dt by score, desc + inds = np.argsort([-d['score'] for d in dt], kind='mergesort') + dt = [dt[i] for i in inds] + if len(dt) > self.params.maxDets[-1]: + dt = dt[0:self.params.maxDets[-1]] + + g_boxes = [g['bbox'] for g in gt] + d_boxes = [d['bbox'] for d in dt] + + iscrowd = [int(o.get('iscrowd', 0)) for o in gt] + # compute iou between each dt and gt region + # ious: matrix of len(d_boxes) * len(g_boxes) + # ious[i][j]: iou of d_boxes[i] and g_boxes[j] + ious = self._iou(d_boxes=d_boxes, g_boxes=g_boxes, iscrowd=iscrowd) + return ious + + @classmethod + def _iou(cls, d_boxes: List[List[int]], g_boxes: List[List[int]], iscrowd: List[int]) -> np.ndarray: + def _single_iou(d_box: List[int], g_box: List[int], iscrowd: int) -> float: + """ box: xywh """ + i_w = min(d_box[2] + d_box[0], g_box[2] + g_box[0]) - max(d_box[0], g_box[0]) + if i_w <= 0: + return 0 + i_h = min(d_box[3] + d_box[1], g_box[3] + g_box[1]) - max(d_box[1], g_box[1]) + if i_h <= 0: + return 0 + i_area = i_w * i_h + u_area = d_box[2] * d_box[3] + g_box[2] * g_box[3] - i_area if not iscrowd else d_box[2] * d_box[3] + return i_area / u_area + + ious = np.zeros((len(d_boxes), len(g_boxes)), dtype=np.double) + for d_idx, d_box in enumerate(d_boxes): + for g_idx, g_box in enumerate(g_boxes): + ious[d_idx, g_idx] = _single_iou(d_box, g_box, iscrowd[g_idx]) + return ious + + def evaluateImg(self, imgIdx: int, catId: int, aRng: Any, maxDet: int) -> Optional[dict]: + ''' + perform evaluation for single category and image + + Args: + imgIdx (int): image / asset ordered index + catId (int): category / class id + aRng (List[float]): area range (lower and upper bound) + maxDet (int): + + Returns: + dict (single image results) + ''' + gt = self._gts[imgIdx, catId] + dt = self._dts[imgIdx, catId] + if len(gt) == 0 and len(dt) == 0: + return None + + for g in gt: + if g['ignore'] or (g['area'] < aRng[0] or g['area'] > aRng[1]): + g['_ignore'] = 1 + else: + g['_ignore'] = 0 + + # sort dt highest score first, sort gt ignore last + gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort') + gt = [gt[i] for i in gtind] + dtind = np.argsort([-d['score'] for d in dt], kind='mergesort') + dt = [dt[i] for i in dtind[0:maxDet]] + iscrowd = [int(o['iscrowd']) for o in gt] + # load computed ious + ious = self.ious[imgIdx, catId][:, gtind] if 
len(self.ious[imgIdx, catId]) > 0 else self.ious[imgIdx, catId] + + p = self.params + T = len(p.iouThrs) + G = len(gt) + D = len(dt) + gtm = np.zeros((T, G)) # gtm[i, j]: dt annotation id matched by j-th gt in i-th iou thr, iouThrs x gts + dtm = np.zeros((T, D)) # dtm[i, j]: gt annotation id matched by j-th dt in i-th iou thr, iouThrs x dets + gtIg = np.array([g['_ignore'] for g in gt]) # gt ignore + dtIg = np.zeros((T, D)) # dt ignore + if not len(ious) == 0: + for tind, t in enumerate(p.iouThrs): + for dind, d in enumerate(dt): + # information about best match so far (m=-1 -> unmatched) + iou = min([t, 1 - 1e-10]) + m = -1 # best matched gind for current dind, -1 for unmatch + for gind, g in enumerate(gt): + # if this gt already matched, and not a crowd, continue + if gtm[tind, gind] > 0 and not iscrowd[gind]: + continue + # if dt matched to reg gt, and on ignore gt, stop + if m > -1 and gtIg[m] == 0 and gtIg[gind] == 1: + break + # continue to next gt unless better match made + if ious[dind, gind] < iou: + continue + # if match successful and best so far, store appropriately + iou = ious[dind, gind] + m = gind + # if match made store id of match for both dt and gt + if m == -1: + continue + dtIg[tind, dind] = gtIg[m] + dtm[tind, dind] = gt[m]['id'] + gtm[tind, m] = d['id'] + # set unmatched detections outside of area range to ignore + a = np.array([d['area'] < aRng[0] or d['area'] > aRng[1] for d in dt]).reshape((1, len(dt))) + dtIg = np.logical_or(dtIg, np.logical_and(dtm == 0, np.repeat(a, T, 0))) + # store results for given image and category + return { + 'image_id': imgIdx, + 'category_id': catId, + 'aRng': aRng, + 'maxDet': maxDet, + 'dtIds': [d['id'] for d in dt], + 'gtIds': [g['id'] for g in gt], + 'dtMatches': dtm, + 'gtMatches': gtm, + 'dtScores': [d['score'] for d in dt], + 'gtIgnore': gtIg, + 'dtIgnore': dtIg, + } + + def accumulate(self, p: 'Params' = None) -> None: + ''' + Accumulate per image evaluation results and store the result in self.eval + :param p: input params for evaluation + :return: None + ''' + if not self.evalImgs: + raise ValueError('Please run evaluate() first') + + # allows input customized parameters + if p is None: + p = self.params + T = len(p.iouThrs) + R = len(p.recThrs) + K = len(p.catIds) + A = len(p.areaRng) + M = len(p.maxDets) + precision = -np.ones((T, R, K, A, M)) # -1 for the precision of absent categories + recall = -np.ones((T, K, A, M)) + scores = -np.ones((T, R, K, A, M)) + all_tps = np.zeros((T, K, A, M)) + all_fps = np.zeros((T, K, A, M)) + all_fns = np.zeros((T, K, A, M)) + + # create dictionary for future indexing + catIds = self.params.catIds + setK: set = set(catIds) + setA: Set[tuple] = set(map(tuple, self.params.areaRng)) + setM: set = set(self.params.maxDets) + setI: set = set(self.params.imgIdxes) + # get inds to evaluate + k_list = [n for n, k in enumerate(p.catIds) if k in setK] + m_list = [m for n, m in enumerate(p.maxDets) if m in setM] + a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA] + i_list = [n for n, i in enumerate(p.imgIdxes) if i in setI] + I0 = len(self.params.imgIdxes) + A0 = len(self.params.areaRng) + # retrieve E at each category, area range, and max number of detections + for k, k0 in enumerate(k_list): + Nk = k0 * A0 * I0 + for a, a0 in enumerate(a_list): + Na = a0 * I0 + for m, maxDet in enumerate(m_list): + E = [self.evalImgs[Nk + Na + i] for i in i_list] + E = [e for e in E if e is not None] + if len(E) == 0: + continue + dtScores = np.concatenate([e['dtScores'][0:maxDet] 
for e in E]) + if len(dtScores) == 0: + continue + + # different sorting method generates slightly different results. + # mergesort is used to be consistent as Matlab implementation. + inds = np.argsort(-dtScores, kind='mergesort') + dtScoresSorted = dtScores[inds] + + dtm = np.concatenate([e['dtMatches'][:, 0:maxDet] for e in E], axis=1)[:, inds] + dtIg = np.concatenate([e['dtIgnore'][:, 0:maxDet] for e in E], axis=1)[:, inds] + gtIg = np.concatenate([e['gtIgnore'] for e in E]) + npig = np.count_nonzero(gtIg == 0) + if npig == 0: + continue + + tps = np.logical_and(dtm, np.logical_not(dtIg)) # iouThrs x dts + fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg)) # iouThrs x dts + tp_sum = np.cumsum(tps, axis=1).astype(dtype=float) + fp_sum = np.cumsum(fps, axis=1).astype(dtype=float) + + all_tps[:, k, a, m] = tp_sum[:, -1] + all_fps[:, k, a, m] = fp_sum[:, -1] + all_fns[:, k, a, m] = npig - all_tps[:, k, a, m] + + for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)): + tp = np.array(tp) + fp = np.array(fp) + nd = len(tp) + rc = tp / npig + pr = tp / (fp + tp + np.spacing(1)) + q = np.zeros((R, )) + ss = np.zeros((R, )) + + if nd: + recall[t, k, a, m] = rc[-1] + else: + recall[t, k, a, m] = 0 + + # numpy is slow without cython optimization for accessing elements + # use python array gets significant speed improvement + pr = pr.tolist() + q = q.tolist() + + for i in range(nd - 1, 0, -1): + if pr[i] > pr[i - 1]: + pr[i - 1] = pr[i] + + inds = np.searchsorted(rc, p.recThrs, side='left') + try: + for ri, pi in enumerate(inds): + q[ri] = pr[pi] + ss[ri] = dtScoresSorted[pi] + except Exception: + pass + precision[t, :, k, a, m] = np.array(q) + scores[t, :, k, a, m] = np.array(ss) + + self.eval = { + 'params': p, + 'counts': [T, R, K, A, M], + 'precision': precision, + 'recall': recall, + 'scores': scores, + 'all_fps': all_fps, + 'all_tps': all_tps, + 'all_fns': all_fns, + } + + def get_evaluation_result(self) -> mirpb.SingleDatasetEvaluation: + if not self.eval: + raise ValueError('Please run accumulate() first') + + evaluation_result = mirpb.SingleDatasetEvaluation() + evaluation_result.conf_thr = self.params.confThr + + # iou evaluations + for iou_thr_index, iou_thr in enumerate(self.params.iouThrs): + iou_evaluation = self._get_iou_evaluation_result(iou_thr_index=iou_thr_index) + evaluation_result.iou_evaluations[f"{iou_thr:.2f}"].CopyFrom(iou_evaluation) + + # average evaluation + evaluation_result.iou_averaged_evaluation.CopyFrom(self._get_iou_evaluation_result()) + + return evaluation_result + + def _get_iou_evaluation_result(self, iou_thr_index: int = None) -> mirpb.SingleIouEvaluation: + iou_evaluation = mirpb.SingleIouEvaluation() + + # ci evaluations: category / class ids + for class_id_index, class_id in enumerate(self.params.catIds): + topic_evaluation = self._get_topic_evaluation_result(iou_thr_index, class_id_index) + iou_evaluation.ci_evaluations[class_id].CopyFrom(topic_evaluation) + # class average + topic_evaluation = self._get_topic_evaluation_result(iou_thr_index, None) + iou_evaluation.ci_averaged_evaluation.CopyFrom(topic_evaluation) + + return iou_evaluation + + def _get_topic_evaluation_result(self, iou_thr_index: Optional[int], + class_id_index: Optional[int]) -> mirpb.SingleTopicEvaluation: + def _get_tp_tn_or_fn(iou_thr_index: Optional[int], class_id_index: Optional[int], area_ranges_index: int, + max_dets_index: int, array: np.ndarray) -> int: + """ + extract tp, tn and fn from `array` + + `array` comes from self.eval's all_tps, all_tns and all_fns, they all 
have the same structure: + iouThrs x catIds x aRngs x maxDets + """ + if iou_thr_index is not None: + array = array[[iou_thr_index]] + if class_id_index is not None: + array = array[:, class_id_index, area_ranges_index, max_dets_index] + else: + array = np.sum(array[:, :, area_ranges_index, max_dets_index], axis=1) + return int(array[0]) + + topic_evaluation = mirpb.SingleTopicEvaluation() + + # from _summarize + area_ranges_index = 0 # area range: 'all' + max_dets_index = len(self.params.maxDets) - 1 # last max det number + + # average precision + # precision dims: iouThrs * recThrs * catIds * areaRanges * maxDets + precisions: np.ndarray = self.eval['precision'] + if iou_thr_index is not None: + precisions = precisions[[iou_thr_index]] + if class_id_index is not None: + precisions = precisions[:, :, class_id_index, area_ranges_index, max_dets_index] + else: + precisions = precisions[:, :, :, area_ranges_index, max_dets_index] + precisions[precisions <= -1] = 0 + topic_evaluation.ap = np.mean(precisions) if len(precisions) > 0 else -1 + + # average recall + # recall dims: iouThrs * catIds * areaRanges * maxDets + recalls: np.ndarray = self.eval['recall'] + if iou_thr_index is not None: + recalls = recalls[[iou_thr_index]] + if class_id_index is not None: + recalls = recalls[:, class_id_index, area_ranges_index, max_dets_index] + else: + recalls = recalls[:, :, area_ranges_index, max_dets_index] + recalls[recalls <= -1] = 0 + topic_evaluation.ar = np.mean(recalls) if len(recalls) > 0 else -1 + + # true positive + topic_evaluation.tp = _get_tp_tn_or_fn(iou_thr_index=iou_thr_index, + class_id_index=class_id_index, + area_ranges_index=area_ranges_index, + max_dets_index=max_dets_index, + array=self.eval['all_tps']) + + # false positive + topic_evaluation.fp = _get_tp_tn_or_fn(iou_thr_index=iou_thr_index, + class_id_index=class_id_index, + area_ranges_index=area_ranges_index, + max_dets_index=max_dets_index, + array=self.eval['all_fps']) + + # false negative + topic_evaluation.fn = _get_tp_tn_or_fn(iou_thr_index=iou_thr_index, + class_id_index=class_id_index, + area_ranges_index=area_ranges_index, + max_dets_index=max_dets_index, + array=self.eval['all_fns']) + + # pr curve + if self.params.need_pr_curve and iou_thr_index is not None and class_id_index is not None: + precisions = self.eval['precision'][iou_thr_index, :, class_id_index, area_ranges_index, max_dets_index] + for recall_thr_index, recall_thr in enumerate(self.params.recThrs): + pr_point = mirpb.FloatPoint(x=recall_thr, y=precisions[recall_thr_index]) + topic_evaluation.pr_curve.append(pr_point) + + return topic_evaluation + + def summarize(self) -> None: + ''' + Compute and display summary metrics for evaluation results. 
+        Note this function can *only* be applied on the default parameter setting
+        '''
+        def _summarize(ap: int = 1, iouThr: float = None, areaRng: str = 'all', maxDets: int = 100) -> float:
+            p = self.params
+
+            aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]  # areaRanges index
+            mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]  # maxDets index
+            if ap == 1:
+                # dimension of precision: [TxRxKxAxM] iouThrs * recThrs * catIds * areaRanges * maxDets
+                s = self.eval['precision']
+                # IoU
+                if iouThr is not None:
+                    t = np.where(iouThr == p.iouThrs)[0]
+                    s = s[t]
+                s = s[:, :, :, aind, mind]
+            else:
+                # dimension of recall: [TxKxAxM] iouThrs * catIds * areaRanges * maxDets
+                s = self.eval['recall']
+                if iouThr is not None:
+                    t = np.where(iouThr == p.iouThrs)[0]
+                    s = s[t]
+                s = s[:, :, aind, mind]
+            if len(s[s > -1]) == 0:
+                mean_s = -1
+            else:
+                mean_s = np.mean(s[s > -1])
+            return mean_s
+
+        def _summarizeDets() -> np.ndarray:
+            stats = np.zeros((12, ))
+            stats[0] = _summarize(1)
+            stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[2])
+            stats[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[2])
+            stats[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[2])
+            stats[4] = _summarize(1, areaRng='medium', maxDets=self.params.maxDets[2])
+            stats[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[2])
+            stats[6] = _summarize(0, maxDets=self.params.maxDets[0])
+            stats[7] = _summarize(0, maxDets=self.params.maxDets[1])
+            stats[8] = _summarize(0, maxDets=self.params.maxDets[2])
+            stats[9] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[2])
+            stats[10] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[2])
+            stats[11] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[2])
+            return stats
+
+        if not self.eval:
+            raise Exception('Please run accumulate() first')
+        self.stats = _summarizeDets()
+
+
+class Params:
+    def __init__(self) -> None:
+        self.iouType = 'bbox'
+        self.catIds: List[int] = []
+        self.imgIdxes: List[int] = []
+        # np.arange causes trouble. 
the data point on arange is slightly larger than the true value + self.iouThrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True) # iou threshold + self.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True) # recall threshold + self.maxDets = [1, 10, 100] + self.areaRng: List[list] = [[0**2, 1e5**2], [0**2, 32**2], [32**2, 96**2], [96**2, 1e5**2]] # area range + self.areaRngLbl = ['all', 'small', 'medium', 'large'] # area range label + self.confThr = 0.3 # confidence threshold + self.need_pr_curve = False + + +def det_evaluate(mir_dts: List[MirCoco], mir_gt: MirCoco, config: mirpb.EvaluateConfig) -> mirpb.Evaluation: + iou_thr_from, iou_thr_to, iou_thr_step = [float(v) for v in config.iou_thrs_interval.split(':')] + for thr in [config.conf_thr, iou_thr_from, iou_thr_to, iou_thr_step]: + if thr < 0 or thr > 1: + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, + error_message='invalid conf_thr, iou_thr_from, iou_thr_to or iou_thr_step') + if iou_thr_from >= iou_thr_to: + raise MirRuntimeError(error_code=MirCode.RC_CMD_INVALID_ARGS, + error_message='invalid iou_thr_from or iou_thr_to') + params = Params() + params.confThr = config.conf_thr + params.iouThrs = np.linspace(start=iou_thr_from, + stop=iou_thr_to, + num=int(np.round((iou_thr_to - iou_thr_from) / iou_thr_step)), + endpoint=False) + params.need_pr_curve = config.need_pr_curve + + evaluation = mirpb.Evaluation() + evaluation.config.CopyFrom(config) + + for mir_dt in mir_dts: + evaluator = MirDetEval(coco_gt=mir_gt, coco_dt=mir_dt, params=params) + evaluator.evaluate() + evaluator.accumulate() + + single_dataset_evaluation = evaluator.get_evaluation_result() + single_dataset_evaluation.conf_thr = config.conf_thr + single_dataset_evaluation.gt_dataset_id = mir_gt.dataset_id + single_dataset_evaluation.pred_dataset_id = mir_dt.dataset_id + evaluation.dataset_evaluations[mir_dt.dataset_id].CopyFrom(single_dataset_evaluation) + + return evaluation diff --git a/ymir/command/mir/tools/mir_storage_ops.py b/ymir/command/mir/tools/mir_storage_ops.py index e17b075121..343012aa40 100644 --- a/ymir/command/mir/tools/mir_storage_ops.py +++ b/ymir/command/mir/tools/mir_storage_ops.py @@ -353,6 +353,20 @@ def load_assets_content(cls, mir_root: str, mir_branch: str, mir_task_id: str = class_ids_index={k: v["asset_ids"] for k, v in mir_storage_keywords["index_predifined_keyids"].items()}, ) + @classmethod + def load_dataset_evaluations(cls, mir_root: str, mir_branch: str, mir_task_id: str = '') -> dict: + mir_storage_data: mirpb.MirTasks = cls.load_single_storage(mir_root=mir_root, + mir_branch=mir_branch, + ms=mirpb.MirStorage.MIR_TASKS, + mir_task_id=mir_task_id, + as_dict=False) + task = mir_storage_data.tasks[mir_storage_data.head_task_id] + if not task.evaluation.dataset_evaluations: + raise MirError(error_code=MirCode.RC_CMD_INVALID_ARGS, error_message="no dataset evaluation") + + dataset_evaluations = cls.__message_to_dict(task.evaluation) + return dataset_evaluations["dataset_evaluations"] + def create_task(task_type: 'mirpb.TaskType.V', task_id: str, @@ -365,6 +379,7 @@ def create_task(task_type: 'mirpb.TaskType.V', serialized_task_parameters: str = '', serialized_executor_config: str = '', executor: str = '', + evaluation: mirpb.Evaluation = None, src_revs: str = '', dst_rev: str = '') -> mirpb.Task: task_dict = { @@ -388,4 +403,7 @@ def create_task(task_type: 'mirpb.TaskType.V', task: mirpb.Task = mirpb.Task() json_format.ParseDict(task_dict, task) + if evaluation: + 
task.evaluation.CopyFrom(evaluation) + return task diff --git a/ymir/command/mir/version.py b/ymir/command/mir/version.py index 75c1ac35a8..e2d37f7f10 100644 --- a/ymir/command/mir/version.py +++ b/ymir/command/mir/version.py @@ -1,2 +1,2 @@ # Package version -__version__ = '1.0.0' +__version__ = '1.1.0' diff --git a/ymir/command/proto/mir_command.proto b/ymir/command/proto/mir_command.proto index f3718ebc96..570513d396 100644 --- a/ymir/command/proto/mir_command.proto +++ b/ymir/command/proto/mir_command.proto @@ -40,6 +40,7 @@ enum TaskType { TaskTypeFusion = 11; TaskTypeInit = 12; TaskTypeImportModel = 13; + TaskTypeEvaluate = 16; reserved 14, 15; }; @@ -169,6 +170,7 @@ message Task { map unknown_types = 7; int32 return_code = 8; string return_msg = 9; + Evaluation evaluation = 10; string serialized_task_parameters = 102; string serialized_executor_config = 103; @@ -188,6 +190,48 @@ message ModelMeta { string context = 3; }; +message Evaluation { + EvaluateConfig config = 1; + // key: prediction dataset id, value: evaluation result for ground truth and prediction dataset + map dataset_evaluations = 2; +} + +message EvaluateConfig { + string gt_dataset_id = 1; + repeated string pred_dataset_ids = 2; + float conf_thr = 3; + string iou_thrs_interval = 4; + bool need_pr_curve = 5; +} + +message SingleDatasetEvaluation { + float conf_thr = 1; + string gt_dataset_id = 2; + string pred_dataset_id = 3; + map iou_evaluations = 4; // key: string of iou threshold + SingleIouEvaluation iou_averaged_evaluation = 5; // average for all ious +} + +message SingleIouEvaluation { + map ci_evaluations = 1; // key: class ids + SingleTopicEvaluation ci_averaged_evaluation = 2; // evaluations averaged by class ids + map topic_evaluations = 3; // key: topic names +} + +message SingleTopicEvaluation { + float ap = 1; + float ar = 2; + int32 tp = 3; + int32 fp = 4; + int32 fn = 5; + repeated FloatPoint pr_curve = 6; +} + +message FloatPoint { + float x = 1; + float y = 2; +} + /// ========== context.mir ========== message MirContext { /// total images count diff --git a/ymir/command/requirements.txt b/ymir/command/requirements.txt index 4d72b3c45e..d943a919f9 100644 --- a/ymir/command/requirements.txt +++ b/ymir/command/requirements.txt @@ -1,6 +1,8 @@ Pillow>=8.2.0 fasteners>=0.16.3 +numpy==1.21.2 protobuf==3.18.1 pydantic>=1.8.2 pyyaml>=5.4.1 requests>=2.25.1 +tensorboardX>=2.4.1 diff --git a/ymir/command/setup.cfg b/ymir/command/setup.cfg index 687b2e165b..83fd78fca8 100644 --- a/ymir/command/setup.cfg +++ b/ymir/command/setup.cfg @@ -1,5 +1,5 @@ [bumpversion] -current_version = 1.0.0 +current_version = 1.1.0 [bumpversion:file:mir/version.py] search = '{current_version}' diff --git a/ymir/command/tests/assets/2007_000032.xml b/ymir/command/tests/assets/2007_000032.xml index 03624c8b5d..1a4e3fde70 100755 --- a/ymir/command/tests/assets/2007_000032.xml +++ b/ymir/command/tests/assets/2007_000032.xml @@ -23,6 +23,7 @@ 375 183 + 0.5 aeroplane diff --git a/ymir/command/tests/unit/test_cmd_evaluate.py b/ymir/command/tests/unit/test_cmd_evaluate.py new file mode 100644 index 0000000000..58cab98077 --- /dev/null +++ b/ymir/command/tests/unit/test_cmd_evaluate.py @@ -0,0 +1,283 @@ +import os +import shutil +import unittest + +from google.protobuf import json_format + +from mir.commands import evaluate +from mir.protos import mir_command_pb2 as mirpb +from mir.tools import mir_storage_ops +from mir.tools.code import MirCode +from tests import utils as test_utils + + +class TestCmdEvaluate(unittest.TestCase): + # life 
cycle + def __init__(self, methodName: str = ...) -> None: + super().__init__(methodName) + self._test_root = test_utils.dir_test_root(self.id().split('.')[-3:]) + self._working_root = os.path.join(self._test_root, 'work') + self._mir_root = os.path.join(self._test_root, 'mir-root') + + def setUp(self) -> None: + self._prepare_dirs() + test_utils.prepare_labels(mir_root=self._mir_root, names=['person', 'cat', 'tv']) + self._prepare_mir_repo() + return super().setUp() + + def tearDown(self) -> None: + self._deprepare_dirs() + return super().tearDown() + + # protected: setup and teardown + def _prepare_dirs(self) -> None: + test_utils.remake_dirs(self._test_root) + test_utils.remake_dirs(self._working_root) + test_utils.remake_dirs(self._mir_root) + + def _prepare_mir_repo(self) -> None: + test_utils.mir_repo_init(self._mir_root) + self._prepare_mir_repo_branch_a() + self._prepare_mir_repo_branch_b() + + def _prepare_mir_repo_branch_a(self) -> None: + metadatas_dict = { + 'attributes': { + 'a0': { + 'assetType': 'AssetTypeImageJpeg', + 'tvtType': 'TvtTypeUnknown', + 'width': 500, + 'height': 500, + 'imageChannels': 3 + }, + 'a1': { + 'assetType': 'AssetTypeImageJpeg', + 'tvtType': 'TvtTypeUnknown', + 'width': 500, + 'height': 500, + 'imageChannels': 3 + }, + 'a2': { + 'assetType': 'AssetTypeImageJpeg', + 'tvtType': 'TvtTypeUnknown', + 'width': 500, + 'height': 500, + 'imageChannels': 3 + } + } + } + mir_metadatas = mirpb.MirMetadatas() + json_format.ParseDict(metadatas_dict, mir_metadatas) + + annotations_dict = { + 'task_annotations': { + 'a': { + 'image_annotations': { + 'a0': { + 'annotations': [{ + 'index': 0, + 'box': { + 'x': 50, + 'y': 50, + 'w': 50, + 'h': 50, + }, + 'class_id': 0, + 'score': 1, + }, { + 'index': 1, + 'box': { + 'x': 150, + 'y': 50, + 'w': 75, + 'h': 75, + }, + 'class_id': 0, + 'score': 1, + }, { + 'index': 2, + 'box': { + 'x': 150, + 'y': 150, + 'w': 75, + 'h': 75, + }, + 'class_id': 1, + 'score': 1, + }, { + 'index': 3, + 'box': { + 'x': 350, + 'y': 50, + 'w': 100, + 'h': 100, + }, + 'class_id': 2, + 'score': 1, + }] + }, + 'a1': { + 'annotations': [{ + 'index': 0, + 'box': { + 'x': 300, + 'y': 300, + 'w': 100, + 'h': 100, + }, + 'class_id': 2, + 'score': 1, + }] + }, + } + } + }, + 'head_task_id': 'a' + } + mir_annotations = mirpb.MirAnnotations() + json_format.ParseDict(annotations_dict, mir_annotations) + + task = mir_storage_ops.create_task(task_type=mirpb.TaskType.TaskTypeImportData, task_id='a', message='import') + mir_storage_ops.MirStorageOps.save_and_commit(mir_root=self._mir_root, + mir_branch='a', + his_branch='master', + mir_datas={ + mirpb.MirStorage.MIR_METADATAS: mir_metadatas, + mirpb.MirStorage.MIR_ANNOTATIONS: mir_annotations, + }, + task=task) + + def _prepare_mir_repo_branch_b(self) -> None: + metadatas_dict = { + 'attributes': { + 'a0': { + 'assetType': 'AssetTypeImageJpeg', + 'tvtType': 'TvtTypeUnknown', + 'width': 500, + 'height': 500, + 'imageChannels': 3 + }, + 'a1': { + 'assetType': 'AssetTypeImageJpeg', + 'tvtType': 'TvtTypeUnknown', + 'width': 500, + 'height': 500, + 'imageChannels': 3 + }, + 'a2': { + 'assetType': 'AssetTypeImageJpeg', + 'tvtType': 'TvtTypeUnknown', + 'width': 500, + 'height': 500, + 'imageChannels': 3 + } + } + } + mir_metadatas = mirpb.MirMetadatas() + json_format.ParseDict(metadatas_dict, mir_metadatas) + + annotations_dict = { + 'task_annotations': { + 'b': { + 'image_annotations': { + 'a0': { + 'annotations': [{ + 'index': 0, + 'box': { + 'x': 50, + 'y': 50, + 'w': 50, + 'h': 50, + }, + 'class_id': 0, + 'score': 
0.7, + }, { + 'index': 1, + 'box': { + 'x': 150, + 'y': 50, + 'w': 75, + 'h': 75, + }, + 'class_id': 0, + 'score': 0.8, + }, { + 'index': 2, + 'box': { + 'x': 150, + 'y': 150, + 'w': 75, + 'h': 75, + }, + 'class_id': 1, + 'score': 0.9, + }, { + 'index': 3, + 'box': { + 'x': 350, + 'y': 50, + 'w': 100, + 'h': 100, + }, + 'class_id': 2, + 'score': 0.9, + }] + }, + 'a1': { + 'annotations': [{ + 'index': 0, + 'box': { + 'x': 300, + 'y': 300, + 'w': 100, + 'h': 100, + }, + 'class_id': 2, + 'score': 0.9, + }] + }, + } + } + }, + 'head_task_id': 'b' + } + mir_annotations = mirpb.MirAnnotations() + json_format.ParseDict(annotations_dict, mir_annotations) + + task = mir_storage_ops.create_task(task_type=mirpb.TaskType.TaskTypeImportData, task_id='b', message='import') + mir_storage_ops.MirStorageOps.save_and_commit(mir_root=self._mir_root, + mir_branch='b', + his_branch='master', + mir_datas={ + mirpb.MirStorage.MIR_METADATAS: mir_metadatas, + mirpb.MirStorage.MIR_ANNOTATIONS: mir_annotations, + }, + task=task) + + def _deprepare_dirs(self) -> None: + if os.path.isdir(self._test_root): + shutil.rmtree(self._test_root) + + # public: test cases + def test_00(self) -> None: + fake_args = type('', (), {})() + fake_args.mir_root = self._mir_root + fake_args.work_dir = self._working_root + fake_args.src_revs = 'a;b' + fake_args.gt_rev = 'b' + fake_args.dst_rev = 'c@c' + fake_args.conf_thr = 0.3 + fake_args.iou_thrs = '0.5:0.95:0.05' + fake_args.need_pr_curve = False + evaluate_instance = evaluate.CmdEvaluate(fake_args) + return_code = evaluate_instance.run() + + self.assertEqual(return_code, MirCode.RC_OK) + + # check evaluation result + mir_tasks: mirpb.MirTasks = mir_storage_ops.MirStorageOps.load_single_storage(mir_root=self._mir_root, + mir_branch='c', + mir_task_id='c', + ms=mirpb.MirStorage.MIR_TASKS) + evaluation_result = mir_tasks.tasks[mir_tasks.head_task_id].evaluation + self.assertEqual({'a', 'b'}, set(evaluation_result.dataset_evaluations.keys())) diff --git a/ymir/command/tests/unit/test_cmd_import.py b/ymir/command/tests/unit/test_cmd_import.py index 2c9c40823c..fc84e7c7e1 100644 --- a/ymir/command/tests/unit/test_cmd_import.py +++ b/ymir/command/tests/unit/test_cmd_import.py @@ -116,7 +116,8 @@ def _check_repo(self, repo_root: str, with_person_ignored: bool, with_annotation 'w': 94, 'h': 67 }, - 'class_id': 1 + 'class_id': 1, + 'score': 2.0, }] }, '430df22960b0f369318705800139fcc8ec38a3e4': { @@ -127,7 +128,8 @@ def _check_repo(self, repo_root: str, with_person_ignored: bool, with_annotation 'w': 272, 'h': 106 }, - 'class_id': 1 + 'class_id': 1, + 'score': 0.5, }, { 'index': 1, 'box': { @@ -136,7 +138,8 @@ def _check_repo(self, repo_root: str, with_person_ignored: bool, with_annotation 'w': 65, 'h': 36 }, - 'class_id': 1 + 'class_id': 1, + 'score': 2.0, }] } } @@ -152,7 +155,8 @@ def _check_repo(self, repo_root: str, with_person_ignored: bool, with_annotation 'w': 94, 'h': 67 }, - 'class_id': 1 + 'class_id': 1, + 'score': 2.0, }] }, '430df22960b0f369318705800139fcc8ec38a3e4': { @@ -163,7 +167,8 @@ def _check_repo(self, repo_root: str, with_person_ignored: bool, with_annotation 'w': 272, 'h': 106 }, - 'class_id': 1 + 'class_id': 1, + 'score': 0.5, }, { 'index': 1, 'box': { @@ -172,7 +177,8 @@ def _check_repo(self, repo_root: str, with_person_ignored: bool, with_annotation 'w': 65, 'h': 36 }, - 'class_id': 1 + 'class_id': 1, + 'score': 2.0, }, { 'index': 2, 'box': { @@ -181,7 +187,8 @@ def _check_repo(self, repo_root: str, with_person_ignored: bool, with_annotation 'w': 19, 'h': 50 }, - 
'class_id': 2 + 'class_id': 2, + 'score': 2.0, + }, { + 'index': 3, + 'box': { @@ -190,7 +197,8 @@ def _check_repo(self, repo_root: str, with_person_ignored: bool, with_annotation 'w': 19, 'h': 50 }, - 'class_id': 2 + 'class_id': 2, + 'score': 2.0, + }] } } diff --git a/ymir/command/tests/unit/test_cmd_import_model.py b/ymir/command/tests/unit/test_cmd_import_model.py index 2fe604aad2..6af7e64d07 100644 --- a/ymir/command/tests/unit/test_cmd_import_model.py +++ b/ymir/command/tests/unit/test_cmd_import_model.py @@ -51,8 +51,9 @@ def _deprepare_dirs(self): def _prepare_model(self): with open(os.path.join(self._src_model_root, 'best.weights'), 'w') as f: f.write('fake darknet weights model') + # note: unknown-car is not in user labels, we still expect it to succeed model_storage = mir_utils.ModelStorage(models=['best.weights'], - executor_config={'class_names': ['cat', 'person']}, + executor_config={'class_names': ['cat', 'person', 'unknown-car']}, task_context={ mir_settings.PRODUCER_KEY: mir_settings.PRODUCER_NAME, 'mAP': 0.5 diff --git a/ymir/command/tests/unit/test_cmd_infer.py b/ymir/command/tests/unit/test_cmd_infer.py index 7809d0f753..520641ecbd 100644 --- a/ymir/command/tests/unit/test_cmd_infer.py +++ b/ymir/command/tests/unit/test_cmd_infer.py @@ -18,6 +18,7 @@ class TestCmdInfer(unittest.TestCase): def __init__(self, methodName: str = ...) -> None: super().__init__(methodName=methodName) self._test_root = test_utils.dir_test_root(self.id().split('.')[-3:]) + self._mir_repo_root = os.path.join(self._test_root, 'mir-demo-repo') self._models_location = os.path.join(self._test_root, 'models') self._src_assets_root = os.path.join(self._test_root, 'assets') # source assets, index and infer config file self._working_root = os.path.join(self._test_root, 'work') # work directory for cmd infer @@ -26,6 +27,7 @@ def __init__(self, methodName: str = ...)
-> None: def setUp(self) -> None: self._prepare_dir() + self._prepare_mir_root() self._prepare_assets() self._prepare_model() self._prepare_config_file() @@ -47,6 +49,10 @@ def _prepare_dir(self): def _deprepare_dir(self): shutil.rmtree(self._test_root) + def _prepare_mir_root(self): + test_utils.mir_repo_init(self._mir_repo_root) + test_utils.prepare_labels(mir_root=self._mir_repo_root, names=['person', 'cat']) + def _prepare_assets(self): test_assets_root = TestCmdInfer._test_assets_root() shutil.copyfile(src=os.path.join(test_assets_root, '2007_000032.jpg'), @@ -69,7 +75,7 @@ def _prepare_model(self): training_config = yaml.safe_load(f.read()) training_config['anchors'] = '12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401' - training_config['class_names'] = ['person', 'cat'] + training_config['class_names'] = ['person', 'cat', 'unknown-car'] model_storage = mir_utils.ModelStorage(models=['model.params', 'model.json'], executor_config=training_config, @@ -131,6 +137,7 @@ def _mock_run_docker_cmd(*args, **kwargs): def test_00(self, mock_run): fake_args = type('', (), {})() fake_args.work_dir = self._working_root + fake_args.mir_root = self._mir_repo_root fake_args.model_location = self._models_location fake_args.model_hash = 'fake_model_hash' fake_args.index_file = self._assets_index_file diff --git a/ymir/command/tests/unit/test_cmd_init.py b/ymir/command/tests/unit/test_cmd_init.py index 2ebd37c3a6..34c32f78a4 100644 --- a/ymir/command/tests/unit/test_cmd_init.py +++ b/ymir/command/tests/unit/test_cmd_init.py @@ -2,8 +2,6 @@ import shutil import unittest -import yaml - from tests import utils as test_utils from mir.commands import init @@ -18,9 +16,8 @@ def test_init(self): if os.path.isdir(test_root): shutil.rmtree(test_root) os.makedirs(test_root) - test_utils.prepare_labels(mir_root=test_root, names=['xbox', 'person', 'cat']) - init.CmdInit.run_with_args(mir_root=test_root, project_class_names='cat;person', empty_rev='a@a') + init.CmdInit.run_with_args(mir_root=test_root, empty_rev='a@a') assert (os.path.isdir(os.path.join(test_root, ".git"))) assert (os.path.isdir(os.path.join(test_root, ".mir"))) @@ -30,10 +27,3 @@ def test_init(self): with open(ignore_file_path, 'r') as f: lines = f.read().splitlines() assert '.mir' in lines - - # check project context file - project_context_file_path = os.path.join(test_root, '.mir', 'context.yaml') - assert os.path.isfile(project_context_file_path) - with open(project_context_file_path, 'r') as f: - context_obj = yaml.safe_load(f) - assert context_obj['project']['class_ids'] == [2, 1] diff --git a/ymir/command/tests/unit/test_cmd_mining.py b/ymir/command/tests/unit/test_cmd_mining.py index 9f1a57b4fc..655dcfc074 100644 --- a/ymir/command/tests/unit/test_cmd_mining.py +++ b/ymir/command/tests/unit/test_cmd_mining.py @@ -61,6 +61,15 @@ def _mock_run_func(*args, **kwargs): }, 'score': 0.5, 'class_name': 'cat', + }, { + 'box': { + 'x': 50, + 'y': 0, + 'w': 30, + 'h': 30 + }, + 'score': 0.5, + 'class_name': 'unknown-car', # unknown class name, should be ignored }, ], }, @@ -73,7 +82,7 @@ def _mock_run_func(*args, **kwargs): def _mock_prepare_model(*args, **kwargs): model_storage = mir_utils.ModelStorage(models=['0.params'], - executor_config={'class_names': ['person', 'cat']}, + executor_config={'class_names': ['person', 'cat', 'unknown-car']}, task_context={'task_id': '0'}) return model_storage @@ -187,6 +196,7 @@ def test_mining_cmd_00(self, mock_prepare, mock_run): mining_instance.run() 
mock_run.assert_called_once_with(work_dir=args.work_dir, + mir_root=args.mir_root, media_path=os.path.join(args.work_dir, 'in', 'assets'), model_location=args.model_location, model_hash=args.model_hash, diff --git a/ymir/command/tests/unit/test_tools_det_eval.py b/ymir/command/tests/unit/test_tools_det_eval.py new file mode 100644 index 0000000000..c1749a4cd3 --- /dev/null +++ b/ymir/command/tests/unit/test_tools_det_eval.py @@ -0,0 +1,306 @@ +from collections import Counter +import json +import os +import shutil +import unittest + +from google.protobuf import json_format +import numpy as np + +from mir.protos import mir_command_pb2 as mirpb +from mir.tools import det_eval, mir_storage_ops, revs_parser +from tests import utils as test_utils + + +class TestToolsDetEval(unittest.TestCase): + # life cycle + def __init__(self, methodName: str = ...) -> None: + super().__init__(methodName) + self._test_root = test_utils.dir_test_root(self.id().split('.')[-3:]) + self._working_root = os.path.join(self._test_root, 'work') + self._mir_root = os.path.join(self._test_root, 'mir-root') + + def setUp(self) -> None: + self._prepare_dirs() + test_utils.prepare_labels(mir_root=self._mir_root, names=['person', 'cat', 'tv', 'dog']) + self._prepare_mir_repo() + return super().setUp() + + def tearDown(self) -> None: + self._deprepare_dirs() + return super().tearDown() + + # protected: setup and teardown + def _prepare_dirs(self) -> None: + test_utils.remake_dirs(self._test_root) + test_utils.remake_dirs(self._working_root) + test_utils.remake_dirs(self._mir_root) + + def _prepare_mir_repo(self) -> None: + test_utils.mir_repo_init(self._mir_root) + self._prepare_mir_repo_branch_a() + self._prepare_mir_repo_branch_b() + + def _prepare_mir_repo_branch_a(self) -> None: + """ branch a: a ground truth branch """ + metadatas_dict = { + 'attributes': { + 'a0': { + 'assetType': 'AssetTypeImageJpeg', + 'tvtType': 'TvtTypeUnknown', + 'width': 500, + 'height': 500, + 'imageChannels': 3 + }, + 'a1': { + 'assetType': 'AssetTypeImageJpeg', + 'tvtType': 'TvtTypeUnknown', + 'width': 500, + 'height': 500, + 'imageChannels': 3 + }, + 'a2': { + 'assetType': 'AssetTypeImageJpeg', + 'tvtType': 'TvtTypeUnknown', + 'width': 500, + 'height': 500, + 'imageChannels': 3 + } + } + } + mir_metadatas = mirpb.MirMetadatas() + json_format.ParseDict(metadatas_dict, mir_metadatas) + + annotations_dict = { + 'task_annotations': { + 'a': { + 'image_annotations': { + 'a0': { + 'annotations': [{ + 'index': 0, + 'box': { + 'x': 50, + 'y': 50, + 'w': 50, + 'h': 50, + }, + 'class_id': 0, + 'score': 1, + }, { + 'index': 1, + 'box': { + 'x': 150, + 'y': 50, + 'w': 75, + 'h': 75, + }, + 'class_id': 0, + 'score': 1, + }, { + 'index': 2, + 'box': { + 'x': 150, + 'y': 150, + 'w': 75, + 'h': 75, + }, + 'class_id': 1, + 'score': 1, + }, { + 'index': 3, + 'box': { + 'x': 350, + 'y': 50, + 'w': 100, + 'h': 100, + }, + 'class_id': 2, + 'score': 1, + }] + }, + 'a1': { + 'annotations': [{ + 'index': 0, + 'box': { + 'x': 300, + 'y': 300, + 'w': 100, + 'h': 100, + }, + 'class_id': 2, + 'score': 1, + }] + }, + } + } + }, + 'head_task_id': 'a' + } + mir_annotations = mirpb.MirAnnotations() + json_format.ParseDict(annotations_dict, mir_annotations) + + task = mir_storage_ops.create_task(task_type=mirpb.TaskType.TaskTypeImportData, task_id='a', message='import') + mir_storage_ops.MirStorageOps.save_and_commit(mir_root=self._mir_root, + mir_branch='a', + his_branch='master', + mir_datas={ + mirpb.MirStorage.MIR_METADATAS: mir_metadatas, + 
mirpb.MirStorage.MIR_ANNOTATIONS: mir_annotations, + }, + task=task) + + def _prepare_mir_repo_branch_b(self) -> None: + """ branch b: a prediction / detection branch """ + metadatas_dict = { + 'attributes': { + 'a0': { + 'assetType': 'AssetTypeImageJpeg', + 'tvtType': 'TvtTypeUnknown', + 'width': 500, + 'height': 500, + 'imageChannels': 3 + }, + 'a1': { + 'assetType': 'AssetTypeImageJpeg', + 'tvtType': 'TvtTypeUnknown', + 'width': 500, + 'height': 500, + 'imageChannels': 3 + }, + 'a2': { + 'assetType': 'AssetTypeImageJpeg', + 'tvtType': 'TvtTypeUnknown', + 'width': 500, + 'height': 500, + 'imageChannels': 3 + } + } + } + mir_metadatas = mirpb.MirMetadatas() + json_format.ParseDict(metadatas_dict, mir_metadatas) + + annotations_dict = { + 'task_annotations': { + 'b': { + 'image_annotations': { + 'a0': { + 'annotations': [{ + 'index': 0, + 'box': { + 'x': 45, + 'y': 45, + 'w': 52, + 'h': 52, + }, + 'class_id': 0, + 'score': 0.7, + }, { + 'index': 1, + 'box': { + 'x': 150, + 'y': 50, + 'w': 73, + 'h': 73, + }, + 'class_id': 0, + 'score': 0.8, + }, { + 'index': 2, + 'box': { + 'x': 350, + 'y': 50, + 'w': 76, + 'h': 76, + }, + 'class_id': 0, + 'score': 0.9, + }, { + 'index': 3, + 'box': { + 'x': 150, + 'y': 160, + 'w': 78, + 'h': 78, + }, + 'class_id': 1, + 'score': 0.9, + }, { + 'index': 4, + 'box': { + 'x': 350, + 'y': 50, + 'w': 102, + 'h': 103, + }, + 'class_id': 2, + 'score': 0.9, + }] + }, + 'a1': { + 'annotations': [{ + 'index': 0, + 'box': { + 'x': 300, + 'y': 300, + 'w': 103, + 'h': 110, + }, + 'class_id': 2, + 'score': 0.9, + }] + }, + } + } + }, + 'head_task_id': 'b' + } + mir_annotations = mirpb.MirAnnotations() + json_format.ParseDict(annotations_dict, mir_annotations) + + task = mir_storage_ops.create_task(task_type=mirpb.TaskType.TaskTypeImportData, task_id='b', message='import') + mir_storage_ops.MirStorageOps.save_and_commit(mir_root=self._mir_root, + mir_branch='b', + his_branch='master', + mir_datas={ + mirpb.MirStorage.MIR_METADATAS: mir_metadatas, + mirpb.MirStorage.MIR_ANNOTATIONS: mir_annotations, + }, + task=task) + + def _deprepare_dirs(self) -> None: + if os.path.isdir(self._test_root): + shutil.rmtree(self._test_root) + + # public: test cases + def test_mir_coco(self): + mir_coco = det_eval.MirCoco(mir_root=self._mir_root, + rev_tid=revs_parser.parse_single_arg_rev('a@a', need_tid=False), + conf_thr=0) + self.assertEqual(['a0', 'a1', 'a2'], mir_coco.get_asset_ids()) + self.assertEqual([0, 1, 2], mir_coco.get_asset_idxes()) + self.assertEqual([0, 1, 2], mir_coco.get_class_ids()) + + self.assertEqual(2, len(mir_coco.img_cat_to_annotations[(0, 0)])) + + def test_mir_eval_00(self): + """ align our eval with original COCOeval """ + + # original result from pycocotools + expected_stats = np.array( + [0.61177118, 0.88888889, 0.41749175, -1.0, 0.46716172, 0.9009901, 0.46666667, 0.7, 0.7, -1.0, 0.6, 0.9]) + + # ymir's eval + mir_gt = det_eval.MirCoco(mir_root=self._mir_root, + rev_tid=revs_parser.parse_single_arg_rev('a@a', need_tid=False), + conf_thr=0) + mir_dt = det_eval.MirCoco(mir_root=self._mir_root, + rev_tid=revs_parser.parse_single_arg_rev('b@b', need_tid=False), + conf_thr=0) + mir_evaluator = det_eval.MirDetEval(coco_gt=mir_gt, coco_dt=mir_dt) + mir_evaluator.evaluate() + mir_evaluator.accumulate() + mir_evaluator.summarize() + self.assertTrue(np.isclose(expected_stats, mir_evaluator.stats).all()) + + mir_evaluation_result = mir_evaluator.get_evaluation_result() + self.assertTrue(len(mir_evaluation_result.iou_evaluations) > 0) diff --git 
a/ymir/command/tests/utils.py b/ymir/command/tests/utils.py index a086374e20..2c48f39db8 100644 --- a/ymir/command/tests/utils.py +++ b/ymir/command/tests/utils.py @@ -26,8 +26,8 @@ def check_commands(): # mir repo operations -def mir_repo_init(mir_root: str, project_class_names: str = ''): - return_code = CmdInit.run_with_args(mir_root, project_class_names=project_class_names, empty_rev='') +def mir_repo_init(mir_root: str): + return_code = CmdInit.run_with_args(mir_root, empty_rev='') assert return_code == MirCode.RC_OK, "init failed" diff --git a/ymir/web/package-lock.json b/ymir/web/package-lock.json index 68d90c788e..1250f3a0be 100644 --- a/ymir/web/package-lock.json +++ b/ymir/web/package-lock.json @@ -1,12 +1,12 @@ { "name": "ymir-web", - "version": "1.0.0", + "version": "1.1.0.0517", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "ymir-web", - "version": "1.0.0", + "version": "1.1.0.0517", "hasInstallScript": true, "dependencies": { "@ant-design/pro-layout": "^6.32.6", @@ -15,7 +15,7 @@ "@antv/graphin-components": "^2.4.0", "@antv/graphin-icons": "^1.0.0", "@umijs/preset-react": "1.x", - "antd": "^4.18.4", + "antd": "^4.20.0", "antd-img-crop": "^4.1.0", "axios": "^0.25.0", "color": "^4.2.0", @@ -2912,9 +2912,9 @@ } }, "node_modules/antd": { - "version": "4.18.5", - "resolved": "https://registry.npmmirror.com/antd/download/antd-4.18.5.tgz", - "integrity": "sha512-5fN3C2lWAzonhOYYlNpzIw2OHl7vxFZ+4cJ7DK/XZrV+75OY61Y+OkanqMJwrFtDDamIez35OM7cAezGko9tew==", + "version": "4.20.0", + "resolved": "https://registry.npmmirror.com/antd/-/antd-4.20.0.tgz", + "integrity": "sha512-Msowfvabsn/yJIo3qYU0vMqGb31OUylMeFRDilosBViG2AS8R2VB2IX53kbw4kFV3vr7fr2HXcuQkf/FMLU+Dg==", "dependencies": { "@ant-design/colors": "^6.0.0", "@ant-design/icons": "^4.7.0", @@ -2925,38 +2925,40 @@ "copy-to-clipboard": "^3.2.0", "lodash": "^4.17.21", "memoize-one": "^6.0.0", - "moment": "^2.25.3", - "rc-cascader": "~3.2.1", + "moment": "^2.29.2", + "rc-cascader": "~3.5.0", "rc-checkbox": "~2.3.0", "rc-collapse": "~3.1.0", - "rc-dialog": "~8.6.0", + "rc-dialog": "~8.8.1", "rc-drawer": "~4.4.2", - "rc-dropdown": "~3.2.0", - "rc-field-form": "~1.22.0-2", - "rc-image": "~5.2.5", + "rc-dropdown": "~3.5.0", + "rc-field-form": "~1.26.1", + "rc-image": "~5.6.0", + "rc-input": "~0.0.1-alpha.5", "rc-input-number": "~7.3.0", - "rc-mentions": "~1.6.1", - "rc-menu": "~9.2.1", - "rc-motion": "^2.4.4", - "rc-notification": "~4.5.7", + "rc-mentions": "~1.7.0", + "rc-menu": "~9.5.5", + "rc-motion": "^2.5.1", + "rc-notification": "~4.6.0", "rc-pagination": "~3.1.9", - "rc-picker": "~2.5.17", + "rc-picker": "~2.6.4", "rc-progress": "~3.2.1", "rc-rate": "~2.9.0", "rc-resize-observer": "^1.2.0", - "rc-select": "~14.0.0-alpha.15", - "rc-slider": "~9.7.4", + "rc-segmented": "~2.0.0", + "rc-select": "~14.1.1", + "rc-slider": "~10.0.0", "rc-steps": "~4.1.0", "rc-switch": "~3.2.0", - "rc-table": "~7.22.2", - "rc-tabs": "~11.10.0", + "rc-table": "~7.24.0", + "rc-tabs": "~11.13.0", "rc-textarea": "~0.3.0", "rc-tooltip": "~5.1.1", - "rc-tree": "~5.4.3", - "rc-tree-select": "~5.1.1", + "rc-tree": "~5.5.0", + "rc-tree-select": "~5.3.0", "rc-trigger": "^5.2.10", "rc-upload": "~4.3.0", - "rc-util": "^5.14.0", + "rc-util": "^5.20.0", "scroll-into-view-if-needed": "^2.2.25" }, "peerDependencies": { @@ -3261,10 +3263,9 @@ "integrity": "sha512-csOlWGAcRFJaI6m+F2WKdnMKr4HhdhFVBk0H/QbJFMCr+uO2kwohwXQPxw/9OCxp05r5ghVBFSyioixx3gfkNQ== sha1-3TeelPDbgxCwgpH51kwyCXZmF/0=" }, "node_modules/async-validator": { - "version": "4.0.7", - "resolved": 
"https://registry.npmmirror.com/async-validator/download/async-validator-4.0.7.tgz", - "integrity": "sha512-Pj2IR7u8hmUEDOwB++su6baaRi+QvsgajuFB9j95foM1N2gy5HM4z60hfusIO0fBPG5uLAEl6yCJr1jNSVugEQ== sha1-A0oP0hA6ay6/AQ2nUYO+wpkkev4=", - "license": "MIT" + "version": "4.1.1", + "resolved": "https://registry.npmmirror.com/async-validator/-/async-validator-4.1.1.tgz", + "integrity": "sha512-p4DO/JXwjs8klJyJL8Q2oM4ks5fUTze/h5k10oPPKMiLe1fj3G1QMzPHNmN1Py4ycOk7WlO2DcGXv1qiESJCZA==" }, "node_modules/asynckit": { "version": "0.4.0", @@ -4619,20 +4620,16 @@ }, "node_modules/date-fns": { "version": "2.28.0", - "resolved": "https://registry.npmmirror.com/date-fns/download/date-fns-2.28.0.tgz", + "resolved": "https://registry.npmmirror.com/date-fns/-/date-fns-2.28.0.tgz", "integrity": "sha512-8d35hViGYx/QH0icHYCeLmsLmMUheMmTyV9Fcm6gvNwdw31yXXH+O85sOBJ+OLnLQMKZowvpKb6FgMIQjcpvQw==", "engines": { "node": ">=0.11" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/date-fns" } }, "node_modules/dayjs": { - "version": "1.10.7", - "resolved": "https://registry.npmmirror.com/dayjs/download/dayjs-1.10.7.tgz", - "integrity": "sha512-P6twpd70BcPK34K26uJ1KT3wlhpuOAPoMwJzpsIWUxHZ7wpmbdZL/hQqBDfz7hGurYSa5PhzdhDHtt319hL3ig==" + "version": "1.11.2", + "resolved": "https://registry.npmmirror.com/dayjs/-/dayjs-1.11.2.tgz", + "integrity": "sha512-F4LXf1OeU9hrSYRPTTj/6FbO4HTjPKXvEIC1P2kcnFurViINCVk3ZV0xAS3XVx9MkMsXbbqlK6hjseaYbgKEHw==" }, "node_modules/debug": { "version": "4.3.3", @@ -9864,9 +9861,9 @@ } }, "node_modules/moment": { - "version": "2.29.1", - "resolved": "https://registry.npmmirror.com/moment/download/moment-2.29.1.tgz", - "integrity": "sha512-kHmoybcPV8Sqy59DwNDY3Jefr64lK/by/da0ViFcuA4DH0vQg5Q6Ze5VimxkfQNSC+Mls/Kx53s7TjP1RhFEDQ==", + "version": "2.29.3", + "resolved": "https://registry.npmmirror.com/moment/-/moment-2.29.3.tgz", + "integrity": "sha512-c6YRvhEo//6T2Jz/vVtYzqBzwvPT95JBQ+smCytzf7c50oMZRsR/a4w88aD34I+/QVSfnoAnSBFPJHItlOMJVw==", "engines": { "node": "*" } @@ -11302,9 +11299,9 @@ } }, "node_modules/rc-align": { - "version": "4.0.11", - "resolved": "https://registry.nlark.com/rc-align/download/rc-align-4.0.11.tgz?cache=0&sync_timestamp=1628678366435&other_urls=https%3A%2F%2Fregistry.nlark.com%2Frc-align%2Fdownload%2Frc-align-4.0.11.tgz", - "integrity": "sha512-n9mQfIYQbbNTbefyQnRHZPWuTEwG1rY4a9yKlIWHSTbgwI+XUMGRYd0uJ5pE2UbrNX0WvnMBA1zJ3Lrecpra/A== sha1-gZjGLbJmvBuO8F5WwTJ1v3Jiil4=", + "version": "4.0.12", + "resolved": "https://registry.npmmirror.com/rc-align/-/rc-align-4.0.12.tgz", + "integrity": "sha512-3DuwSJp8iC/dgHzwreOQl52soj40LchlfUHtgACOUtwGuoFIOVh6n/sCpfqCU8kO5+iz6qR0YKvjgB8iPdE3aQ==", "dependencies": { "@babel/runtime": "^7.10.1", "classnames": "2.x", @@ -11350,15 +11347,15 @@ "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ== sha1-eJcppNw23imZ3BVt1sHZwYzqVqQ=" }, "node_modules/rc-cascader": { - "version": "3.2.1", - "resolved": "https://registry.npmmirror.com/rc-cascader/download/rc-cascader-3.2.1.tgz", - "integrity": "sha512-Raxam9tFzBL4TCgHoyVcf7+Q2KSFneUk3FZXi9w1tfxEihLlezSH0oCNMjHJN8hxWwwx9ZbI9UzWTfFImjXc0Q==", + "version": "3.5.0", + "resolved": "https://registry.npmmirror.com/rc-cascader/-/rc-cascader-3.5.0.tgz", + "integrity": "sha512-rpXnWCfvk7Frh2dBzMoA0c7i0nn6aJU7L2NZo8R8pNkrT0sKgytQSpdtPWP+Pq8IkvwbEd8BU8Z8OnOljcqgZg==", "dependencies": { "@babel/runtime": "^7.12.5", "array-tree-filter": "^2.1.0", "classnames": "^2.3.1", - "rc-select": "~14.0.0-alpha.23", - "rc-tree": "~5.4.3", + 
"rc-select": "~14.1.0", + "rc-tree": "~5.5.0", "rc-util": "^5.6.1" }, "peerDependencies": { @@ -11396,14 +11393,14 @@ } }, "node_modules/rc-dialog": { - "version": "8.6.0", - "resolved": "https://registry.nlark.com/rc-dialog/download/rc-dialog-8.6.0.tgz", - "integrity": "sha512-GSbkfqjqxpZC5/zc+8H332+q5l/DKUhpQr0vdX2uDsxo5K0PhvaMEVjyoJUTkZ3+JstEADQji1PVLVb/2bJeOQ== sha1-OyKNrAhd5e7YxiN/MRYhBGh0Quc=", + "version": "8.8.1", + "resolved": "https://registry.npmmirror.com/rc-dialog/-/rc-dialog-8.8.1.tgz", + "integrity": "sha512-7M1WKZCjfIABKEaJVskdYvb80z+RX7I11PeSjPVfLOOaJAmIepvDEd0alBtOZvOL3fZFWlMs4JVZtp9LZgONxA==", "dependencies": { "@babel/runtime": "^7.10.1", "classnames": "^2.2.6", "rc-motion": "^2.3.0", - "rc-util": "^5.6.1" + "rc-util": "^5.21.0" }, "peerDependencies": { "react": ">=16.9.0", @@ -11425,34 +11422,35 @@ } }, "node_modules/rc-dropdown": { - "version": "3.2.2", - "resolved": "https://registry.npmmirror.com/rc-dropdown/download/rc-dropdown-3.2.2.tgz", - "integrity": "sha512-oA9VYYg+jQaPRdFoYFfBn5EAQk2NlL6H0vR2v6JG/8i4HEfUq8p1TTt6HyQ/dGxLe8lpnK+nM7WCjgZT/cpSRQ==", + "version": "3.5.2", + "resolved": "https://registry.npmmirror.com/rc-dropdown/-/rc-dropdown-3.5.2.tgz", + "integrity": "sha512-Ty4LsXjkspZuFJSRx3blCLLCDicXM5qds6F1odgEa+jcjC+OJKHQGnvE4FqtoljPaqWm4wG78pbgXH6Ddh2DkA==", "dependencies": { "@babel/runtime": "^7.10.1", "classnames": "^2.2.6", - "rc-trigger": "^5.0.4" + "rc-trigger": "^5.0.4", + "rc-util": "^5.17.0" }, "peerDependencies": { - "react": "*", - "react-dom": "*" + "react": ">=16.11.0", + "react-dom": ">=16.11.0" } }, "node_modules/rc-field-form": { - "version": "1.22.1", - "resolved": "https://registry.npmmirror.com/rc-field-form/download/rc-field-form-1.22.1.tgz", - "integrity": "sha512-LweU7nBeqmC5r3HDUjRprcOXXobHXp/TGIxD7ppBq5FX6Iptt3ibdpRVg4RSyNulBNGHOuknHlRcguuIpvVMVg==", + "version": "1.26.4", + "resolved": "https://registry.npmmirror.com/rc-field-form/-/rc-field-form-1.26.4.tgz", + "integrity": "sha512-eCCyiNNaN0NTYTyoziQHD4Fj6mUED21lWkw66vg+kttg0eDw+miD6LsaJbTD5c2bzKjUJTf10AitPG+f5zT4+A==", "dependencies": { "@babel/runtime": "^7.8.4", - "async-validator": "^4.0.2", + "async-validator": "^4.1.0", "rc-util": "^5.8.0" }, "engines": { "node": ">=8.x" }, "peerDependencies": { - "react": ">= 16.9.0", - "react-dom": ">= 16.9.0" + "react": ">=16.9.0", + "react-dom": ">=16.9.0" } }, "node_modules/rc-gesture": { @@ -11464,13 +11462,13 @@ } }, "node_modules/rc-image": { - "version": "5.2.5", - "resolved": "https://registry.nlark.com/rc-image/download/rc-image-5.2.5.tgz?cache=0&other_urls=https%3A%2F%2Fregistry.nlark.com%2Frc-image%2Fdownload%2Frc-image-5.2.5.tgz", - "integrity": "sha512-qUfZjYIODxO0c8a8P5GeuclYXZjzW4hV/5hyo27XqSFo1DmTCs2HkVeQObkcIk5kNsJtgsj1KoPThVsSc/PXOw== sha1-ROb/yEJiaCeWDnq3LhwNbzqM5EA=", + "version": "5.6.2", + "resolved": "https://registry.npmmirror.com/rc-image/-/rc-image-5.6.2.tgz", + "integrity": "sha512-qhKOVvivCZkd6CrzS/4ST2+Auu16mtPSFVqVzwE7sELWfuvzcLGTzGv8UsVvm6qRNIz6SeaueUetqi4Ii16XQA==", "dependencies": { "@babel/runtime": "^7.11.2", "classnames": "^2.2.6", - "rc-dialog": "~8.6.0", + "rc-dialog": "~8.8.0", "rc-util": "^5.0.6" }, "peerDependencies": { @@ -11478,6 +11476,20 @@ "react-dom": ">=16.9.0" } }, + "node_modules/rc-input": { + "version": "0.0.1-alpha.7", + "resolved": "https://registry.npmmirror.com/rc-input/-/rc-input-0.0.1-alpha.7.tgz", + "integrity": "sha512-eozaqpCYWSY5LBMwlHgC01GArkVEP+XlJ84OMvdkwUnJBSv83Yxa15pZpn7vACAj84uDC4xOA2CoFdbLuqB08Q==", + "dependencies": { + "@babel/runtime": "^7.11.1", + "classnames": 
"^2.2.1", + "rc-util": "^5.18.1" + }, + "peerDependencies": { + "react": ">=16.0.0", + "react-dom": ">=16.0.0" + } + }, "node_modules/rc-input-number": { "version": "7.3.4", "resolved": "https://registry.npmmirror.com/rc-input-number/download/rc-input-number-7.3.4.tgz", @@ -11493,13 +11505,13 @@ } }, "node_modules/rc-mentions": { - "version": "1.6.1", - "resolved": "https://registry.nlark.com/rc-mentions/download/rc-mentions-1.6.1.tgz", - "integrity": "sha512-LDzGI8jJVGnkhpTZxZuYBhMz3avcZZqPGejikchh97xPni/g4ht714Flh7DVvuzHQ+BoKHhIjobHnw1rcP8erg== sha1-RgNQJ9ZKoz74QLoPvUEYceNGF64=", + "version": "1.7.1", + "resolved": "https://registry.npmmirror.com/rc-mentions/-/rc-mentions-1.7.1.tgz", + "integrity": "sha512-JbCS9bTqt6BYN2vfTPythlScLuc42rIlX85n7975RnkfawXlJjskHOlR3o8EpD4asl4KuA2jKTy0dj39DtSVqg==", "dependencies": { "@babel/runtime": "^7.10.1", "classnames": "^2.2.6", - "rc-menu": "^9.0.0", + "rc-menu": "~9.5.1", "rc-textarea": "^0.3.0", "rc-trigger": "^5.0.4", "rc-util": "^5.0.1" @@ -11510,9 +11522,9 @@ } }, "node_modules/rc-menu": { - "version": "9.2.1", - "resolved": "https://registry.npmmirror.com/rc-menu/download/rc-menu-9.2.1.tgz", - "integrity": "sha512-UbEtn3rflJ8zS+etYGTVQuzy7Fm+yWXR5c0Rl6ecNTS/dPknRyWAyhJcbeR0Hu1+RdQT+0VCqrUPrgKnm4iY+w==", + "version": "9.5.5", + "resolved": "https://registry.npmmirror.com/rc-menu/-/rc-menu-9.5.5.tgz", + "integrity": "sha512-wj2y2BAKwSMyWXO3RBf9sNN5V+DFWxFl45Ma6qQEHA5nwwh7p07bNgc6AAJc+L1+LAz+rWz3AU8PYyT17hMHCw==", "dependencies": { "@babel/runtime": "^7.10.1", "classnames": "2.x", @@ -11528,13 +11540,13 @@ } }, "node_modules/rc-motion": { - "version": "2.4.4", - "resolved": "https://registry.nlark.com/rc-motion/download/rc-motion-2.4.4.tgz", - "integrity": "sha512-ms7n1+/TZQBS0Ydd2Q5P4+wJTSOrhIrwNxLXCZpR7Fa3/oac7Yi803HDALc2hLAKaCTQtw9LmQeB58zcwOsqlQ== sha1-6ZXV+iT8kwZcJPcUhXzyZ31lW7A=", + "version": "2.6.0", + "resolved": "https://registry.npmmirror.com/rc-motion/-/rc-motion-2.6.0.tgz", + "integrity": "sha512-1MDWA9+i174CZ0SIDenSYm2Wb9YbRkrexjZWR0CUFu7D6f23E8Y0KsTgk9NGOLJsGak5ELZK/Y5lOlf5wQdzbw==", "dependencies": { "@babel/runtime": "^7.11.1", "classnames": "^2.2.1", - "rc-util": "^5.2.1" + "rc-util": "^5.21.0" }, "peerDependencies": { "react": ">=16.9.0", @@ -11542,14 +11554,14 @@ } }, "node_modules/rc-notification": { - "version": "4.5.7", - "resolved": "https://registry.npmmirror.com/rc-notification/download/rc-notification-4.5.7.tgz", - "integrity": "sha512-zhTGUjBIItbx96SiRu3KVURcLOydLUHZCPpYEn1zvh+re//Tnq/wSxN4FKgp38n4HOgHSVxcLEeSxBMTeBBDdw== sha1-Jl5uagwaD6xj1qvU2DLrj/MVIvE=", + "version": "4.6.0", + "resolved": "https://registry.npmmirror.com/rc-notification/-/rc-notification-4.6.0.tgz", + "integrity": "sha512-xF3MKgIoynzjQAO4lqsoraiFo3UXNYlBfpHs0VWvwF+4pimen9/H1DYLN2mfRWhHovW6gRpla73m2nmyIqAMZQ==", "dependencies": { "@babel/runtime": "^7.10.1", "classnames": "2.x", "rc-motion": "^2.2.0", - "rc-util": "^5.0.1" + "rc-util": "^5.20.1" }, "engines": { "node": ">=8.x" @@ -11560,14 +11572,14 @@ } }, "node_modules/rc-overflow": { - "version": "1.2.2", - "resolved": "https://registry.nlark.com/rc-overflow/download/rc-overflow-1.2.2.tgz", - "integrity": "sha512-X5kj9LDU1ue5wHkqvCprJWLKC+ZLs3p4He/oxjZ1Q4NKaqKBaYf5OdSzRSgh3WH8kSdrfU8LjvlbWnHgJOEkNQ== sha1-lbAiIBbAzb3A24X1acJi53BqXyI=", + "version": "1.2.5", + "resolved": "https://registry.npmmirror.com/rc-overflow/-/rc-overflow-1.2.5.tgz", + "integrity": "sha512-5HJKZ4nPe9e7AFdCkflgpRydvH6lJ4i2iFF06q/T1G9lL/XBeuoPLRrTBU8ao/Vo/yARW6WfEHnC2951lVgX5Q==", "dependencies": { "@babel/runtime": "^7.11.1", 
"classnames": "^2.2.1", "rc-resize-observer": "^1.0.0", - "rc-util": "^5.5.1" + "rc-util": "^5.19.2" }, "peerDependencies": { "react": ">=16.9.0", @@ -11588,10 +11600,9 @@ } }, "node_modules/rc-picker": { - "version": "2.5.19", - "resolved": "https://registry.npmmirror.com/rc-picker/download/rc-picker-2.5.19.tgz", - "integrity": "sha512-u6myoCu/qiQ0vLbNzSzNrzTQhs7mldArCpPHrEI6OUiifs+IPXmbesqSm0zilJjfzrZJLgYeyyOMSznSlh0GKA== sha1-c9B1RvrDmS8L+r8niWVKyto55G8=", - "license": "MIT", + "version": "2.6.9", + "resolved": "https://registry.npmmirror.com/rc-picker/-/rc-picker-2.6.9.tgz", + "integrity": "sha512-yH3UYXCADf7REtOAB5cwe1cyFKtB0p204RCN8JdZGG4uuSOZ1IPTkk/GJS6HOpxspZeJCLGzzajuQMDwck9dsw==", "dependencies": { "@babel/runtime": "^7.10.1", "classnames": "^2.2.1", @@ -11656,10 +11667,25 @@ "react-dom": ">=16.9.0" } }, + "node_modules/rc-segmented": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/rc-segmented/-/rc-segmented-2.0.0.tgz", + "integrity": "sha512-YsdS+aP7E6ZMEY35WSlewJIsrjPbBSP4X/7RvZtzLExKDZwFvXdCPCbWFVDNks4jOYY9TUPYt7qlVifEu9/zXA==", + "dependencies": { + "@babel/runtime": "^7.11.1", + "classnames": "^2.2.1", + "rc-motion": "^2.4.4", + "rc-util": "^5.17.0" + }, + "peerDependencies": { + "react": ">=16.0.0", + "react-dom": ">=16.0.0" + } + }, "node_modules/rc-select": { - "version": "14.0.0-alpha.24", - "resolved": "https://registry.npmmirror.com/rc-select/download/rc-select-14.0.0-alpha.24.tgz", - "integrity": "sha512-EBjdBv4aIXNQC4stn/eh3zdsF3piRsFCS+4aYjwkm6T0oc5zI48dlYtrMs/lwR/3H23At6L2w3fhHhhH8qNbJQ==", + "version": "14.1.2", + "resolved": "https://registry.npmmirror.com/rc-select/-/rc-select-14.1.2.tgz", + "integrity": "sha512-/QgarL/T/d7MIPcoRmTca2TWHBoHBM1EQIgdaFmvl3qsYRSbrb8NpWcQuJoc9fprXERWxdYSTUThQObHvdEVBQ==", "dependencies": { "@babel/runtime": "^7.10.1", "classnames": "2.x", @@ -11678,14 +11704,14 @@ } }, "node_modules/rc-slider": { - "version": "9.7.5", - "resolved": "https://registry.npmmirror.com/rc-slider/download/rc-slider-9.7.5.tgz", - "integrity": "sha512-LV/MWcXFjco1epPbdw1JlLXlTgmWpB9/Y/P2yinf8Pg3wElHxA9uajN21lJiWtZjf5SCUekfSP6QMJfDo4t1hg==", + "version": "10.0.0", + "resolved": "https://registry.npmmirror.com/rc-slider/-/rc-slider-10.0.0.tgz", + "integrity": "sha512-Bk54UIKWW4wyhHcL8ehAxt+wX+n69dscnHTX6Uv0FMxSke/TGrlkZz1LSIWblCpfE2zr/dwR2Ca8nZGk3U+Tbg==", "dependencies": { "@babel/runtime": "^7.10.1", "classnames": "^2.2.5", "rc-tooltip": "^5.0.1", - "rc-util": "^5.16.1", + "rc-util": "^5.18.1", "shallowequal": "^1.1.0" }, "engines": { @@ -11739,9 +11765,9 @@ } }, "node_modules/rc-table": { - "version": "7.22.2", - "resolved": "https://registry.npmmirror.com/rc-table/download/rc-table-7.22.2.tgz", - "integrity": "sha512-Ng2gNkGi6ybl6dzneRn2H4Gp8XhIbRa5rXQ7ZhZcgWVmfVMok70UHGPXcf68tXW6O0/qckTf/eOVsoviSvK4sw==", + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/rc-table/-/rc-table-7.24.1.tgz", + "integrity": "sha512-DRWpv5z5pmOaTmy5GqWoskeV1thaOu5HuD+2f61b/CkbBqlgJR3cygc5R/Qvd2uVW6pHU0lYulhmz0VLVFm+rw==", "dependencies": { "@babel/runtime": "^7.10.1", "classnames": "^2.2.5", @@ -11758,14 +11784,14 @@ } }, "node_modules/rc-tabs": { - "version": "11.10.5", - "resolved": "https://registry.npmmirror.com/rc-tabs/download/rc-tabs-11.10.5.tgz", - "integrity": "sha512-DDuUdV6b9zGRYLtjI5hyejWLKoz1QiLWNgMeBzc3aMeQylZFhTYnFGdDc6HRqj5IYearNTsFPVSA+6VIT8g5cg==", + "version": "11.13.0", + "resolved": "https://registry.npmmirror.com/rc-tabs/-/rc-tabs-11.13.0.tgz", + "integrity": 
"sha512-aUw1Pq0B1a2zGX4o/m3yrQycZcCLgDp6gKwn8IAU07q148RRONsVGxi0oLVVe5SE51kOB+j0bk1RX43ZBdZNgA==", "dependencies": { "@babel/runtime": "^7.11.2", "classnames": "2.x", - "rc-dropdown": "^3.2.0", - "rc-menu": "^9.0.0", + "rc-dropdown": "~3.5.0", + "rc-menu": "~9.5.1", "rc-resize-observer": "^1.0.0", "rc-util": "^5.5.0" }, @@ -11779,7 +11805,7 @@ }, "node_modules/rc-textarea": { "version": "0.3.7", - "resolved": "https://registry.npmmirror.com/rc-textarea/download/rc-textarea-0.3.7.tgz", + "resolved": "https://registry.npmmirror.com/rc-textarea/-/rc-textarea-0.3.7.tgz", "integrity": "sha512-yCdZ6binKmAQB13hc/oehh0E/QRwoPP1pjF21aHBxlgXO3RzPF6dUu4LG2R4FZ1zx/fQd2L1faktulrXOM/2rw==", "dependencies": { "@babel/runtime": "^7.10.1", @@ -11795,8 +11821,8 @@ }, "node_modules/rc-tooltip": { "version": "5.1.1", - "resolved": "https://registry.nlark.com/rc-tooltip/download/rc-tooltip-5.1.1.tgz", - "integrity": "sha512-alt8eGMJulio6+4/uDm7nvV+rJq9bsfxFDCI0ljPdbuoygUscbsMYb6EQgwib/uqsXQUvzk+S7A59uYHmEgmDA== sha1-lBeO0WLQJSvEmTtyX13CrA/M8VQ=", + "resolved": "https://registry.npmmirror.com/rc-tooltip/-/rc-tooltip-5.1.1.tgz", + "integrity": "sha512-alt8eGMJulio6+4/uDm7nvV+rJq9bsfxFDCI0ljPdbuoygUscbsMYb6EQgwib/uqsXQUvzk+S7A59uYHmEgmDA==", "dependencies": { "@babel/runtime": "^7.11.2", "rc-trigger": "^5.0.0" @@ -11807,15 +11833,15 @@ } }, "node_modules/rc-tree": { - "version": "5.4.3", - "resolved": "https://registry.npmmirror.com/rc-tree/download/rc-tree-5.4.3.tgz", - "integrity": "sha512-WAHV8FkBerulj9J/+61+Qn0TD/Zo37PrDG8/45WomzGTYavxFMur9YguKjQj/J+NxjVJzrJL3lvdSZsumfdbiA==", + "version": "5.5.0", + "resolved": "https://registry.npmmirror.com/rc-tree/-/rc-tree-5.5.0.tgz", + "integrity": "sha512-vpKeFsDyj7weik8UPseCTaSNAPt939qn1dQd8goSbRDajbjJEja0v/WFXyRhOiF1HLemNTfqMz4MYc9qlqyNXg==", "dependencies": { "@babel/runtime": "^7.10.1", "classnames": "2.x", "rc-motion": "^2.0.1", "rc-util": "^5.16.1", - "rc-virtual-list": "^3.4.1" + "rc-virtual-list": "^3.4.2" }, "engines": { "node": ">=10.x" @@ -11826,14 +11852,14 @@ } }, "node_modules/rc-tree-select": { - "version": "5.1.1", - "resolved": "https://registry.npmmirror.com/rc-tree-select/download/rc-tree-select-5.1.1.tgz", - "integrity": "sha512-jchIaOTBvJjr3WJXPJc4wCeROIktkq8Ykf888GmL94nItJmqS9H6nCjSchEtkUbtDbZwx52tIJjzc81GWQbm/w==", + "version": "5.3.0", + "resolved": "https://registry.npmmirror.com/rc-tree-select/-/rc-tree-select-5.3.0.tgz", + "integrity": "sha512-UN6CUBulmch+CsihnJ73+DtWijEB1hVTC8sdVxq6E0teVAkHQZUvDj+cwZShtShAKvWwXy73PZ1hIHEUrmVcKw==", "dependencies": { "@babel/runtime": "^7.10.1", "classnames": "2.x", - "rc-select": "~14.0.0-alpha.8", - "rc-tree": "~5.4.3", + "rc-select": "~14.1.0", + "rc-tree": "~5.5.0", "rc-util": "^5.16.1" }, "peerDependencies": { @@ -11842,15 +11868,15 @@ } }, "node_modules/rc-trigger": { - "version": "5.2.10", - "resolved": "https://registry.nlark.com/rc-trigger/download/rc-trigger-5.2.10.tgz?cache=0&sync_timestamp=1628677456708&other_urls=https%3A%2F%2Fregistry.nlark.com%2Frc-trigger%2Fdownload%2Frc-trigger-5.2.10.tgz", - "integrity": "sha512-FkUf4H9BOFDaIwu42fvRycXMAvkttph9AlbCZXssZDVzz2L+QZ0ERvfB/4nX3ZFPh1Zd+uVGr1DEDeXxq4J1TA== sha1-igBXqUCxuQJ+qjO+7IpuzYXM4rE=", + "version": "5.2.18", + "resolved": "https://registry.npmmirror.com/rc-trigger/-/rc-trigger-5.2.18.tgz", + "integrity": "sha512-hi2yZ7umtbAGLxgSph1az9BR9i4Pb4fiQa4pdvFQuKN7U//3nwwygHQKHfexnM+0APBnzZwVlEHA5I8BpWrygw==", "dependencies": { "@babel/runtime": "^7.11.2", "classnames": "^2.2.6", "rc-align": "^4.0.0", "rc-motion": "^2.0.0", - "rc-util": "^5.5.0" + 
"rc-util": "^5.19.2" }, "engines": { "node": ">=8.x" @@ -11875,9 +11901,9 @@ } }, "node_modules/rc-util": { - "version": "5.17.0", - "resolved": "https://registry.npmmirror.com/rc-util/download/rc-util-5.17.0.tgz", - "integrity": "sha512-HWuTIKzBeZQQ7IBqdokE0wMp/xx39/KfUJ0gcquBigoldDCrf3YBcWFHrrQlJG7sI82Wg8mwp1uAKV3zMGfAgg==", + "version": "5.21.2", + "resolved": "https://registry.npmmirror.com/rc-util/-/rc-util-5.21.2.tgz", + "integrity": "sha512-QuuZ2tKMScGtxSx3rLzgPGGDZm/np7phMqA7OcDidSf44abvSk+AdtdD7ZvQPvCEtdC6nCSI5tEVnUaYjjD9/w==", "dependencies": { "@babel/runtime": "^7.12.5", "react-is": "^16.12.0", @@ -11894,14 +11920,13 @@ "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ== sha1-eJcppNw23imZ3BVt1sHZwYzqVqQ=" }, "node_modules/rc-virtual-list": { - "version": "3.4.2", - "resolved": "https://registry.npmmirror.com/rc-virtual-list/download/rc-virtual-list-3.4.2.tgz", - "integrity": "sha512-OyVrrPvvFcHvV0ssz5EDZ+7Rf5qLat/+mmujjchNw5FfbJWNDwkpQ99EcVE6+FtNRmX9wFa1LGNpZLUTvp/4GQ== sha1-EHgyeqcjC15FbWee0s6Z88A269E=", - "license": "MIT", + "version": "3.4.7", + "resolved": "https://registry.npmmirror.com/rc-virtual-list/-/rc-virtual-list-3.4.7.tgz", + "integrity": "sha512-PhV8a8g/L9sCmWcmXizzwW7QdqsxK4ebHU6fA9OsUIR7isFdx2bTGU2iAUdRV4teiIF1ZHF3gSQh8NtAxrXh6A==", "dependencies": { "classnames": "^2.2.6", "rc-resize-observer": "^1.0.0", - "rc-util": "^5.0.7" + "rc-util": "^5.15.0" }, "engines": { "node": ">=8.x" @@ -17453,9 +17478,9 @@ } }, "antd": { - "version": "4.18.5", - "resolved": "https://registry.npmmirror.com/antd/download/antd-4.18.5.tgz", - "integrity": "sha512-5fN3C2lWAzonhOYYlNpzIw2OHl7vxFZ+4cJ7DK/XZrV+75OY61Y+OkanqMJwrFtDDamIez35OM7cAezGko9tew==", + "version": "4.20.0", + "resolved": "https://registry.npmmirror.com/antd/-/antd-4.20.0.tgz", + "integrity": "sha512-Msowfvabsn/yJIo3qYU0vMqGb31OUylMeFRDilosBViG2AS8R2VB2IX53kbw4kFV3vr7fr2HXcuQkf/FMLU+Dg==", "requires": { "@ant-design/colors": "^6.0.0", "@ant-design/icons": "^4.7.0", @@ -17466,38 +17491,40 @@ "copy-to-clipboard": "^3.2.0", "lodash": "^4.17.21", "memoize-one": "^6.0.0", - "moment": "^2.25.3", - "rc-cascader": "~3.2.1", + "moment": "^2.29.2", + "rc-cascader": "~3.5.0", "rc-checkbox": "~2.3.0", "rc-collapse": "~3.1.0", - "rc-dialog": "~8.6.0", + "rc-dialog": "~8.8.1", "rc-drawer": "~4.4.2", - "rc-dropdown": "~3.2.0", - "rc-field-form": "~1.22.0-2", - "rc-image": "~5.2.5", + "rc-dropdown": "~3.5.0", + "rc-field-form": "~1.26.1", + "rc-image": "~5.6.0", + "rc-input": "~0.0.1-alpha.5", "rc-input-number": "~7.3.0", - "rc-mentions": "~1.6.1", - "rc-menu": "~9.2.1", - "rc-motion": "^2.4.4", - "rc-notification": "~4.5.7", + "rc-mentions": "~1.7.0", + "rc-menu": "~9.5.5", + "rc-motion": "^2.5.1", + "rc-notification": "~4.6.0", "rc-pagination": "~3.1.9", - "rc-picker": "~2.5.17", + "rc-picker": "~2.6.4", "rc-progress": "~3.2.1", "rc-rate": "~2.9.0", "rc-resize-observer": "^1.2.0", - "rc-select": "~14.0.0-alpha.15", - "rc-slider": "~9.7.4", + "rc-segmented": "~2.0.0", + "rc-select": "~14.1.1", + "rc-slider": "~10.0.0", "rc-steps": "~4.1.0", "rc-switch": "~3.2.0", - "rc-table": "~7.22.2", - "rc-tabs": "~11.10.0", + "rc-table": "~7.24.0", + "rc-tabs": "~11.13.0", "rc-textarea": "~0.3.0", "rc-tooltip": "~5.1.1", - "rc-tree": "~5.4.3", - "rc-tree-select": "~5.1.1", + "rc-tree": "~5.5.0", + "rc-tree-select": "~5.3.0", "rc-trigger": "^5.2.10", "rc-upload": "~4.3.0", - "rc-util": "^5.14.0", + "rc-util": "^5.20.0", "scroll-into-view-if-needed": "^2.2.25" }, "dependencies": { @@ 
-17774,9 +17801,9 @@ "integrity": "sha512-csOlWGAcRFJaI6m+F2WKdnMKr4HhdhFVBk0H/QbJFMCr+uO2kwohwXQPxw/9OCxp05r5ghVBFSyioixx3gfkNQ== sha1-3TeelPDbgxCwgpH51kwyCXZmF/0=" }, "async-validator": { - "version": "4.0.7", - "resolved": "https://registry.npmmirror.com/async-validator/download/async-validator-4.0.7.tgz", - "integrity": "sha512-Pj2IR7u8hmUEDOwB++su6baaRi+QvsgajuFB9j95foM1N2gy5HM4z60hfusIO0fBPG5uLAEl6yCJr1jNSVugEQ== sha1-A0oP0hA6ay6/AQ2nUYO+wpkkev4=" + "version": "4.1.1", + "resolved": "https://registry.npmmirror.com/async-validator/-/async-validator-4.1.1.tgz", + "integrity": "sha512-p4DO/JXwjs8klJyJL8Q2oM4ks5fUTze/h5k10oPPKMiLe1fj3G1QMzPHNmN1Py4ycOk7WlO2DcGXv1qiESJCZA==" }, "asynckit": { "version": "0.4.0", @@ -18913,13 +18940,13 @@ }, "date-fns": { "version": "2.28.0", - "resolved": "https://registry.npmmirror.com/date-fns/download/date-fns-2.28.0.tgz", + "resolved": "https://registry.npmmirror.com/date-fns/-/date-fns-2.28.0.tgz", "integrity": "sha512-8d35hViGYx/QH0icHYCeLmsLmMUheMmTyV9Fcm6gvNwdw31yXXH+O85sOBJ+OLnLQMKZowvpKb6FgMIQjcpvQw==" }, "dayjs": { - "version": "1.10.7", - "resolved": "https://registry.npmmirror.com/dayjs/download/dayjs-1.10.7.tgz", - "integrity": "sha512-P6twpd70BcPK34K26uJ1KT3wlhpuOAPoMwJzpsIWUxHZ7wpmbdZL/hQqBDfz7hGurYSa5PhzdhDHtt319hL3ig==" + "version": "1.11.2", + "resolved": "https://registry.npmmirror.com/dayjs/-/dayjs-1.11.2.tgz", + "integrity": "sha512-F4LXf1OeU9hrSYRPTTj/6FbO4HTjPKXvEIC1P2kcnFurViINCVk3ZV0xAS3XVx9MkMsXbbqlK6hjseaYbgKEHw==" }, "debug": { "version": "4.3.3", @@ -23067,9 +23094,9 @@ } }, "moment": { - "version": "2.29.1", - "resolved": "https://registry.npmmirror.com/moment/download/moment-2.29.1.tgz", - "integrity": "sha512-kHmoybcPV8Sqy59DwNDY3Jefr64lK/by/da0ViFcuA4DH0vQg5Q6Ze5VimxkfQNSC+Mls/Kx53s7TjP1RhFEDQ==" + "version": "2.29.3", + "resolved": "https://registry.npmmirror.com/moment/-/moment-2.29.3.tgz", + "integrity": "sha512-c6YRvhEo//6T2Jz/vVtYzqBzwvPT95JBQ+smCytzf7c50oMZRsR/a4w88aD34I+/QVSfnoAnSBFPJHItlOMJVw==" }, "ms": { "version": "2.1.2", @@ -24239,9 +24266,9 @@ } }, "rc-align": { - "version": "4.0.11", - "resolved": "https://registry.nlark.com/rc-align/download/rc-align-4.0.11.tgz?cache=0&sync_timestamp=1628678366435&other_urls=https%3A%2F%2Fregistry.nlark.com%2Frc-align%2Fdownload%2Frc-align-4.0.11.tgz", - "integrity": "sha512-n9mQfIYQbbNTbefyQnRHZPWuTEwG1rY4a9yKlIWHSTbgwI+XUMGRYd0uJ5pE2UbrNX0WvnMBA1zJ3Lrecpra/A== sha1-gZjGLbJmvBuO8F5WwTJ1v3Jiil4=", + "version": "4.0.12", + "resolved": "https://registry.npmmirror.com/rc-align/-/rc-align-4.0.12.tgz", + "integrity": "sha512-3DuwSJp8iC/dgHzwreOQl52soj40LchlfUHtgACOUtwGuoFIOVh6n/sCpfqCU8kO5+iz6qR0YKvjgB8iPdE3aQ==", "requires": { "@babel/runtime": "^7.10.1", "classnames": "2.x", @@ -24285,15 +24312,15 @@ } }, "rc-cascader": { - "version": "3.2.1", - "resolved": "https://registry.npmmirror.com/rc-cascader/download/rc-cascader-3.2.1.tgz", - "integrity": "sha512-Raxam9tFzBL4TCgHoyVcf7+Q2KSFneUk3FZXi9w1tfxEihLlezSH0oCNMjHJN8hxWwwx9ZbI9UzWTfFImjXc0Q==", + "version": "3.5.0", + "resolved": "https://registry.npmmirror.com/rc-cascader/-/rc-cascader-3.5.0.tgz", + "integrity": "sha512-rpXnWCfvk7Frh2dBzMoA0c7i0nn6aJU7L2NZo8R8pNkrT0sKgytQSpdtPWP+Pq8IkvwbEd8BU8Z8OnOljcqgZg==", "requires": { "@babel/runtime": "^7.12.5", "array-tree-filter": "^2.1.0", "classnames": "^2.3.1", - "rc-select": "~14.0.0-alpha.23", - "rc-tree": "~5.4.3", + "rc-select": "~14.1.0", + "rc-tree": "~5.5.0", "rc-util": "^5.6.1" } }, @@ -24319,14 +24346,14 @@ } }, "rc-dialog": { - "version": "8.6.0", - "resolved": 
"https://registry.nlark.com/rc-dialog/download/rc-dialog-8.6.0.tgz", - "integrity": "sha512-GSbkfqjqxpZC5/zc+8H332+q5l/DKUhpQr0vdX2uDsxo5K0PhvaMEVjyoJUTkZ3+JstEADQji1PVLVb/2bJeOQ== sha1-OyKNrAhd5e7YxiN/MRYhBGh0Quc=", + "version": "8.8.1", + "resolved": "https://registry.npmmirror.com/rc-dialog/-/rc-dialog-8.8.1.tgz", + "integrity": "sha512-7M1WKZCjfIABKEaJVskdYvb80z+RX7I11PeSjPVfLOOaJAmIepvDEd0alBtOZvOL3fZFWlMs4JVZtp9LZgONxA==", "requires": { "@babel/runtime": "^7.10.1", "classnames": "^2.2.6", "rc-motion": "^2.3.0", - "rc-util": "^5.6.1" + "rc-util": "^5.21.0" } }, "rc-drawer": { @@ -24340,22 +24367,23 @@ } }, "rc-dropdown": { - "version": "3.2.2", - "resolved": "https://registry.npmmirror.com/rc-dropdown/download/rc-dropdown-3.2.2.tgz", - "integrity": "sha512-oA9VYYg+jQaPRdFoYFfBn5EAQk2NlL6H0vR2v6JG/8i4HEfUq8p1TTt6HyQ/dGxLe8lpnK+nM7WCjgZT/cpSRQ==", + "version": "3.5.2", + "resolved": "https://registry.npmmirror.com/rc-dropdown/-/rc-dropdown-3.5.2.tgz", + "integrity": "sha512-Ty4LsXjkspZuFJSRx3blCLLCDicXM5qds6F1odgEa+jcjC+OJKHQGnvE4FqtoljPaqWm4wG78pbgXH6Ddh2DkA==", "requires": { "@babel/runtime": "^7.10.1", "classnames": "^2.2.6", - "rc-trigger": "^5.0.4" + "rc-trigger": "^5.0.4", + "rc-util": "^5.17.0" } }, "rc-field-form": { - "version": "1.22.1", - "resolved": "https://registry.npmmirror.com/rc-field-form/download/rc-field-form-1.22.1.tgz", - "integrity": "sha512-LweU7nBeqmC5r3HDUjRprcOXXobHXp/TGIxD7ppBq5FX6Iptt3ibdpRVg4RSyNulBNGHOuknHlRcguuIpvVMVg==", + "version": "1.26.4", + "resolved": "https://registry.npmmirror.com/rc-field-form/-/rc-field-form-1.26.4.tgz", + "integrity": "sha512-eCCyiNNaN0NTYTyoziQHD4Fj6mUED21lWkw66vg+kttg0eDw+miD6LsaJbTD5c2bzKjUJTf10AitPG+f5zT4+A==", "requires": { "@babel/runtime": "^7.8.4", - "async-validator": "^4.0.2", + "async-validator": "^4.1.0", "rc-util": "^5.8.0" } }, @@ -24368,16 +24396,26 @@ } }, "rc-image": { - "version": "5.2.5", - "resolved": "https://registry.nlark.com/rc-image/download/rc-image-5.2.5.tgz?cache=0&other_urls=https%3A%2F%2Fregistry.nlark.com%2Frc-image%2Fdownload%2Frc-image-5.2.5.tgz", - "integrity": "sha512-qUfZjYIODxO0c8a8P5GeuclYXZjzW4hV/5hyo27XqSFo1DmTCs2HkVeQObkcIk5kNsJtgsj1KoPThVsSc/PXOw== sha1-ROb/yEJiaCeWDnq3LhwNbzqM5EA=", + "version": "5.6.2", + "resolved": "https://registry.npmmirror.com/rc-image/-/rc-image-5.6.2.tgz", + "integrity": "sha512-qhKOVvivCZkd6CrzS/4ST2+Auu16mtPSFVqVzwE7sELWfuvzcLGTzGv8UsVvm6qRNIz6SeaueUetqi4Ii16XQA==", "requires": { "@babel/runtime": "^7.11.2", "classnames": "^2.2.6", - "rc-dialog": "~8.6.0", + "rc-dialog": "~8.8.0", "rc-util": "^5.0.6" } }, + "rc-input": { + "version": "0.0.1-alpha.7", + "resolved": "https://registry.npmmirror.com/rc-input/-/rc-input-0.0.1-alpha.7.tgz", + "integrity": "sha512-eozaqpCYWSY5LBMwlHgC01GArkVEP+XlJ84OMvdkwUnJBSv83Yxa15pZpn7vACAj84uDC4xOA2CoFdbLuqB08Q==", + "requires": { + "@babel/runtime": "^7.11.1", + "classnames": "^2.2.1", + "rc-util": "^5.18.1" + } + }, "rc-input-number": { "version": "7.3.4", "resolved": "https://registry.npmmirror.com/rc-input-number/download/rc-input-number-7.3.4.tgz", @@ -24389,22 +24427,22 @@ } }, "rc-mentions": { - "version": "1.6.1", - "resolved": "https://registry.nlark.com/rc-mentions/download/rc-mentions-1.6.1.tgz", - "integrity": "sha512-LDzGI8jJVGnkhpTZxZuYBhMz3avcZZqPGejikchh97xPni/g4ht714Flh7DVvuzHQ+BoKHhIjobHnw1rcP8erg== sha1-RgNQJ9ZKoz74QLoPvUEYceNGF64=", + "version": "1.7.1", + "resolved": "https://registry.npmmirror.com/rc-mentions/-/rc-mentions-1.7.1.tgz", + "integrity": 
"sha512-JbCS9bTqt6BYN2vfTPythlScLuc42rIlX85n7975RnkfawXlJjskHOlR3o8EpD4asl4KuA2jKTy0dj39DtSVqg==", "requires": { "@babel/runtime": "^7.10.1", "classnames": "^2.2.6", - "rc-menu": "^9.0.0", + "rc-menu": "~9.5.1", "rc-textarea": "^0.3.0", "rc-trigger": "^5.0.4", "rc-util": "^5.0.1" } }, "rc-menu": { - "version": "9.2.1", - "resolved": "https://registry.npmmirror.com/rc-menu/download/rc-menu-9.2.1.tgz", - "integrity": "sha512-UbEtn3rflJ8zS+etYGTVQuzy7Fm+yWXR5c0Rl6ecNTS/dPknRyWAyhJcbeR0Hu1+RdQT+0VCqrUPrgKnm4iY+w==", + "version": "9.5.5", + "resolved": "https://registry.npmmirror.com/rc-menu/-/rc-menu-9.5.5.tgz", + "integrity": "sha512-wj2y2BAKwSMyWXO3RBf9sNN5V+DFWxFl45Ma6qQEHA5nwwh7p07bNgc6AAJc+L1+LAz+rWz3AU8PYyT17hMHCw==", "requires": { "@babel/runtime": "^7.10.1", "classnames": "2.x", @@ -24416,35 +24454,35 @@ } }, "rc-motion": { - "version": "2.4.4", - "resolved": "https://registry.nlark.com/rc-motion/download/rc-motion-2.4.4.tgz", - "integrity": "sha512-ms7n1+/TZQBS0Ydd2Q5P4+wJTSOrhIrwNxLXCZpR7Fa3/oac7Yi803HDALc2hLAKaCTQtw9LmQeB58zcwOsqlQ== sha1-6ZXV+iT8kwZcJPcUhXzyZ31lW7A=", + "version": "2.6.0", + "resolved": "https://registry.npmmirror.com/rc-motion/-/rc-motion-2.6.0.tgz", + "integrity": "sha512-1MDWA9+i174CZ0SIDenSYm2Wb9YbRkrexjZWR0CUFu7D6f23E8Y0KsTgk9NGOLJsGak5ELZK/Y5lOlf5wQdzbw==", "requires": { "@babel/runtime": "^7.11.1", "classnames": "^2.2.1", - "rc-util": "^5.2.1" + "rc-util": "^5.21.0" } }, "rc-notification": { - "version": "4.5.7", - "resolved": "https://registry.npmmirror.com/rc-notification/download/rc-notification-4.5.7.tgz", - "integrity": "sha512-zhTGUjBIItbx96SiRu3KVURcLOydLUHZCPpYEn1zvh+re//Tnq/wSxN4FKgp38n4HOgHSVxcLEeSxBMTeBBDdw== sha1-Jl5uagwaD6xj1qvU2DLrj/MVIvE=", + "version": "4.6.0", + "resolved": "https://registry.npmmirror.com/rc-notification/-/rc-notification-4.6.0.tgz", + "integrity": "sha512-xF3MKgIoynzjQAO4lqsoraiFo3UXNYlBfpHs0VWvwF+4pimen9/H1DYLN2mfRWhHovW6gRpla73m2nmyIqAMZQ==", "requires": { "@babel/runtime": "^7.10.1", "classnames": "2.x", "rc-motion": "^2.2.0", - "rc-util": "^5.0.1" + "rc-util": "^5.20.1" } }, "rc-overflow": { - "version": "1.2.2", - "resolved": "https://registry.nlark.com/rc-overflow/download/rc-overflow-1.2.2.tgz", - "integrity": "sha512-X5kj9LDU1ue5wHkqvCprJWLKC+ZLs3p4He/oxjZ1Q4NKaqKBaYf5OdSzRSgh3WH8kSdrfU8LjvlbWnHgJOEkNQ== sha1-lbAiIBbAzb3A24X1acJi53BqXyI=", + "version": "1.2.5", + "resolved": "https://registry.npmmirror.com/rc-overflow/-/rc-overflow-1.2.5.tgz", + "integrity": "sha512-5HJKZ4nPe9e7AFdCkflgpRydvH6lJ4i2iFF06q/T1G9lL/XBeuoPLRrTBU8ao/Vo/yARW6WfEHnC2951lVgX5Q==", "requires": { "@babel/runtime": "^7.11.1", "classnames": "^2.2.1", "rc-resize-observer": "^1.0.0", - "rc-util": "^5.5.1" + "rc-util": "^5.19.2" } }, "rc-pagination": { @@ -24457,9 +24495,9 @@ } }, "rc-picker": { - "version": "2.5.19", - "resolved": "https://registry.npmmirror.com/rc-picker/download/rc-picker-2.5.19.tgz", - "integrity": "sha512-u6myoCu/qiQ0vLbNzSzNrzTQhs7mldArCpPHrEI6OUiifs+IPXmbesqSm0zilJjfzrZJLgYeyyOMSznSlh0GKA== sha1-c9B1RvrDmS8L+r8niWVKyto55G8=", + "version": "2.6.9", + "resolved": "https://registry.npmmirror.com/rc-picker/-/rc-picker-2.6.9.tgz", + "integrity": "sha512-yH3UYXCADf7REtOAB5cwe1cyFKtB0p204RCN8JdZGG4uuSOZ1IPTkk/GJS6HOpxspZeJCLGzzajuQMDwck9dsw==", "requires": { "@babel/runtime": "^7.10.1", "classnames": "^2.2.1", @@ -24502,10 +24540,21 @@ "resize-observer-polyfill": "^1.5.1" } }, + "rc-segmented": { + "version": "2.0.0", + "resolved": "https://registry.npmmirror.com/rc-segmented/-/rc-segmented-2.0.0.tgz", + "integrity": 
"sha512-YsdS+aP7E6ZMEY35WSlewJIsrjPbBSP4X/7RvZtzLExKDZwFvXdCPCbWFVDNks4jOYY9TUPYt7qlVifEu9/zXA==", + "requires": { + "@babel/runtime": "^7.11.1", + "classnames": "^2.2.1", + "rc-motion": "^2.4.4", + "rc-util": "^5.17.0" + } + }, "rc-select": { - "version": "14.0.0-alpha.24", - "resolved": "https://registry.npmmirror.com/rc-select/download/rc-select-14.0.0-alpha.24.tgz", - "integrity": "sha512-EBjdBv4aIXNQC4stn/eh3zdsF3piRsFCS+4aYjwkm6T0oc5zI48dlYtrMs/lwR/3H23At6L2w3fhHhhH8qNbJQ==", + "version": "14.1.2", + "resolved": "https://registry.npmmirror.com/rc-select/-/rc-select-14.1.2.tgz", + "integrity": "sha512-/QgarL/T/d7MIPcoRmTca2TWHBoHBM1EQIgdaFmvl3qsYRSbrb8NpWcQuJoc9fprXERWxdYSTUThQObHvdEVBQ==", "requires": { "@babel/runtime": "^7.10.1", "classnames": "2.x", @@ -24517,14 +24566,14 @@ } }, "rc-slider": { - "version": "9.7.5", - "resolved": "https://registry.npmmirror.com/rc-slider/download/rc-slider-9.7.5.tgz", - "integrity": "sha512-LV/MWcXFjco1epPbdw1JlLXlTgmWpB9/Y/P2yinf8Pg3wElHxA9uajN21lJiWtZjf5SCUekfSP6QMJfDo4t1hg==", + "version": "10.0.0", + "resolved": "https://registry.npmmirror.com/rc-slider/-/rc-slider-10.0.0.tgz", + "integrity": "sha512-Bk54UIKWW4wyhHcL8ehAxt+wX+n69dscnHTX6Uv0FMxSke/TGrlkZz1LSIWblCpfE2zr/dwR2Ca8nZGk3U+Tbg==", "requires": { "@babel/runtime": "^7.10.1", "classnames": "^2.2.5", "rc-tooltip": "^5.0.1", - "rc-util": "^5.16.1", + "rc-util": "^5.18.1", "shallowequal": "^1.1.0" } }, @@ -24560,9 +24609,9 @@ } }, "rc-table": { - "version": "7.22.2", - "resolved": "https://registry.npmmirror.com/rc-table/download/rc-table-7.22.2.tgz", - "integrity": "sha512-Ng2gNkGi6ybl6dzneRn2H4Gp8XhIbRa5rXQ7ZhZcgWVmfVMok70UHGPXcf68tXW6O0/qckTf/eOVsoviSvK4sw==", + "version": "7.24.1", + "resolved": "https://registry.npmmirror.com/rc-table/-/rc-table-7.24.1.tgz", + "integrity": "sha512-DRWpv5z5pmOaTmy5GqWoskeV1thaOu5HuD+2f61b/CkbBqlgJR3cygc5R/Qvd2uVW6pHU0lYulhmz0VLVFm+rw==", "requires": { "@babel/runtime": "^7.10.1", "classnames": "^2.2.5", @@ -24572,21 +24621,21 @@ } }, "rc-tabs": { - "version": "11.10.5", - "resolved": "https://registry.npmmirror.com/rc-tabs/download/rc-tabs-11.10.5.tgz", - "integrity": "sha512-DDuUdV6b9zGRYLtjI5hyejWLKoz1QiLWNgMeBzc3aMeQylZFhTYnFGdDc6HRqj5IYearNTsFPVSA+6VIT8g5cg==", + "version": "11.13.0", + "resolved": "https://registry.npmmirror.com/rc-tabs/-/rc-tabs-11.13.0.tgz", + "integrity": "sha512-aUw1Pq0B1a2zGX4o/m3yrQycZcCLgDp6gKwn8IAU07q148RRONsVGxi0oLVVe5SE51kOB+j0bk1RX43ZBdZNgA==", "requires": { "@babel/runtime": "^7.11.2", "classnames": "2.x", - "rc-dropdown": "^3.2.0", - "rc-menu": "^9.0.0", + "rc-dropdown": "~3.5.0", + "rc-menu": "~9.5.1", "rc-resize-observer": "^1.0.0", "rc-util": "^5.5.0" } }, "rc-textarea": { "version": "0.3.7", - "resolved": "https://registry.npmmirror.com/rc-textarea/download/rc-textarea-0.3.7.tgz", + "resolved": "https://registry.npmmirror.com/rc-textarea/-/rc-textarea-0.3.7.tgz", "integrity": "sha512-yCdZ6binKmAQB13hc/oehh0E/QRwoPP1pjF21aHBxlgXO3RzPF6dUu4LG2R4FZ1zx/fQd2L1faktulrXOM/2rw==", "requires": { "@babel/runtime": "^7.10.1", @@ -24598,47 +24647,47 @@ }, "rc-tooltip": { "version": "5.1.1", - "resolved": "https://registry.nlark.com/rc-tooltip/download/rc-tooltip-5.1.1.tgz", - "integrity": "sha512-alt8eGMJulio6+4/uDm7nvV+rJq9bsfxFDCI0ljPdbuoygUscbsMYb6EQgwib/uqsXQUvzk+S7A59uYHmEgmDA== sha1-lBeO0WLQJSvEmTtyX13CrA/M8VQ=", + "resolved": "https://registry.npmmirror.com/rc-tooltip/-/rc-tooltip-5.1.1.tgz", + "integrity": "sha512-alt8eGMJulio6+4/uDm7nvV+rJq9bsfxFDCI0ljPdbuoygUscbsMYb6EQgwib/uqsXQUvzk+S7A59uYHmEgmDA==", "requires": { 
"@babel/runtime": "^7.11.2", "rc-trigger": "^5.0.0" } }, "rc-tree": { - "version": "5.4.3", - "resolved": "https://registry.npmmirror.com/rc-tree/download/rc-tree-5.4.3.tgz", - "integrity": "sha512-WAHV8FkBerulj9J/+61+Qn0TD/Zo37PrDG8/45WomzGTYavxFMur9YguKjQj/J+NxjVJzrJL3lvdSZsumfdbiA==", + "version": "5.5.0", + "resolved": "https://registry.npmmirror.com/rc-tree/-/rc-tree-5.5.0.tgz", + "integrity": "sha512-vpKeFsDyj7weik8UPseCTaSNAPt939qn1dQd8goSbRDajbjJEja0v/WFXyRhOiF1HLemNTfqMz4MYc9qlqyNXg==", "requires": { "@babel/runtime": "^7.10.1", "classnames": "2.x", "rc-motion": "^2.0.1", "rc-util": "^5.16.1", - "rc-virtual-list": "^3.4.1" + "rc-virtual-list": "^3.4.2" } }, "rc-tree-select": { - "version": "5.1.1", - "resolved": "https://registry.npmmirror.com/rc-tree-select/download/rc-tree-select-5.1.1.tgz", - "integrity": "sha512-jchIaOTBvJjr3WJXPJc4wCeROIktkq8Ykf888GmL94nItJmqS9H6nCjSchEtkUbtDbZwx52tIJjzc81GWQbm/w==", + "version": "5.3.0", + "resolved": "https://registry.npmmirror.com/rc-tree-select/-/rc-tree-select-5.3.0.tgz", + "integrity": "sha512-UN6CUBulmch+CsihnJ73+DtWijEB1hVTC8sdVxq6E0teVAkHQZUvDj+cwZShtShAKvWwXy73PZ1hIHEUrmVcKw==", "requires": { "@babel/runtime": "^7.10.1", "classnames": "2.x", - "rc-select": "~14.0.0-alpha.8", - "rc-tree": "~5.4.3", + "rc-select": "~14.1.0", + "rc-tree": "~5.5.0", "rc-util": "^5.16.1" } }, "rc-trigger": { - "version": "5.2.10", - "resolved": "https://registry.nlark.com/rc-trigger/download/rc-trigger-5.2.10.tgz?cache=0&sync_timestamp=1628677456708&other_urls=https%3A%2F%2Fregistry.nlark.com%2Frc-trigger%2Fdownload%2Frc-trigger-5.2.10.tgz", - "integrity": "sha512-FkUf4H9BOFDaIwu42fvRycXMAvkttph9AlbCZXssZDVzz2L+QZ0ERvfB/4nX3ZFPh1Zd+uVGr1DEDeXxq4J1TA== sha1-igBXqUCxuQJ+qjO+7IpuzYXM4rE=", + "version": "5.2.18", + "resolved": "https://registry.npmmirror.com/rc-trigger/-/rc-trigger-5.2.18.tgz", + "integrity": "sha512-hi2yZ7umtbAGLxgSph1az9BR9i4Pb4fiQa4pdvFQuKN7U//3nwwygHQKHfexnM+0APBnzZwVlEHA5I8BpWrygw==", "requires": { "@babel/runtime": "^7.11.2", "classnames": "^2.2.6", "rc-align": "^4.0.0", "rc-motion": "^2.0.0", - "rc-util": "^5.5.0" + "rc-util": "^5.19.2" } }, "rc-upload": { @@ -24652,9 +24701,9 @@ } }, "rc-util": { - "version": "5.17.0", - "resolved": "https://registry.npmmirror.com/rc-util/download/rc-util-5.17.0.tgz", - "integrity": "sha512-HWuTIKzBeZQQ7IBqdokE0wMp/xx39/KfUJ0gcquBigoldDCrf3YBcWFHrrQlJG7sI82Wg8mwp1uAKV3zMGfAgg==", + "version": "5.21.2", + "resolved": "https://registry.npmmirror.com/rc-util/-/rc-util-5.21.2.tgz", + "integrity": "sha512-QuuZ2tKMScGtxSx3rLzgPGGDZm/np7phMqA7OcDidSf44abvSk+AdtdD7ZvQPvCEtdC6nCSI5tEVnUaYjjD9/w==", "requires": { "@babel/runtime": "^7.12.5", "react-is": "^16.12.0", @@ -24669,13 +24718,13 @@ } }, "rc-virtual-list": { - "version": "3.4.2", - "resolved": "https://registry.npmmirror.com/rc-virtual-list/download/rc-virtual-list-3.4.2.tgz", - "integrity": "sha512-OyVrrPvvFcHvV0ssz5EDZ+7Rf5qLat/+mmujjchNw5FfbJWNDwkpQ99EcVE6+FtNRmX9wFa1LGNpZLUTvp/4GQ== sha1-EHgyeqcjC15FbWee0s6Z88A269E=", + "version": "3.4.7", + "resolved": "https://registry.npmmirror.com/rc-virtual-list/-/rc-virtual-list-3.4.7.tgz", + "integrity": "sha512-PhV8a8g/L9sCmWcmXizzwW7QdqsxK4ebHU6fA9OsUIR7isFdx2bTGU2iAUdRV4teiIF1ZHF3gSQh8NtAxrXh6A==", "requires": { "classnames": "^2.2.6", "rc-resize-observer": "^1.0.0", - "rc-util": "^5.0.7" + "rc-util": "^5.15.0" } }, "react": { diff --git a/ymir/web/package.json b/ymir/web/package.json index 6807b5c521..cfe6b14684 100644 --- a/ymir/web/package.json +++ b/ymir/web/package.json @@ -1,6 +1,6 @@ { "name": 
"ymir-web", - "version": "1.0.0.0425", + "version": "1.1.0.0517", "scripts": { "local": "APP_ROOT=../pages/demo1 umi dev", "analyze": "cross-env UMI_ENV=dev ANALYZE=1 umi dev", @@ -58,7 +58,7 @@ "@antv/graphin-components": "^2.4.0", "@antv/graphin-icons": "^1.0.0", "@umijs/preset-react": "1.x", - "antd": "^4.18.4", + "antd": "^4.20.0", "antd-img-crop": "^4.1.0", "axios": "^0.25.0", "color": "^4.2.0", diff --git a/ymir/web/public/mining.zip b/ymir/web/public/mining.zip index 18f82f5f33..8097fb593e 100644 Binary files a/ymir/web/public/mining.zip and b/ymir/web/public/mining.zip differ diff --git a/ymir/web/public/sample_dataset.zip b/ymir/web/public/sample_dataset.zip index 91190cddf2..22dcd44cb1 100644 Binary files a/ymir/web/public/sample_dataset.zip and b/ymir/web/public/sample_dataset.zip differ diff --git a/ymir/web/public/val.zip b/ymir/web/public/val.zip index b302cd3e83..9038bfa893 100644 Binary files a/ymir/web/public/val.zip and b/ymir/web/public/val.zip differ diff --git a/ymir/web/src/assets/icons/iconfont.css b/ymir/web/src/assets/icons/iconfont.css index fed3b0ecf5..8428d4192e 100644 --- a/ymir/web/src/assets/icons/iconfont.css +++ b/ymir/web/src/assets/icons/iconfont.css @@ -1,6 +1,6 @@ @font-face { font-family: "iconfont"; /* Project id */ - src: url('iconfont.ttf?t=1647937649188') format('truetype'); + src: url('iconfont.ttf?t=1651198552473') format('truetype'); } .iconfont { @@ -395,3 +395,31 @@ content: "\e6f4"; } +.icon-project:before { + content: "\e6f5"; +} + +.icon-revert:before { + content: "\e6f6"; +} + +.icon-code-box-line:before { + content: "\e6f7"; +} + +.icon-loader:before { + content: "\e6f8"; +} + +.icon-than_list:before { + content: "\e6f9"; +} + +.icon-than:before { + content: "\e6fa"; +} + +.icon-pretreatment:before { + content: "\e6fb"; +} + diff --git a/ymir/web/src/assets/icons/iconfont.js b/ymir/web/src/assets/icons/iconfont.js index bdaaf22e34..36527f033b 100644 --- a/ymir/web/src/assets/icons/iconfont.js +++ b/ymir/web/src/assets/icons/iconfont.js @@ -1 +1 @@ -!function(a){var h,l,v,o,t,z='',m=(m=document.getElementsByTagName("script"))[m.length-1].getAttribute("data-injectcss"),i=function(a,h){h.parentNode.insertBefore(a,h)};if(m&&!a.__iconfont__svg__cssinject__){a.__iconfont__svg__cssinject__=!0;try{document.write("")}catch(a){console&&console.log(a)}}function c(){t||(t=!0,v())}function p(){try{o.documentElement.doScroll("left")}catch(a){return void setTimeout(p,50)}c()}h=function(){var a,h=document.createElement("div");h.innerHTML=z,z=null,(h=h.getElementsByTagName("svg")[0])&&(h.setAttribute("aria-hidden","true"),h.style.position="absolute",h.style.width=0,h.style.height=0,h.style.overflow="hidden",h=h,(a=document.body).firstChild?i(h,a.firstChild):a.appendChild(h))},document.addEventListener?~["complete","loaded","interactive"].indexOf(document.readyState)?setTimeout(h,0):(l=function(){document.removeEventListener("DOMContentLoaded",l,!1),h()},document.addEventListener("DOMContentLoaded",l,!1)):document.attachEvent&&(v=h,o=a.document,t=!1,p(),o.onreadystatechange=function(){"complete"==o.readyState&&(o.onreadystatechange=null,c())})}(window); \ No newline at end of file +!function(a){var h,l,v,o,z,t='',m=(m=document.getElementsByTagName("script"))[m.length-1].getAttribute("data-injectcss"),i=function(a,h){h.parentNode.insertBefore(a,h)};if(m&&!a.__iconfont__svg__cssinject__){a.__iconfont__svg__cssinject__=!0;try{document.write("")}catch(a){console&&console.log(a)}}function c(){z||(z=!0,v())}function 
p(){try{o.documentElement.doScroll("left")}catch(a){return void setTimeout(p,50)}c()}h=function(){var a,h=document.createElement("div");h.innerHTML=t,t=null,(h=h.getElementsByTagName("svg")[0])&&(h.setAttribute("aria-hidden","true"),h.style.position="absolute",h.style.width=0,h.style.height=0,h.style.overflow="hidden",h=h,(a=document.body).firstChild?i(h,a.firstChild):a.appendChild(h))},document.addEventListener?~["complete","loaded","interactive"].indexOf(document.readyState)?setTimeout(h,0):(l=function(){document.removeEventListener("DOMContentLoaded",l,!1),h()},document.addEventListener("DOMContentLoaded",l,!1)):document.attachEvent&&(v=h,o=a.document,z=!1,p(),o.onreadystatechange=function(){"complete"==o.readyState&&(o.onreadystatechange=null,c())})}(window); \ No newline at end of file diff --git a/ymir/web/src/assets/icons/iconfont.json b/ymir/web/src/assets/icons/iconfont.json index 50e1e367fb..575a464ffb 100644 --- a/ymir/web/src/assets/icons/iconfont.json +++ b/ymir/web/src/assets/icons/iconfont.json @@ -676,6 +676,55 @@ "font_class": "global-line", "unicode": "e6f4", "unicode_decimal": 59124 + }, + { + "icon_id": "96788531", + "name": "project", + "font_class": "project", + "unicode": "e6f5", + "unicode_decimal": 59125 + }, + { + "icon_id": "98145138", + "name": "revert", + "font_class": "revert", + "unicode": "e6f6", + "unicode_decimal": 59126 + }, + { + "icon_id": "99361606", + "name": "code-box-line", + "font_class": "code-box-line", + "unicode": "e6f7", + "unicode_decimal": 59127 + }, + { + "icon_id": "99361607", + "name": "loader", + "font_class": "loader", + "unicode": "e6f8", + "unicode_decimal": 59128 + }, + { + "icon_id": "99361608", + "name": "than_list", + "font_class": "than_list", + "unicode": "e6f9", + "unicode_decimal": 59129 + }, + { + "icon_id": "99361609", + "name": "than", + "font_class": "than", + "unicode": "e6fa", + "unicode_decimal": 59130 + }, + { + "icon_id": "99361610", + "name": "pretreatment", + "font_class": "pretreatment", + "unicode": "e6fb", + "unicode_decimal": 59131 } ] } diff --git a/ymir/web/src/assets/icons/iconfont.ttf b/ymir/web/src/assets/icons/iconfont.ttf index 1b5030748f..4844f7ae3a 100644 Binary files a/ymir/web/src/assets/icons/iconfont.ttf and b/ymir/web/src/assets/icons/iconfont.ttf differ diff --git a/ymir/web/src/assets/sample.png b/ymir/web/src/assets/sample.png new file mode 100644 index 0000000000..6e80e8bd62 Binary files /dev/null and b/ymir/web/src/assets/sample.png differ diff --git a/ymir/web/src/components/common/CheckProjectDirty.js b/ymir/web/src/components/common/CheckProjectDirty.js new file mode 100644 index 0000000000..6f3c01cad9 --- /dev/null +++ b/ymir/web/src/components/common/CheckProjectDirty.js @@ -0,0 +1,31 @@ +import { Button, Space, Tag } from "antd" +import useProjectStatus from "@/hooks/useProjectStatus" +import { useEffect, useState } from "react" + +import t from '@/utils/t' + +const CheckProjectDirty = ({ pid, initialCheck, callback = () => {}, ...props }) => { + const { checkDirty } = useProjectStatus(pid) + const [isDirty, setDirty] = useState(null) + const [checked, setChecked] = useState(false) + useEffect(() => { + initialCheck && checkStatus() + }, []) + + async function checkStatus() { + const dirty = await checkDirty() + setDirty(dirty) + setChecked(true) + callback(dirty) + } + + return + {checked ? + isDirty ? 
t('project.workspace.status.dirty', { dirtyLabel: Dirty }) + : t('project.workspace.status.clean', { cleanLabel: Clean }) + : null} + + +} + +export default CheckProjectDirty diff --git a/ymir/web/src/components/common/breadcrumb.js b/ymir/web/src/components/common/breadcrumb.js index ec4f32577a..baf133c993 100644 --- a/ymir/web/src/components/common/breadcrumb.js +++ b/ymir/web/src/components/common/breadcrumb.js @@ -34,8 +34,8 @@ function Breadcrumbs({ suffix = '', titles = {} }) { const params = useParams() || {} const crumbs = getCrumbs() const crumbItems = getCrumbItems(path, crumbs) - return
- + return
+ {crumbItems.map((crumb, index) => { const last = index === crumbItems.length - 1 const link = crumb.path.replace(/:([^\/]+)/g, (str, key) => { diff --git a/ymir/web/src/components/common/common.less b/ymir/web/src/components/common/common.less index 9dbb0a6783..52a8e16259 100644 --- a/ymir/web/src/components/common/common.less +++ b/ymir/web/src/components/common/common.less @@ -40,22 +40,12 @@ } /** component: icons **/ .cicon { + // margin-left: 6px; font-size: 20px; - // line-height: 20px; + line-height: 20px; vertical-align: middle; } -/** component: breadcrumb **/ -.breadcrumb { - height: 50px; - line-height: 50px; - background: #fff; - margin: 0 -5vw 20px; - padding: 0 5vw; - box-shadow: 0px 0px 10px rgba(0, 106, 107, 0.1); - .breadcrumbContent { - line-height: 50px; - } -} + /** quickActions **/ .quickActions { position: fixed; diff --git a/ymir/web/src/components/common/hide.js b/ymir/web/src/components/common/hide.js new file mode 100644 index 0000000000..790adcddad --- /dev/null +++ b/ymir/web/src/components/common/hide.js @@ -0,0 +1,64 @@ +import t from "@/utils/t" +import confirm from '@/components/common/dangerConfirm' +import { connect } from "dva" +import { forwardRef, useImperativeHandle } from "react" +import { message, Tag } from "antd" + +const Hide = forwardRef(({ type = 0, msg = 'dataset.action.hide.confirm.content', + excludeMsg = 'dataset.action.hide.confirm.exclude', ok = () => { }, ...func }, ref) => { + useImperativeHandle(ref, () => { + return { + hide, + } + }) + + function hide(versions, exclude = []) { + if (!versions?.length) { + return message.warn(t('common.selected.required')) + } + const hideVersions = versions.filter(vs => !exclude.includes(vs.id)) + const labels = getLabels(hideVersions) + const excludeLabels = getLabels(versions.filter(vs => exclude.includes(vs.id))) + const ids = hideVersions.map(({ id }) => id) + const pid = versions[0].projectId + const emsg =
{t(excludeMsg, { labels: excludeLabels })}
+ if (!hideVersions?.length) { + return message.error(emsg) + } + confirm({ + content:
+

{t(msg, { name: labels })}

+ {excludeLabels.length ? emsg : null} +
, + onOk: async () => { + const result = await func.hide(!type ? 'dataset' : 'model', pid, ids) + if (result) { + ok(result) + } + }, + okText: t('common.action.hide'), + }) + } + + return null +}) + +const getLabels = (labels) => labels.map(version => + {version.name} {version.versionName} +) + +const actions = (dispatch) => { + return { + hide(module = 'dataset', pid, ids) { + const type = `${module}/hide` + return dispatch({ + type, + payload: { pid, ids, }, + }) + }, + } +} + +export default connect(null, actions, null, { forwardRef: true })(Hide) diff --git a/ymir/web/src/components/common/icons.js b/ymir/web/src/components/common/icons.js index 555409ada3..75fa2b7e77 100644 --- a/ymir/web/src/components/common/icons.js +++ b/ymir/web/src/components/common/icons.js @@ -97,12 +97,19 @@ export const UserSharedIcon = iconFont('user-shared') export const LinkIcon = iconFont('user-associated') export const TvIcon = iconFont('tv') export const IterativeIcon = iconFont('Iterative') -export const barChart2LineIcon = iconFont('bar-chart-2-line') -export const artboard2LineIcon = iconFont('artboard-2-line') -export const repeat2LineIcon = iconFont('repeat-2-line') -export const skipBackMiniLineIcon = iconFont('skip-back-mini-line') -export const terminalBoxLineIcon = iconFont('terminal-box-line') -export const bugLineIcon = iconFont('bug-line') -export const questionnaireLineIcon = iconFont('questionnaire-line') -export const bracketsLineIcon = iconFont('brackets-line') -export const globalLineIcon = iconFont('global-line') +export const BarChart2LineIcon = iconFont('bar-chart-2-line') +export const Artboard2LineIcon = iconFont('artboard-2-line') +export const Repeat2LineIcon = iconFont('repeat-2-line') +export const SkipBackMiniLineIcon = iconFont('skip-back-mini-line') +export const TerminalBoxLineIcon = iconFont('terminal-box-line') +export const BugLineIcon = iconFont('bug-line') +export const QuestionnaireLineIcon = iconFont('questionnaire-line') +export const BracketsLineIcon = iconFont('brackets-line') +export const GlobalLineIcon = iconFont('global-line') +export const ProjectIcon = iconFont('project') +export const RevertIcon = iconFont('revert') +export const CodeBoxLineIcon = iconFont('code-box-line') +export const LoaderIcon = iconFont('loader') +export const CompareListIcon = iconFont('than_list') +export const CompareIcon = iconFont('than') +export const PretreatmentIcon = iconFont('pretreatment') diff --git a/ymir/web/src/components/common/ignoreKeywords.js b/ymir/web/src/components/common/ignoreKeywords.js index 764fb1d0bb..0f88637bc2 100644 --- a/ymir/web/src/components/common/ignoreKeywords.js +++ b/ymir/web/src/components/common/ignoreKeywords.js @@ -1,22 +1,9 @@ -import { connect } from "dva" -import { Button, Col, message, Row, Tag } from "antd" +import { Col, Row, Tag } from "antd" -import t from "@/utils/t" +import AddKeywordsBtn from "../keyword/addKeywordsBtn" -function IgnoreKeywords({ keywords = [], addKeywords }) { - async function addIgnoreKeywords() { - if (!keywords.length) { - return - } - const params = keywords.map(k => ({ - name: k, alias: [] - })) - const result = await addKeywords(params) - if (result) { - message.success(t('keyword.add.success')) - } - } +function IgnoreKeywords({ keywords = [] }) { return ( @@ -25,19 +12,10 @@ function IgnoreKeywords({ keywords = [], addKeywords }) { ))} - {keywords.length ? : null } + {keywords.length ? 
: null } ) } -const actions = (dispatch) => { - return { - addKeywords(keywords) { - return dispatch({ - type: "keyword/updateKeywords", - payload: { keywords }, - }) - }, - } -} -export default connect(null, actions)(IgnoreKeywords) + +export default IgnoreKeywords diff --git a/ymir/web/src/components/dataset/detail.js b/ymir/web/src/components/dataset/detail.js index dd87674ee2..53ceea655d 100644 --- a/ymir/web/src/components/dataset/detail.js +++ b/ymir/web/src/components/dataset/detail.js @@ -15,14 +15,14 @@ function DatasetDetail({ dataset = {} }) { const labelStyle = { width: '15%', paddingRight: '20px', justifyContent: 'flex-end' } return ( -
+
- + {dataset.name} {dataset.versionName} @@ -35,6 +35,7 @@ function DatasetDetail({ dataset = {} }) { {dataset.assetCount} + {dataset.hidden ? {t('common.state.hidden')} : null } {dataset?.keywords?.map(keyword => {keyword})}
diff --git a/ymir/web/src/components/dataset/detail.less b/ymir/web/src/components/dataset/detail.less index 7c42868db2..e69de29bb2 100644 --- a/ymir/web/src/components/dataset/detail.less +++ b/ymir/web/src/components/dataset/detail.less @@ -1,20 +0,0 @@ -.datasetDetail { - .infoTable { - margin-bottom: 20px; - :global(.ant-descriptions-view){ - border: none; - } - tr,th,td { - border: none; - } - tr { - margin-bottom: 1px; - border-bottom: 1px solid #fff; - } - } -} -.title { - border-bottom: 1px solid #ccc; - padding-bottom: 10px; - font-weight: bold; -} diff --git a/ymir/web/src/components/dataset/list.js b/ymir/web/src/components/dataset/list.js index b36767b4be..226b36b09c 100644 --- a/ymir/web/src/components/dataset/list.js +++ b/ymir/web/src/components/dataset/list.js @@ -2,7 +2,7 @@ import React, { useEffect, useRef, useState } from "react" import { connect } from 'dva' import styles from "./list.less" import { Link, useHistory, useLocation } from "umi" -import { Form, Button, Input, Table, Space, Modal, Row, Col, Tooltip, Pagination, } from "antd" +import { Form, Button, Input, Table, Space, Modal, Row, Col, Tooltip, Pagination, message, } from "antd" import t from "@/utils/t" import { humanize } from "@/utils/number" @@ -13,15 +13,14 @@ import { states } from '@/constants/dataset' import StateTag from "@/components/task/stateTag" import EditBox from "@/components/form/editBox" import Terminate from "@/components/task/terminate" -import Del from "./del" -import DelGroup from "./delGroup" +import Hide from "../common/hide" import RenderProgress from "@/components/common/progress" import TypeTag from "@/components/task/typeTag" import Actions from "@/components/table/actions" import { ImportIcon, ScreenIcon, TaggingIcon, TrainIcon, VectorIcon, WajueIcon, SearchIcon, - EditIcon, DeleteIcon, CopyIcon, StopIcon, ArrowDownIcon, ArrowRightIcon, + EditIcon, EyeOffIcon, CopyIcon, StopIcon, ArrowDownIcon, ArrowRightIcon, CompareIcon, } from "@/components/common/icons" const { confirm } = Modal @@ -33,12 +32,12 @@ function Datasets({ pid, project = {}, iterations, group, datasetList, query, ve const history = useHistory() const [datasets, setDatasets] = useState([]) const [datasetVersions, setDatasetVersions] = useState({}) - const [total, setTotal] = useState(0) + const [total, setTotal] = useState(1) const [form] = useForm() const [current, setCurrent] = useState({}) const [visibles, setVisibles] = useState(group ? 
{ [group]: true } : {}) - const delRef = useRef(null) - const delGroupRef = useRef(null) + const [selectedVersions, setSelectedVersions] = useState({}) + const hideRef = useRef(null) let [lock, setLock] = useState(true) const terminateRef = useRef(null) @@ -74,11 +73,15 @@ function Datasets({ pid, project = {}, iterations, group, datasetList, query, ve useEffect(() => { let dvs = setVersionLabelsByProject(versions, project) + setDatasetVersions(dvs) + }, [project, versions]) + + useEffect(() => { if (iterations?.length) { - dvs = setVersionLabelsByIterations(versions, iterations) + const dvs = setVersionLabelsByIterations(versions, iterations) + setDatasetVersions(dvs) } - setDatasetVersions(dvs) - }, [versions, project, iterations]) + }, [versions, iterations]) useEffect(() => { Object.keys(versions).forEach(gid => { @@ -191,7 +194,7 @@ function Datasets({ pid, project = {}, iterations, group, datasetList, query, ve } const actionMenus = (record) => { - const { id, state, taskState, task } = record + const { id, groupId, state, taskState, task, isProtected } = record const menus = [ { key: "fusion", @@ -228,6 +231,13 @@ function Datasets({ pid, project = {}, iterations, group, datasetList, query, ve onclick: () => history.push(`/home/task/label/${pid}?did=${id}`), icon: , }, + { + key: "compare", + label: t("common.action.compare"), + hidden: () => !isValidDataset(state), + onclick: () => history.push(`/home/project/${pid}/dataset/${groupId}/compare/${id}`), + icon: , + }, { key: "copy", label: t("task.action.copy"), @@ -242,14 +252,13 @@ function Datasets({ pid, project = {}, iterations, group, datasetList, query, ve hidden: () => taskState === TASKSTATES.PENDING || !isRunning(state) || task.is_terminated, icon: , }, - // { - // key: "del", - // label: t("dataset.action.del"), - // onclick: () => del(id, `${name} ${versionName}`), - // disabled: isProtected, - // hidden: () => isRunning(state), - // icon: , - // }, + { + key: "hide", + label: t("common.action.hide"), + onclick: () => hide(record), + hidden: () => hideHidden(record), + icon: , + }, ] return menus } @@ -257,6 +266,8 @@ function Datasets({ pid, project = {}, iterations, group, datasetList, query, ve const tableChange = ({ current, pageSize }, filters, sorters = {}) => { } + const hideHidden = ({ state, id }) => isRunning(state) || project?.hiddenDatasets?.includes(id) + const getTypeFilter = gid => { return getFilters(gid, 'taskType', (type) => t(getTaskTypeLabel(type))) } @@ -272,7 +283,7 @@ function Datasets({ pid, project = {}, iterations, group, datasetList, query, ve } } - const listChange = ({ current, pageSize }) => { + const listChange = (current, pageSize) => { const limit = pageSize const offset = (current - 1) * pageSize func.updateQuery({ ...query, limit, offset }) @@ -340,36 +351,22 @@ function Datasets({ pid, project = {}, iterations, group, datasetList, query, ve } function setLabelByIterations(item, iterations) { - iterations.forEach(iteration => { - const ids = [ - iteration.miningSet, - iteration.miningResult, - iteration.labelSet, - iteration.trainUpdateSet, - iteration.trainSet, - ].filter(id => id) - if (ids.includes(item.id)) { - item.iterationLabel = t('iteration.tag.round', iteration) - item.iterationRound = iteration.round - } - }) + const iteration = iterations.find(iter => [ + iter.miningSet, + iter.miningResult, + iter.labelSet, + iter.trainUpdateSet, + iter.trainSet, + ].filter(id => id).includes(item.id)) + if (iteration) { + item.iterationLabel = t('iteration.tag.round', iteration) + 
item.iterationRound = iteration.round + } return item } - const delGroup = (id, name) => { - delGroupRef.current.del(id, name) - } - const del = (id, name) => { - delRef.current.del(id, name) - } - - const delOk = (id) => { - func.getVersions(id, true) - fetchDatasets() - } - - const delGroupOk = () => { - fetchDatasets() + function rowSelectChange(gid, rowKeys) { + setSelectedVersions(old => ({ ...old, [gid]: rowKeys })) } const stop = (dataset) => { @@ -417,6 +414,43 @@ function Datasets({ pid, project = {}, iterations, group, datasetList, query, ve } } + const multipleCompare = () => { + const ids = Object.values(selectedVersions).flat() + const vss = Object.values(versions).flat().filter(({ id }) => ids.includes(id)) + const groups = [...new Set(vss.map(item => item.groupId))] + const diffGroup = groups.length > 1 + if (diffGroup) { + // diff group + return message.error(t('dataset.compare.error.diff_group')) + } + + const diffAssets = [...new Set(vss.map(item => item.assetCount))].length > 1 + if (diffAssets) { + // diff assets count + return message.error(t('dataset.compare.error.diff_assets')) + } + history.push(`/home/project/${pid}/dataset/${groups[0]}/compare/${ids}`) + } + + const multipleHide = () => { + const ids = Object.values(selectedVersions).flat() + const allVss = Object.values(versions).flat() + const vss = allVss.filter(({ id }) => ids.includes(id)) + hideRef.current.hide(vss, project.hiddenDatasets) + } + + const hide = (version) => { + if (project.hiddenDatasets.includes(version.id)) { + return message.warn(t('dataset.hide.single.invalid')) + } + hideRef.current.hide([version]) + } + + const hideOk = (result) => { + result.forEach(item => fetchVersions(item.dataset_group_id, true)) + fetchDatasets(true) + setSelectedVersions({}) + } function isValidDataset(state) { return states.VALID === state @@ -432,48 +466,64 @@ function Datasets({ pid, project = {}, iterations, group, datasetList, query, ve ) + const renderMultipleActions = Object.values(selectedVersions).flat().length ? ( + <> + + + + ) : null + const renderGroups = (<> -
+
{datasets.map(group =>
- + showVersions(group.id)}> - {visibles[group.id] ? : } - {group.name} + {visibles[group.id] ? : } + {group.name} {group.projectLabel ? {group.projectLabel} : null} edit(group)} title={t('common.modify')}> - - +} + +const props = (state) => { + return { + logined: state.user.logined, + } +} + +const actions = (dispatch) => { + return { + getHiddenList(module, id, query) { + const type = `${module}/getHiddenList` + return dispatch({ + type, + payload: { ...query, project_id: id, }, + }) + }, + } +} + +export default connect(props, actions)(HiddenList) diff --git a/ymir/web/src/pages/project/components/iteration.js b/ymir/web/src/pages/project/components/iteration.js index 762f41e5e2..d810ecb855 100644 --- a/ymir/web/src/pages/project/components/iteration.js +++ b/ymir/web/src/pages/project/components/iteration.js @@ -150,7 +150,7 @@ function Iteration({ project, fresh = () => { }, ...func }) {
{stages.map((stage) => ( - + ))} diff --git a/ymir/web/src/pages/project/components/iteration.less b/ymir/web/src/pages/project/components/iteration.less index 2146ea9f19..cd1ceebc6a 100644 --- a/ymir/web/src/pages/project/components/iteration.less +++ b/ymir/web/src/pages/project/components/iteration.less @@ -1,19 +1,32 @@ .num { display: inline-block; - width: 22px; - height: 22px; + width: 24px; + height: 24px; border-radius: 50px; - border: 1px solid @primary-color; + color: @primary-color; text-align: center; + &.pending { + color: rgba(0, 0, 0, 0.45); + border: 1px solid rgba(0, 0, 0, 0.45); + } + &.finish { + border: 1px solid @primary-color; + } + &.current { + color: white; + border: 1px solid @primary-color; + background-color: @primary-color; + } } .lineContainer { padding: 0 4px; } .line { display: inline-block; - height: 4px; + height: 1px; width: 100%; background-color: @primary-color; + vertical-align: middle; } .act { line-height: 32px; @@ -23,10 +36,19 @@ text-overflow: ellipsis; overflow: hidden; white-space: nowrap; + color: rgba(0, 0, 0, 0.45); + margin-top: 5px; + width: 0; + .current { + color: rgba(232, 185, 0, 1); + } } .skip { cursor: pointer; color: @btn-primary-bg; + display: inline-block; + margin-top: 5px; + text-decoration: underline; &:hover { text-decoration: underline; } diff --git a/ymir/web/src/pages/project/components/list.js b/ymir/web/src/pages/project/components/list.js index a416bd654e..34d391f5ed 100644 --- a/ymir/web/src/pages/project/components/list.js +++ b/ymir/web/src/pages/project/components/list.js @@ -122,7 +122,7 @@ const ProjectList = ({ list, query, ...func }) => { } const addBtn = ( - + @@ -143,52 +143,52 @@ const ProjectList = ({ list, query, ...func }) => { ) const renderItem = (item) => { - const title = + const title = {item.name} - - {t('project.train_classes')}: - {item.keywords.join(',')} + + {t('project.train_classes')}: + {item.keywords.join(',')} - - {t('project.target.map')}: - {item?.targetMap} - - - {t('project.iteration.current')}: - {t(getStageLabel(item.currentStage, item.round))} + + {t('project.iteration.current')}: + {t(getStageLabel(item.currentStage, item.round))} {more(item)} const desc = <> - + -
Datasets
-
{item.setCount}
+
Datasets
+
{item.setCount}
-
Models
-
{item.modelCount}
+
Models
+
{item.modelCount}
-
{t('project.train_set')}/{t('project.test_set')}/{t('project.mining_set')}
-
{item.trainSet?.name}/{item.testSet?.name}/{item.miningSet?.name}
+
{t('project.train_set')}/{t('project.test_set')}/{t('project.mining_set')}
+
+ {item.trainSet?.name}| + {item.testSet?.name}| + {item.miningSet?.name} +
-
{t('project.iteration.number')}
-
{item.round}/{item?.targetIteration}
+
{t('project.iteration.number')}
+
{item.round}
- {t('project.content.desc')}: {item.description} - {item.createTime} + {t('project.content.desc')}: {item.description} + {item.createTime} - return + return history.push(`/home/project/detail/${item.id}`)}> @@ -203,12 +203,12 @@ const ProjectList = ({ list, query, ...func }) => { {searchPanel} }> - t('project.list.total', { total })} showQuickJumper showSizeChanger /> diff --git a/ymir/web/src/pages/project/components/list.less b/ymir/web/src/pages/project/components/list.less index 30a57a9f94..a46c406b0e 100644 --- a/ymir/web/src/pages/project/components/list.less +++ b/ymir/web/src/pages/project/components/list.less @@ -1,67 +1,4 @@ + .listContainer { min-height: calc(100vh - 272px); } -.list { - :global(.ant-list-item), :global(.ant-list-item:last-child) { - padding: 10px 20px; - border: 1px solid rgba(0, 0, 0, 0.05); - margin: 10px 0; - border-radius: 2px; - &:hover { - border-color: @primary-color; - background-color: fade(@primary-color, 5); - } - &.falure { - border-color:rgb(242, 99, 123); - } - } - :global(.ant-list-item-meta-title) { - font-size: 18px; - } - .title { - .titleItem { - font-size: 14px; - } - } - .titleLabel, .bottomContent { - display: inline-block; - margin-right: 10px; - color: rgba(0, 0, 0, 0.45); - } - .titleContent { - color: rgba(0, 0, 0, 0.85); - } - .bottomLabel { - color: rgba(0, 0, 0, 0.65); - } - .content { - margin-bottom: 10px; - // width: 100%; - text-align: center; - .contentLabel { - color:rgba(0, 0, 0, 0.45); - } - .contentContent, .sets { - color: rgba(0, 0, 0, 0.85); - font-size: 20px; - } - .sets { - color: @primary-color; - white-space: nowrap; - text-overflow: ellipsis; - width: 100%; - overflow: hidden; - } - } -} -.actions { - background: #fff; - padding: 20px; - margin-bottom: 10px; - width: 100%; - box-shadow: 0px 1px 0px rgba(0, 0, 0, 0.06); -} -.pager { - display: flex; - justify-content: flex-end; -} diff --git a/ymir/web/src/pages/project/components/stage.js b/ymir/web/src/pages/project/components/stage.js index 79b845e2ea..e69b16af4e 100644 --- a/ymir/web/src/pages/project/components/stage.js +++ b/ymir/web/src/pages/project/components/stage.js @@ -6,6 +6,7 @@ import { states, statesLabel } from '@/constants/dataset' import s from './iteration.less' import { useEffect, useState } from "react" import RenderProgress from "../../../components/common/progress" +import { YesIcon } from '@/components/common/icons' function Stage({ pid, stage, stageResult, current = 0, end = false, callback = () => { }, ...func }) { const history = useHistory() @@ -86,11 +87,9 @@ function Stage({ pid, stage, stageResult, current = 0, end = false, callback = ( const stateClass = `${s.stage} ${currentStage() ? s.current : (finishStage() ? s.finish : s.pending)}` const renderCount = () => { - if (finishStage() || (currentStage() && isValid())) { - return '√' // finish state - } else { - return stage.value + 1 - } + const content = finishStage() || (currentStage() && isValid()) ? : stage.value + 1 + const cls = pendingStage() ? s.pending : (currentStage() ? s.current : s.finish) + return {content} } const renderMain = () => { return currentStage() ? renderMainBtn() : {t(stage.act)} @@ -118,8 +117,8 @@ function Stage({ pid, stage, stageResult, current = 0, end = false, callback = ( (isValid() ? (result.name ?`${result.name} ${result.versionName}` : (end ? null : t('common.done'))) : - isPending() && currentStage() ? t('project.stage.state.pending.current') : t(statesLabel(state))) : - t(pending) + {isPending() && currentStage() ? 
t('project.stage.state.pending.current') : t(statesLabel(state))}) : + {t(pending)} } const renderSkip = () => { @@ -128,7 +127,7 @@ function Stage({ pid, stage, stageResult, current = 0, end = false, callback = ( return (
- {renderCount()} + {renderCount()} {renderMain()} diff --git a/ymir/web/src/pages/project/detail.js b/ymir/web/src/pages/project/detail.js index 2a8e3f0833..b47e6a4761 100644 --- a/ymir/web/src/pages/project/detail.js +++ b/ymir/web/src/pages/project/detail.js @@ -3,7 +3,7 @@ import { Card, Col, Popover, Row, Space } from "antd" import { useLocation, useParams, connect, Link, useHistory } from "umi" import t from "@/utils/t" -import { getStageLabel } from '@/constants/project' +import { getStageLabel, tabs } from '@/constants/project' import Breadcrumbs from "@/components/common/breadcrumb" import Iteration from './components/iteration' import Datasets from '@/components/dataset/list' @@ -12,11 +12,8 @@ import Models from '@/components/model/list' import s from "./detail.less" import Prepare from "./components/prepare" import KeywordRates from "@/components/dataset/keywordRates" - -const tabsTitle = [ - { tab: t('project.tab.set.title'), key: 'set', }, - { tab: t('project.tab.model.title'), key: 'model', }, -] +import { EditIcon, SearchEyeIcon, EyeOffIcon } from "../../components/common/icons" +import CheckProjectDirty from "../../components/common/CheckProjectDirty" function ProjectDetail(func) { const history = useHistory() @@ -25,10 +22,10 @@ function ProjectDetail(func) { const [iterations, setIterations] = useState([]) const [group, setGroup] = useState(0) const [project, setProject] = useState({}) - const [active, setActive] = useState(tabsTitle[0].key) + const [active, setActive] = useState(tabs[0].key) const content = { - 'set': , - 'model': + [tabs[0].key]: , + [tabs[1].key]: } useEffect(() => { @@ -40,7 +37,7 @@ function ProjectDetail(func) { const locationHash = location.hash.replace(/^#/, '') const [tabKey, gid] = (locationHash || '').split('_') setGroup(gid) - setActive(tabKey || tabsTitle[0].key) + setActive(tabKey || tabs[0].key) }, [location.hash]) async function fetchProject(force) { @@ -71,7 +68,7 @@ function ProjectDetail(func) { { dataset: project.testSet, label: 'project.add.form.test.set', name: getDsName(project.testSet) }, { dataset: project.miningSet, label: 'project.add.form.mining.set', name: getDsName(project.miningSet) }, ] - + return maps.map(({ name, label, dataset }) => { const rlabel = {t(label)}: {name} return @@ -95,37 +92,36 @@ function ProjectDetail(func) {
- + {project.name} {t('project.detail.info.iteration', { stageLabel: {t(getStageLabel(project.currentStage, project.round))}, current: {project.round}, - target: {project.targetIteration} })} - {t('project.train_classes')}: {project?.keywords?.join(',')} - {project.targetMap ? {t('project.target.map')}: {project.targetMap}% : null} - {project.targetDataset ? {t('project.target.dataset')}: {project.targetDataset} : null} + {t('project.train_classes')}: {project?.keywords?.join(',')} {project.description ? {t('project.detail.desc')}: {project.description} : null} - {t('project.settings.title')} - {t('breadcrumbs.project.iterations')} + {t('project.settings.title')} + {t('breadcrumbs.project.iterations')} + {t('common.hidden.list')} {project.round > 0 ? : } - + {renderProjectDatasetLabel()}
- ({ ...tab, tab: t(tab.tab) }))} tabBarExtraContent={} + activeTabKey={active} onTabChange={tabChange} className='noShadow' style={{ margin: '-20px -5vw 0', background: 'transparent' }} - headStyle={{ padding: '0 5vw', background: '#fff', marginBottom: '20px' }} + headStyle={{ padding: '0 5vw', background: '#fff', marginBottom: '10px' }} bodyStyle={{ padding: '0 5vw' }}> {content[active]} diff --git a/ymir/web/src/pages/project/detail.less b/ymir/web/src/pages/project/detail.less index 7e7058ae24..fd9a5847b3 100644 --- a/ymir/web/src/pages/project/detail.less +++ b/ymir/web/src/pages/project/detail.less @@ -1,40 +1,35 @@ .header { background: #fff; margin: -20px -5vw 0; - padding: 0 5vw 20px; + padding: 0 5vw 30px; } .detailPanel { - margin-bottom: 10px; + margin-bottom: 20px; } .title { border-bottom: 1px solid #ccc; padding-bottom: 10px; font-weight: bold; } - -:global(.ant-card-body){ - .infoTable { - margin-bottom: 20px; - :global(.ant-descriptions-view){ - border: none; - } - tr,th,td { - border: none; - } - tr { - margin-bottom: 1px; - border-bottom: 1px solid #fff; - } - } +.noShadow { + box-shadow: 0px 0px 0px rgb(255 255 255); } .detailPanel { + :global(.ant-space-item) { + margin-right: 16px; + color: rgba(0, 0, 0, 0.65); + } .name { font-size: 20px; font-weight: bold; + color: rgba(0, 0, 0, 0.85); } .orange { color: orange; } + .black { + color: rgba(0, 0, 0, 0.85); + } .bold { font-weight: bold; } @@ -43,7 +38,7 @@ } } .setsPanel { - margin: 20px 0; + margin-top: 20px; padding: 10px; background-color: #fafafa; } diff --git a/ymir/web/src/pages/project/hidden.js b/ymir/web/src/pages/project/hidden.js new file mode 100644 index 0000000000..ed1fb594d4 --- /dev/null +++ b/ymir/web/src/pages/project/hidden.js @@ -0,0 +1,41 @@ +import React, { useEffect, useState } from "react" +import s from "./index.less" +import { useHistory, useLocation, useParams } from "umi" +import { Card, } from "antd" + +import t from "@/utils/t" +import { tabs } from '@/constants/project' +import Breadcrumbs from "@/components/common/breadcrumb" +import HiddenList from "./components/hiddenList" + +function Hidden() { + const history = useHistory() + const location = useLocation() + const { id } = useParams() + const [active, setActive] = useState(tabs[0].key) + + useEffect(() => { + const tabKey = location.hash.replace(/^#/, '') + setActive(tabKey || tabs[0].key) + }, [location.hash]) + + function tabChange(key) { + history.push(`#${key}`) + } + + return ( +
+ + ({ ...tab, tab: t(tab.tab) }))} activeTabKey={active} onTabChange={tabChange} + className='noShadow' + bordered={false} + style={{ margin: '-20px -5vw 0', background: 'transparent' }} + headStyle={{ padding: '0 5vw', background: '#fff', marginBottom: '20px' }} + bodyStyle={{ padding: '0 5vw' }}> + + +
+ ) +} + +export default Hidden diff --git a/ymir/web/src/pages/project/index.less b/ymir/web/src/pages/project/index.less index f0d7b1e4f5..4eb7fd3097 100644 --- a/ymir/web/src/pages/project/index.less +++ b/ymir/web/src/pages/project/index.less @@ -29,40 +29,49 @@ justify-content: flex-end; } .iterations { - // .td { - // position: relative; - .extraTag { - position: absolute; - right: 0; - top: 2px; - font-size: 12px; - line-height: 12px; - color: #fff; - text-align: right; - .negative, .positive, .neutral { + .table { + min-height: calc(100vh - 305px); + } + .extraTag { + position: absolute; + right: 0; + top: 2px; + font-size: 12px; + line-height: 12px; + color: #fff; + text-align: right; + .negative, .positive, .neutral { + display: inline-block; + padding: 2px 4px; + border-radius: 4px; + &::after { display: inline-block; - padding: 2px 4px; - border-radius: 4px; - &::after { - display: inline-block; - margin-left: 4px; - } - } - .negative { - background-color: darkred; - &::after { - content: '\2193'; - } + margin-left: 4px; } - .positive { - &::after { - content: '\2191'; - } - background-color: darkgreen; + } + .negative { + background-color: darkred; + &::after { + content: '\2193'; } - .neutral { - background-color: darkgray; + } + .positive { + &::after { + content: '\2191'; } + background-color: darkgreen; + } + .neutral { + background-color: darkgray; } - // } + } +} +.orange { + color: orange; +} +.hiddenList { + .table { + padding: 20px; + min-height: calc(100vh - 320px); + } } diff --git a/ymir/web/src/pages/project/iterations.js b/ymir/web/src/pages/project/iterations.js index 3d8f6aaa12..74c09883af 100644 --- a/ymir/web/src/pages/project/iterations.js +++ b/ymir/web/src/pages/project/iterations.js @@ -6,6 +6,7 @@ import { Form, Table, Modal, ConfigProvider, Card, Space, Row, Col, Button, Popo import t from "@/utils/t" import { percent, isNumber } from '@/utils/number' +import { getStageLabel } from '@/constants/project' import Breadcrumbs from "@/components/common/breadcrumb" import KeywordRates from "@/components/dataset/keywordRates" @@ -77,8 +78,8 @@ function Iterations({ ...func }) { dataset.project = project const content = return - {label} - {extra} + {label} + {extra} } @@ -152,9 +153,12 @@ function Iterations({ ...func }) { {t('project.train_classes')}: {project?.keywords?.join(',')} - {t('project.detail.info.iteration', { current: project.round, target: project.targetIteration })} - {project.targetMap ? {t('project.target.map')}: {project.targetMap}% : null} - {project.targetDataset ? {t('project.target.dataset')}: {project.targetDataset} : null} + + {t('project.detail.info.iteration', { + stageLabel: {t(getStageLabel(project.currentStage, project.round))}, + current: {project.round}, + })} + {project.description ? {t('project.detail.desc')}: {project.description} : null}
diff --git a/ymir/web/src/pages/task/compare/components/keywordSelect.js b/ymir/web/src/pages/task/compare/components/keywordSelect.js new file mode 100644 index 0000000000..e0c794e0ee --- /dev/null +++ b/ymir/web/src/pages/task/compare/components/keywordSelect.js @@ -0,0 +1,47 @@ +import { useCallback, useEffect, useState } from "react" + +import t from '@/utils/t' +import { Dropdown, Menu, Space } from "antd" +import { ArrowDownIcon } from "@/components/common/icons" + +const KeywordSelect = ({ value, keywords, onChange = () => {} }) => { + const [selected, setSelected] = useState(null) + const [options, setOptions] = useState([]) + const change = ({ key }) => setSelected(key) + useEffect(() => { + if (keywords?.length) { + setOptions([ + ...keywords.map(label => ({ key: label, label: label })), + { key: '', label: t('common.everage') } + ]) + } else { + setOptions([]) + } + }, [keywords]) + + useEffect(() => { + value && setSelected(value) + }, [value]) + + useEffect(() => { + onChange(selected) + }, [selected]) + + useEffect(() => { + setSelected(options.length ? options[0].key : null) + }, [options]) + + const menus = + + return <> + {t('dataset.column.keyword')}: + + + {selected === '' ? t('common.everage') : selected} + + + + +} + +export default KeywordSelect diff --git a/ymir/web/src/pages/task/compare/index.js b/ymir/web/src/pages/task/compare/index.js new file mode 100644 index 0000000000..ccba9651e6 --- /dev/null +++ b/ymir/web/src/pages/task/compare/index.js @@ -0,0 +1,275 @@ +import React, { useCallback, useEffect, useState } from "react" +import { connect } from "dva" +import { Card, Button, Form, Row, Col, Space, Table, Slider, } from "antd" +import s from "./index.less" +import commonStyles from "../common.less" +import { useHistory, useParams, Link } from "umi" + +import t from "@/utils/t" +import { string2Array } from "@/utils/string" +import Breadcrumbs from "@/components/common/breadcrumb" +import { randomNumber, toFixed } from "@/utils/number" +import Panel from "@/components/form/panel" +import DatasetSelect from "@/components/form/datasetSelect" +import { CompareIcon } from "@/components/common/icons" +import useDynamicRender from "@/hooks/useDynamicRender" +import KeywordSelect from "./components/keywordSelect" +import useBatchModels from "../../../hooks/useBatchModels" + +function Compare({ ...func }) { + const history = useHistory() + const pageParams = useParams() + const pid = +pageParams.id + const gid = +pageParams.gid + const did = string2Array(pageParams.ids) + const [datasets, setDatasets] = useState([]) + const [gt, setGT] = useState({}) + const [iou, setIou] = useState(0.5) + const [confidence, setConfidence] = useState(0.3) + const [keywords, setKeywords] = useState([]) + const [source, setSource] = useState(null) + const [tableSource, setTableSource] = useState([]) + const [form] = Form.useForm() + const [apRender, setSelectedKeyword] = useDynamicRender() + const [models, getModels] = useBatchModels() + + const filterDatasets = useCallback((dss) => { + return filterSameAssets(innerGroup(dss)).filter(ds => ds.id !== gt?.id) + }, [gt]) + + const filterGT = useCallback((dss) => { + return filterSameAssets(innerGroup(dss)).filter(ds => !datasets.map(({ id }) => id).includes(ds.id)) + }, [datasets]) + + useEffect(() => { + setTableSource(generateTableSource(iou)) + }, [iou, source]) + + useEffect(() => { + !source && setKeywords([]) + }, [source]) + + const onFinish = async (values) => { + const params = { + ...values, + projectId: pid, + name: 
'task_evaluate_' + randomNumber(), + } + const result = await func.compare(params) + if (result) { + setSource(result) + const list = gt.keywords || [] + setKeywords(list) + fetchModels() + } + } + + function onFinishFailed(errorInfo) { + console.log("Failed:", errorInfo) + } + + function datasetsChange(values, options) { + setDatasets(options.map(option => option.dataset)) + } + + function gtChange(value, option = {}) { + setGT(option?.dataset) + } + + function innerGroup(datasets) { + return datasets.filter(ds => ds.groupId === gid) + } + + function filterSameAssets(datasets = []) { + if (!datasets.length) { + return datasets + } + const target = datasets.find(({ id }) => id === did[0]) + const count = target.assetCount + return datasets.filter(({ assetCount }) => count === assetCount) + } + + function generateTableSource(iou = 0) { + const getInfo = (dataset) => ({ + id: dataset.id, + name: `${dataset.name} ${dataset.versionName}`, + model: dataset.task?.parameters?.model_id, + }) + return source ? [getInfo(gt), ...datasets.map((dataset, index) => { + const datasetSource = source[dataset.id] || {} + const iouMetrics = datasetSource.iou_evaluations || {} + const metrics = iouMetrics[iou] || {} + return { + ...getInfo(dataset), + map: datasetSource.iou_averaged_evaluation, + metrics, + dataset, + } + })] : [] + } + + function retry() { + setSource(null) + } + + function getModelsName(id) { + const model = models.find(md => md.id === id) + return model ? `${model.name} ${model.versionName}` : null + } + + function fetchModels () { + if (datasets.length) { + const ids = datasets.map(({ task: { parameters: { model_id } } }) => model_id).filter(item => item) + getModels(ids) + } + } + + const columns = [ + { + title: t("dataset.column.name"), + dataIndex: "name", + render: (name, { id }) => { + const extra = id === gt.id ? Ground Truth : null + return <>{name} {extra} + }, + ellipsis: { + showTitle: true, + }, + }, + { + title: t("dataset.column.model"), + dataIndex: "model", + render: id => {getModelsName(id)}, + ellipsis: { + showTitle: true, + }, + }, + { + title: t("dataset.column.map"), + dataIndex: "map", + render: apRender, + }, + { + title: 'AP', + dataIndex: 'metrics', + render: apRender, + ellipsis: { + showTitle: true, + }, + }, + ] + + function renderTitle() { + return ( + + {t('breadcrumbs.dataset.compare')} + + + ) + } + + const initialValues = { + datasets: did, + confidence, + } + return ( +
+ + + + + + {t('dataset.compare.form.confidence')}: {source ? confidence : 0} + IoU: + + setIou(value)} /> + + setSelectedKeyword(selected)} /> + + record.id} + rowClassName={(record, index) => index % 2 === 0 ? '' : 'oddRow'} + columns={columns} + pagination={false} + /> + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + + ) +} + +const dis = (dispatch) => { + return { + getDataset(id, force) { + return dispatch({ + type: "dataset/getDataset", + payload: { id, force }, + }) + }, + compare(payload) { + return dispatch({ + type: "dataset/compare", + payload, + }) + }, + } +} + +const stat = (state) => { + return { + // allDatasets: state.dataset.allDatasets, + } +} + +export default connect(stat, dis)(Compare) diff --git a/ymir/web/src/pages/task/compare/index.less b/ymir/web/src/pages/task/compare/index.less new file mode 100644 index 0000000000..55baa2a908 --- /dev/null +++ b/ymir/web/src/pages/task/compare/index.less @@ -0,0 +1,23 @@ +.formContainer { + position: relative; +} +.mask { + position: absolute; + width: 100%; + height: 100%; + display: flex; + align-items: end; + justify-content: center; + background-color: rgba(0, 0, 0, 0.1); + z-index: 5; +} + +.extra { + padding: 2px 8px; + font-size: 12px; + color:rgba(232, 185, 0, 1); + background-color: rgba(250, 211, 55, 0.1); +} +:global(body .ant-tooltip-placement-top) { + z-index: 1000; +} \ No newline at end of file diff --git a/ymir/web/src/pages/task/copy/index.js b/ymir/web/src/pages/task/copy/index.js index 0f345c0ea3..5d520cb497 100644 --- a/ymir/web/src/pages/task/copy/index.js +++ b/ymir/web/src/pages/task/copy/index.js @@ -121,7 +121,7 @@ const mapDispatchToProps = (dispatch) => { getDatasets(pid) { return dispatch({ type: "dataset/queryAllDatasets", - payload: pid, + payload: { pid, force: true }, }) }, getDataset(id, force) { diff --git a/ymir/web/src/pages/task/fusion/index.js b/ymir/web/src/pages/task/fusion/index.js index 14e5cbbc21..4a5e2b6e1b 100644 --- a/ymir/web/src/pages/task/fusion/index.js +++ b/ymir/web/src/pages/task/fusion/index.js @@ -37,8 +37,8 @@ function Fusion({ allDatasets, datasetCache, ...func }) { const [selectedExcludeKeywords, setExcludeKeywords] = useState([]) const [visibles, setVisibles] = useState({ merge: true, - filter: false, - sampling: !!chunk, + filter: true, + sampling: true, }) const initialValues = { @@ -99,7 +99,14 @@ function Fusion({ allDatasets, datasetCache, ...func }) { setKeywords(ks) } + const checkInputs = (i) => { + return i.exc || i.inc || i.samples || i?.exclude_datasets?.length || i?.include_datasets?.length + } + const onFinish = async (values) => { + if(!checkInputs(values)) { + return message.error(t('dataset.fusion.validate.inputs')) + } const params = { ...values, project_id: dataset.projectId, @@ -298,7 +305,7 @@ const props = (state) => { } const mapDispatchToProps = (dispatch) => { return { - getDatasets(pid, force) { + getDatasets(pid, force = true) { return dispatch({ type: "dataset/queryAllDatasets", payload: { pid, force }, diff --git a/ymir/web/src/pages/task/inference/index.js b/ymir/web/src/pages/task/inference/index.js index 7e91c61c5b..8e354f4c13 100644 --- a/ymir/web/src/pages/task/inference/index.js +++ b/ymir/web/src/pages/task/inference/index.js @@ -1,6 +1,6 @@ -import React, { useEffect, useState } from "react" +import React, { useCallback, useEffect, useState } from "react" import { connect } from "dva" -import { Select, Card, Input, Radio, Button, Form, Row, Col, ConfigProvider, Space, InputNumber } from "antd" +import { Select, Card, Input, Radio, Button, Form, Row, Col, ConfigProvider, Space, InputNumber, message, Tag, Alert } from "antd" import { PlusOutlined, MinusCircleOutlined, @@ -12,6 +12,7 @@ import commonStyles from "../common.less" import { formLayout } from "@/config/antd" import t from "@/utils/t" +import { string2Array } from "@/utils/string" 
import { TYPES } from '@/constants/image' import { useHistory, useParams, useLocation } from "umi" import Breadcrumbs from "@/components/common/breadcrumb" @@ -22,23 +23,30 @@ import Tip from "@/components/form/tip" import ModelSelect from "@/components/form/modelSelect" import ImageSelect from "@/components/form/imageSelect" import DatasetSelect from "@/components/form/datasetSelect" +import useAddKeywords from "@/hooks/useAddKeywords" +import AddKeywordsBtn from "@/components/keyword/addKeywordsBtn" const { Option } = Select const Algorithm = () => [{ id: "aldd", label: 'ALDD', checked: true }] -function Inference({ datasetCache, datasets, ...props }) { +function Inference({ datasetCache, datasets, ...func }) { const pageParams = useParams() const pid = Number(pageParams.id) const history = useHistory() const location = useLocation() - const { did, mid, image } = location.query + const { did, image } = location.query + const mid = string2Array(location.query.mid) || [] const [dataset, setDataset] = useState({}) - const [selectedModel, setSelectedModel] = useState({}) + const [selectedModels, setSelectedModels] = useState([]) + const [gpuStep, setGpuStep] = useState(1) const [form] = Form.useForm() const [seniorConfig, setSeniorConfig] = useState([]) const [hpVisible, setHpVisible] = useState(false) const [gpu_count, setGPU] = useState(0) + const [selectedGpu, setSelectedGpu] = useState(0) + const [keywordRepeatTip, setKRTip] = useState('') + const [{ newer }, checkKeywords] = useAddKeywords(true) useEffect(() => { fetchSysInfo() @@ -49,36 +57,39 @@ function Inference({ datasetCache, datasets, ...props }) { }, [seniorConfig]) useEffect(() => { - did && props.getDataset(did) + did && func.getDataset(did) + did && form.setFieldsValue({ datasetId: Number(did) }) }, [did]) + useEffect(() => { + mid?.length && form.setFieldsValue({ model: mid }) + }, [location.query.mid]) + useEffect(() => { datasetCache[did] && setDataset(datasetCache[did]) }, [datasetCache]) useEffect(() => { - pid && props.getDatasets(pid) + pid && func.getDatasets(pid) }, [pid]) useEffect(() => { - const state = location.state - - if (state?.record) { - const { parameters, config, } = state.record - const { description, model_id, docker_image, docker_image_id } = parameters - form.setFieldsValue({ - datasetId: dataset_id, - model: model_id, - docker_image: docker_image_id + ',' + docker_image, - gpu_count: config.gpu_count, - description, - }) - setConfig(config) - setHpVisible(true) + setGpuStep(selectedModels.length || 1) - history.replace({ state: {} }) + checkModelKeywords() + }, [selectedModels]) + + useEffect(() => { + if (newer.length) { + const tip = <> + {t('task.inference.unmatch.keywrods', { + keywords: newer.map(key => {key}) + })} + + + setKRTip(tip) } - }, [location.state]) + }, [newer]) function validHyperparam(rule, value) { @@ -91,8 +102,13 @@ function Inference({ datasetCache, datasets, ...props }) { } } + function checkModelKeywords() { + const keywords = (selectedModels.map(model => model?.keywords) || []).flat().filter(item => item) + checkKeywords(keywords) + } + async function fetchSysInfo() { - const result = await props.getSysInfo() + const result = await func.getSysInfo() if (result) { setGPU(result.gpu_count) } @@ -114,7 +130,7 @@ function Inference({ datasetCache, datasets, ...props }) { form.getFieldValue('hyperparam').forEach(({ key, value }) => key && value ? 
config[key] = value : null) config['gpu_count'] = form.getFieldValue('gpu_count') || 0 - + const img = (form.getFieldValue('image') || '').split(',') const imageId = Number(img[0]) const image = img[1] @@ -127,9 +143,12 @@ function Inference({ datasetCache, datasets, ...props }) { image, config, } - const result = await props.createInferenceTask(params) + const result = await func.createInferenceTask(params) if (result) { - await props.clearCache() + if (result.filter(item => item).length !== values.model.length) { + message.warn(t('task.inference.failure.some')) + } + await func.clearCache() history.replace(`/home/project/detail/${pid}`) } } @@ -142,23 +161,30 @@ function Inference({ datasetCache, datasets, ...props }) { id && setDataset(datasets.find(ds => ds.id === id)) } - function modelChange(id, model) { - model && setSelectedModel(model) + function modelChange(id, options = []) { + setSelectedModels(options.map(({ model }) => model) || []) + } + + async function selectModelFromIteration() { + const iterations = await func.getIterations(pid) + if (iterations) { + const models = iterations.map(iter => iter.model) || [] + form.setFieldsValue({ model: models }) + } } const getCheckedValue = (list) => list.find((item) => item.checked)["id"] const initialValues = { description: '', - model: mid ? parseInt(mid) : undefined, image: image ? parseInt(image) : undefined, - datasetId: Number(did) ? Number(did) : undefined, algorithm: getCheckedValue(Algorithm()), gpu_count: 0, } return (
- + + {keywordRepeatTip ? : null}
history.push(`/home/dataset/add/${pid}`)} />}> - + }> - - + + + + +
- + - - - {t('task.gpu.tip', { count: gpu_count })} + + + {t('task.infer.gpu.tip', { total: gpu_count, selected: gpuStep * selectedGpu })} + @@ -290,8 +322,8 @@ function Inference({ datasetCache, datasets, ...props }) { - : null } - + : null} +
@@ -342,7 +374,7 @@ const dis = (dispatch) => { getDatasets(pid) { return dispatch({ type: "dataset/queryAllDatasets", - payload: pid, + payload: { pid, force: true }, }) }, getDataset(id, force) { @@ -360,6 +392,12 @@ const dis = (dispatch) => { payload, }) }, + getIterations(id) { + return dispatch({ + type: 'iteration/getIterations', + payload: { id, more: true }, + }) + }, } } diff --git a/ymir/web/src/pages/task/mining/index.js b/ymir/web/src/pages/task/mining/index.js index 37998f8cc4..7562ca3d28 100644 --- a/ymir/web/src/pages/task/mining/index.js +++ b/ymir/web/src/pages/task/mining/index.js @@ -148,7 +148,7 @@ function Mining({ datasetCache, datasets, ...func }) { id && setDataset(datasets.find(ds => ds.id === id)) } - function modelChange(id, model) { + function modelChange(id, { model }) { model && setSelectedModel(model) } @@ -390,7 +390,7 @@ const dis = (dispatch) => { type: "common/getSysInfo", }) }, - getDatasets(pid, force) { + getDatasets(pid, force = true) { return dispatch({ type: "dataset/queryAllDatasets", payload: { pid, force }, diff --git a/ymir/web/src/pages/task/train/index.js b/ymir/web/src/pages/task/train/index.js index 24acde85cd..47411e94d0 100644 --- a/ymir/web/src/pages/task/train/index.js +++ b/ymir/web/src/pages/task/train/index.js @@ -1,6 +1,6 @@ import React, { useEffect, useState } from "react" import { connect } from "dva" -import { Select, Card, Input, Radio, Button, Form, Row, Col, ConfigProvider, Space, InputNumber, Tag } from "antd" +import { Select, Card, Input, Radio, Button, Form, Row, Col, ConfigProvider, Space, InputNumber, Tag, message } from "antd" import { PlusOutlined, MinusCircleOutlined, @@ -22,6 +22,7 @@ import styles from "./index.less" import commonStyles from "../common.less" import ModelSelect from "@/components/form/modelSelect" import KeywordRates from "@/components/dataset/keywordRates" +import CheckProjectDirty from "@/components/common/CheckProjectDirty" const { Option } = Select @@ -45,6 +46,7 @@ function Train({ allDatasets, datasetCache, keywords, ...func }) { const [seniorConfig, setSeniorConfig] = useState([]) const [hpVisible, setHpVisible] = useState(false) const [gpu_count, setGPU] = useState(0) + const [projectDirty, setProjectDirty] = useState(false) const renderRadio = (types) => { return ( @@ -190,6 +192,7 @@ function Train({ allDatasets, datasetCache, keywords, ...func }) {
+      <CheckProjectDirty callback={(dirty) => setProjectDirty(dirty)} />
- @@ -445,7 +448,7 @@ const dis = (dispatch) => { payload: { id }, }) }, - getDatasets(pid, force) { + getDatasets(pid, force = true) { return dispatch({ type: "dataset/queryAllDatasets", payload: { pid, force }, diff --git a/ymir/web/src/pages/user/permission/AuditList.js b/ymir/web/src/pages/user/permission/AuditList.js index b53857513c..1ad5c63bb0 100644 --- a/ymir/web/src/pages/user/permission/AuditList.js +++ b/ymir/web/src/pages/user/permission/AuditList.js @@ -23,7 +23,7 @@ function AuditList({ getUsers, setUserState }) { useEffect(() => { getUserList() - }, []) + }, [query]) const columns = [ { diff --git a/ymir/web/src/pages/user/permission/UserList.js b/ymir/web/src/pages/user/permission/UserList.js index 6cafcfbd08..b49e75f4cd 100644 --- a/ymir/web/src/pages/user/permission/UserList.js +++ b/ymir/web/src/pages/user/permission/UserList.js @@ -23,7 +23,7 @@ function UserList({ getUsers, setUserRole, off }) { useEffect(() => { getUserList() - }, []) + }, [query]) const columns = [ { diff --git a/ymir/web/src/services/__test__/dataset.test.js b/ymir/web/src/services/__test__/dataset.test.js index 8b2419981a..094adcb7bc 100644 --- a/ymir/web/src/services/__test__/dataset.test.js +++ b/ymir/web/src/services/__test__/dataset.test.js @@ -8,6 +8,7 @@ import { createDataset, updateDataset, getInternalDataset, + evaluate, } from "../dataset" import { product, products, requestExample } from './func' @@ -118,4 +119,19 @@ describe("service: dataset", () => { const expected = products(11) requestExample(getInternalDataset, {}, { items: expected, total: expected.length }, 'get') }) + + it("evaluate -> error code", () => { + const params = { datasets: 2342353 } + requestExample(evaluate, params, null, 'post', 111902) + }) + it("evaluate -> normal return", () => { + const datasets = [2342353, 2345] + const gt = 234234 + const params = { projectId: 25343, datasets, gt, confidence: 0.6 } + const expected = datasets.reduce((prev, ds) => ({ + ...prev, + [ds]: { prev_dataset_id: gt } + }), {}) + requestExample(evaluate, params, expected, 'post', 111902) + }) }) diff --git a/ymir/web/src/services/__test__/project.test.js b/ymir/web/src/services/__test__/project.test.js index 057bbf5e32..5ed9a43f00 100644 --- a/ymir/web/src/services/__test__/project.test.js +++ b/ymir/web/src/services/__test__/project.test.js @@ -5,6 +5,7 @@ import { createProject, updateProject, addExampleProject, + checkStatus, } from "../project" import { product, products, requestExample } from './func' @@ -55,6 +56,11 @@ describe("service: projects", () => { const expected = "ok" requestExample(createProject, project, expected, 'post') }) + it("checkStatus -> success", () => { + const pid = 2532432 + const expected = "ok" + requestExample(checkStatus, pid, expected) + }) it("addExampleProject -> success", () => { const project = { is_example: true, diff --git a/ymir/web/src/services/dataset.js b/ymir/web/src/services/dataset.js index 8e8c9b96fa..d1e528c48d 100644 --- a/ymir/web/src/services/dataset.js +++ b/ymir/web/src/services/dataset.js @@ -16,26 +16,40 @@ export function getDataset(id) { * @returns */ export function getDatasetByGroup(group_id) { - return request.get(`datasets/`, { params: { group_id, limit: 10000 }}) + return request.get(`datasets/`, { params: { group_id, limit: 10000 } }) } /** * get datasets * @param {object} param1 { * {number} project_id - * {number} group_id - * {number} type task type - * {number} state dataset state - * {string} name dataset name - * {number} offset query start - * {number} limit query 
count
- * {boolean} is_desc default as true
- * {string} order_by value as: id, create_datetime, asset_count, source. default as id
+ * {number} [group_id]
+ * {number} [type] task type
+ * {number} [state] dataset state
+ * {string} [name] dataset name
+ * {number} [offset] query start
+ * {number} [limit] query count
+ * {boolean} [visible] default as true
+ * {boolean} [is_desc] default as true
+ * {string} [order_by] value as: id, create_datetime, asset_count, source. default as id
  * }
  * @returns
  */
-export function queryDatasets({ project_id, group_id, type, state, name, offset = 0, limit = 10, is_desc, order_by }) {
-  return request.get("datasets/", { params: { project_id, group_id, type, state, name, offset, limit, is_desc, order_by } })
+export function queryDatasets({
+  project_id,
+  group_id,
+  type,
+  state,
+  name,
+  visible = true,
+  offset = 0,
+  limit = 10,
+  is_desc = true,
+  order_by
+}) {
+  return request.get("datasets/", {
+    params: { project_id, group_id, type, state, name, offset, limit, is_desc, order_by, visible }
+  })
 }
 /**
  * get dataset groups
@@ -84,7 +98,7 @@ export function getAsset(id, hash) {
  * @param {number} id
  * @returns
  */
- export function delDataset(id) {
+export function delDataset(id) {
   return request({
     method: "delete",
     url: `/datasets/${id}`,
@@ -103,6 +117,36 @@ export function delDatasetGroup(id) {
   })
 }
+/**
+ * evaluate datasets against a ground truth dataset
+ * @param {number} projectId project id
+ * @param {number[]} datasets ids of the datasets to evaluate
+ * @param {number} gt ground truth dataset id
+ * @param {number} confidence confidence threshold, range: [0, 1]
+ * @returns
+ */
+export function evaluate({ projectId, datasets, gt, confidence }) {
+  return request.post(`/datasets/evaluation`, {
+    project_id: projectId,
+    other_dataset_ids: datasets,
+    gt_dataset_id: gt,
+    confidence_threshold: confidence,
+  })
+}
+
+/**
+ * hide/restore/delete datasets, action is one of: hide, restore, delete
+ * @param {number} projectId
+ * @param {number[]} ids dataset ids
+ * @returns
+ */
+export function batchAct(action, projectId, ids = []) {
+  return request.post(`/datasets/batch`, {
+    project_id: projectId,
+    operations: ids.map(id => ({ id, action, }))
+  })
+}
+
 /**
  * import a dataset into project
  * @param {object} dataset
diff --git a/ymir/web/src/services/model.js b/ymir/web/src/services/model.js
index 8049ec66e0..eac68e11e0 100644
--- a/ymir/web/src/services/model.js
+++ b/ymir/web/src/services/model.js
@@ -24,16 +24,27 @@ export function getModelVersions(group_id) {
  * query models
  * @param {object} param1 {
  * {number} project_id
- * {number} type task type
- * {number} state model state
- * {string} name model name
- * {number} offset query start
- * {number} limit query count
+ * {number} [type] task type
+ * {number} [state] model state
+ * {string} [name] model name
+ * {boolean} [visible] only visible (not hidden) models, default as true
+ * {number} [offset] query start
+ * {number} [limit] query count
 * }
 * @returns
 */
-export function queryModels({ project_id, type, state, name, offset = 0, limit = 10 }) {
-  return request.get("models/", { params: { project_id, type, state, name, offset, limit } })
+export function queryModels({
+  project_id,
+  type,
+  state,
+  name,
+  order_by,
+  is_desc,
+  visible = true,
+  offset = 0,
+  limit = 10,
+}) {
+  return request.get("models/", { params: { project_id, type, state, name, visible, order_by, is_desc, offset, limit } })
 }
 /**
@@ -83,21 +94,38 @@ export function delModelGroup(id) {
   })
 }
+
+/**
+ * hide/restore/delete models
+ * @param {string} action hide/restore/delete
+ * @param {number} projectId
+ * @param {number[]} ids model ids
+ * @returns
+ */
+export function batchAct(action, projectId, ids = []) { + return request.post(`/models/batch`, { + project_id: projectId, + operations: ids.map(id => ({ id, action, })) + }) +} + /** * * @param {object} param { * {string} projectId * {string} name - * {string} [url] - * {number} [modelId] model id + * {string} [path] local file path + * {string} [url] net url + * {number} [modelId] copy model id * {string} [description] * } * @returns */ -export function importModel({ projectId, name, description, url, modelId, }) { +export function importModel({ projectId, name, description, url, path, modelId, }) { return request.post('/models/importing', { project_id: projectId, - input_model_path: url, + input_model_path: path, + input_url: url, input_model_id: modelId, description, group_name: name, diff --git a/ymir/web/src/services/project.js b/ymir/web/src/services/project.js index ee1b253630..c8e3d6cbef 100644 --- a/ymir/web/src/services/project.js +++ b/ymir/web/src/services/project.js @@ -47,18 +47,12 @@ export function delProject(id) { export function createProject({ name, description, - targetIteration, - targetMap, - targetDataset, keywords, }) { return request.post("/projects/", { name, description, training_type: 1, - iteration_target: targetIteration, - map_target: targetMap, - training_dataset_count_target: targetDataset, training_keywords: keywords, }) } @@ -77,9 +71,6 @@ export function addExampleProject() { * @param {object} params * { * {string} name - * {number} targetIteration - * {number} targetMap - * {number} targetDataset * {number} strategy * {number} chunkSize * {string} description @@ -92,9 +83,7 @@ export function addExampleProject() { */ export function updateProject(id, { name, - targetIteration, - targetMap, - targetDataset, + keywords, strategy, chunkSize, description, @@ -108,9 +97,7 @@ export function updateProject(id, { url: `/projects/${id}`, data: { name, - iteration_target: targetIteration, - map_target: targetMap, - training_dataset_count_target: targetDataset, + training_keywords: keywords, mining_strategy: strategy, chunk_size: chunkSize, mining_dataset_id: miningSet, @@ -121,3 +108,12 @@ export function updateProject(id, { }, }) } + +/** + * get project status, dirty/clean + * @param {number} pid + * @returns + */ +export function checkStatus(pid) { + return request.get(`/projects/${pid}/status`) +} diff --git a/ymir/web/src/services/task.js b/ymir/web/src/services/task.js index f38eb32db1..765bf70b81 100644 --- a/ymir/web/src/services/task.js +++ b/ymir/web/src/services/task.js @@ -244,26 +244,27 @@ export function createInferenceTask({ name, projectId, datasetId, - model, + model = [], config, image, imageId, description, }) { - return createTask({ + const params = model.map(md => ({ name, type: TASKTYPES.INFERENCE, project_id: projectId, description, docker_image_config: config, parameters: { - model_id: model, + model_id: md, generate_annotations: true, dataset_id: datasetId, docker_image: image, docker_image_id: imageId, } - }) + })) + return request.post("/tasks/batch", { payloads: params }) } export function createTask(params) { diff --git a/ymir/web/src/utils/string.ts b/ymir/web/src/utils/string.ts index cdfe71bf96..8951900e15 100644 --- a/ymir/web/src/utils/string.ts +++ b/ymir/web/src/utils/string.ts @@ -13,3 +13,11 @@ export function templateString(str: string, obj: ob = {}) { return typeof obj[variable] !== 'undefined' && obj[variable] !== null ? 
obj[variable] : ''
   })
 }
+
+export function string2Array(str: string, separator = ',') {
+  if (!str) {
+    return
+  }
+  const arr = str.split(separator)
+  return arr.map(item => Number.isNaN(Number(item)) ? item : Number(item))
+}
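
For reference, a rough sketch of how the new string2Array helper behaves and how the inference page reads multiple model ids from the query string with it. The sample values are made up, and the "@/utils/string" import path assumes the project's usual webpack alias:

import { string2Array } from "@/utils/string"

string2Array("12,15")   // [12, 15]    numeric items are converted to numbers
string2Array("yolo,3")  // ['yolo', 3] non-numeric items stay as strings
string2Array("")        // undefined, hence the `|| []` fallback below

// ymir/web/src/pages/task/inference/index.js reads ?mid=12,15 roughly like this:
const mid = string2Array(location.query.mid) || []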
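
Similarly, a minimal usage sketch for the evaluate and batchAct services added to ymir/web/src/services/dataset.js above. The project id, dataset ids and threshold are illustrative, and the "hide" action string follows the hide/restore/delete values mentioned in the doc comments:

import { evaluate, batchAct } from "@/services/dataset"

async function evaluateAndHide() {
  // POST /datasets/evaluation: compare datasets 101 and 102 with ground truth 100
  const result = await evaluate({
    projectId: 1,          // sent as project_id
    datasets: [101, 102],  // sent as other_dataset_ids
    gt: 100,               // sent as gt_dataset_id
    confidence: 0.6,       // sent as confidence_threshold
  })

  // POST /datasets/batch: hide the same datasets in one request
  await batchAct("hide", 1, [101, 102])
  return result
}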
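
Finally, a sketch of calling the reworked createInferenceTask from ymir/web/src/services/task.js, which now builds one payload per selected model and posts them all to /tasks/batch in a single request. The names, ids and docker image string are placeholders:

import { message } from "antd"
import { createInferenceTask } from "@/services/task"

async function inferWithTwoModels() {
  const results = await createInferenceTask({
    name: "inference_demo",
    projectId: 1,
    datasetId: 42,
    model: [12, 15],                       // one payload per model id
    imageId: 3,
    image: "sample/infer-executor:latest", // placeholder docker image
    config: { gpu_count: 1 },
  })
  // the inference page shows a warning when only part of the batch was created
  if (results && results.filter(item => item).length !== 2) {
    message.warn("some inference tasks failed to create")
  }
  return results
}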