diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index c9b629db..02b711f8 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -10,12 +10,14 @@ jobs: autoformatter: strategy: matrix: - source-dir: ["./src/", "./examples/src/"] + source-dir: ["./src/", "./examples/src/", "./slo/src/"] include: - source-dir: "./src/" solutionFile: "YdbSdk.sln" - source-dir: "./examples/src/" solutionFile: "YdbExamples.sln" + - source-dir: "./slo/src/" + solutionFile: "src.sln" name: autoformat check runs-on: ubuntu-latest steps: @@ -37,7 +39,7 @@ jobs: inspection: strategy: matrix: - solutionPath: ["./src/YdbSdk.sln", "./examples/src/YdbExamples.sln"] + solutionPath: ["./src/YdbSdk.sln", "./examples/src/YdbExamples.sln", "./slo/src/src.sln"] runs-on: ubuntu-latest name: Inspection steps: diff --git a/.github/workflows/slo.yml b/.github/workflows/slo.yml new file mode 100644 index 00000000..0425dda0 --- /dev/null +++ b/.github/workflows/slo.yml @@ -0,0 +1,68 @@ +on: + push: + branches: [main] + pull_request: + branches: [main] + workflow_dispatch: + +name: SLO + +jobs: + test-slo: + concurrency: + group: slo-${{ github.ref }} + if: (!contains(github.event.pull_request.labels.*.name, 'no slo')) + + runs-on: ubuntu-latest + name: SLO test + permissions: + checks: write + pull-requests: write + contents: read + issues: write + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + if: env.DOCKER_REPO != null + env: + DOCKER_REPO: ${{ secrets.SLO_DOCKER_REPO }} + + - name: Run SLO + uses: ydb-platform/slo-tests@php-version + if: env.DOCKER_REPO != null + env: + DOCKER_REPO: ${{ secrets.SLO_DOCKER_REPO }} + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + KUBECONFIG_B64: ${{ secrets.SLO_KUBE_CONFIG }} + AWS_CREDENTIALS_B64: ${{ secrets.SLO_AWS_CREDENTIALS }} + AWS_CONFIG_B64: ${{ secrets.SLO_AWS_CONFIG }} + DOCKER_USERNAME: ${{ secrets.SLO_DOCKER_USERNAME }} + DOCKER_PASSWORD: ${{ secrets.SLO_DOCKER_PASSWORD }} + DOCKER_REPO: ${{ secrets.SLO_DOCKER_REPO }} + DOCKER_FOLDER: ${{ secrets.SLO_DOCKER_FOLDER }} + s3_endpoint: ${{ secrets.SLO_S3_ENDPOINT }} + s3_images_folder: ${{ vars.SLO_S3_IMAGES_FOLDER }} + grafana_domain: ${{ vars.SLO_GRAFANA_DOMAIN }} + # grafana_dashboard: ${{ vars.SLO_GRAFANA_DASHBOARD }} + grafana_dashboard: dca60386-0d3d-43f5-a2af-5f3fd3e3b295 + grafana_dashboard_width: 2000 + grafana_dashboard_height: 2300 + ydb_version: 'newest' + timeBetweenPhases: 30 + shutdownTime: 30 + + language_id0: 'dotnet' + workload_path0: 'slo/src' + language0: '.NET SDK' + workload_build_context0: ../.. 
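+          # Presumably resolved relative to workload_path0, so the image builds from the
+          # repository root with slo/src/Dockerfile (mirroring slo/playground/docker-compose.yml).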
+          workload_build_options0: -f Dockerfile
+
+      - uses: actions/upload-artifact@v3
+        if: always() && env.DOCKER_REPO != null
+        env:
+          DOCKER_REPO: ${{ secrets.SLO_DOCKER_REPO }}
+        with:
+          name: slo-logs
+          path: logs/
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5f25bbaa..e93dbece 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,6 @@
+- Added MakeTablePath, CopyTable, CopyTables, DescribeTable methods for TableClient
+- Added logging for transactions
+
 ## v0.1.5
 - Fix timeout error on create session
 - Fix transport error on delete session
diff --git a/slo/.gitignore b/slo/.gitignore
new file mode 100644
index 00000000..00868c0f
--- /dev/null
+++ b/slo/.gitignore
@@ -0,0 +1,5 @@
+data/
+.idea/
+bin/
+obj/
+
diff --git a/slo/playground/README.md b/slo/playground/README.md
new file mode 100644
index 00000000..eefb5cf0
--- /dev/null
+++ b/slo/playground/README.md
@@ -0,0 +1,40 @@
+# SLO playground
+
+The playground can be used to test SLO workloads locally.
+
+It has several services:
+
+- `prometheus` - storage for metrics
+- `prometheus-pushgateway` - push acceptor for Prometheus
+- `grafana` - provides charts for metrics
+- `ydb` - a local YDB instance to run the workload against
+
+## Network addresses
+
+- Grafana dashboard: http://localhost:3000
+- Prometheus pushgateway: http://localhost:9091
+- YDB monitoring: http://localhost:8765
+- YDB GRPC: grpc://localhost:2136
+- YDB GRPC TLS: grpcs://localhost:2135
+
+## Start
+
+```shell
+docker-compose up -d
+```
+
+## Stop
+
+```shell
+docker-compose down
+```
+
+## Configs
+
+Grafana dashboards are stored in `configs/grafana/provisioning/dashboards`
+
+## Data
+
+YDB databases are not persistent
+
+All other data, such as metrics and certs, is stored in `data/`
\ No newline at end of file
diff --git a/slo/playground/configs/grafana/provisioning/dashboards/dashboard.yml b/slo/playground/configs/grafana/provisioning/dashboards/dashboard.yml
new file mode 100644
index 00000000..c6784142
--- /dev/null
+++ b/slo/playground/configs/grafana/provisioning/dashboards/dashboard.yml
@@ -0,0 +1,6 @@
+apiVersion: 1
+
+providers:
+  - name: 'SLO'
+    options:
+      path: /etc/grafana/provisioning/dashboards
diff --git a/slo/playground/configs/grafana/provisioning/dashboards/slo.json b/slo/playground/configs/grafana/provisioning/dashboards/slo.json new file mode 100644 index 00000000..69d76bf7 --- /dev/null +++ b/slo/playground/configs/grafana/provisioning/dashboards/slo.json @@ -0,0 +1,646 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { +
"group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 12, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "builder", + "expr": "rate(oks[$__rate_interval]) > 0", + "hide": false, + "legendFormat": "({{sdk}}-{{sdkVersion}}) {{jobName}} OK", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "builder", + "expr": "rate(not_oks[$__rate_interval]) > 0", + "hide": false, + "legendFormat": "({{sdk}}-{{sdkVersion}}) {{jobName}} not OK", + "range": true, + "refId": "C" + } + ], + "title": "SLO Requests RPS", + "transformations": [], + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "axisSoftMin": 0, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 0, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 0 + }, + "id": 14, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "builder", + "expr": "histogram_quantile(1, rate(attempts_bucket[$__rate_interval]))", + "hide": false, + "legendFormat": "{{sdk}}-{{sdkVersion}} {{jobName}}-{{status}}", + "range": true, + "refId": "A" + } + ], + "title": "Attempts", + "transformations": [], + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 9 + }, + "id": 7, + "panels": [], + "title": "Latencies", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + 
"thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 10 + }, + "id": 4, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "9.3.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "builder", + "expr": "latency{jobName=\"read\", status=\"ok\"} > 0", + "legendFormat": "{{sdk}}-{{sdkVersion}}-p{{quantile}}", + "range": true, + "refId": "A" + } + ], + "title": "Read Latencies (OK)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 10 + }, + "id": 5, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "builder", + "expr": "latency{jobName=\"write\", status=\"ok\"} > 0", + "legendFormat": "{{sdk}}-{{sdkVersion}}-p{{quantile}}", + "range": true, + "refId": "A" + } + ], + "title": "Write Latencies (OK)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 18 + }, + "id": 10, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": 
"prometheus" + }, + "editorMode": "builder", + "expr": "latency{jobName=\"read\", status=\"err\"} > 0", + "legendFormat": "{{sdk}}-{{sdkVersion}}-p{{quantile}}", + "range": true, + "refId": "A" + } + ], + "title": "Read Latencies (NOT OK)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 18 + }, + "id": 11, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "builder", + "expr": "latency{jobName=\"write\", status=\"err\"} > 0", + "legendFormat": "{{sdk}}-{{sdkVersion}}-p{{quantile}}", + "range": true, + "refId": "A" + } + ], + "title": "Write Latencies (NOT OK)", + "type": "timeseries" + } + ], + "refresh": "", + "revision": 1, + "schemaVersion": 38, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "filters": [], + "hide": 0, + "label": "", + "name": "filter", + "skipUrlSync": false, + "type": "adhoc" + } + ] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "SLO", + "uid": "7CzMl5t4k", + "version": 1, + "weekStart": "" +} diff --git a/slo/playground/configs/grafana/provisioning/datasources/datasource.yml b/slo/playground/configs/grafana/provisioning/datasources/datasource.yml new file mode 100644 index 00000000..0b62b9c3 --- /dev/null +++ b/slo/playground/configs/grafana/provisioning/datasources/datasource.yml @@ -0,0 +1,11 @@ +apiVersion: 1 + +datasources: + - name: prometheus + type: prometheus + access: proxy + orgId: 1 + url: http://prometheus:9090 + basicAuth: false + isDefault: true + editable: true diff --git a/slo/playground/configs/prometheus/prometheus.yml b/slo/playground/configs/prometheus/prometheus.yml new file mode 100644 index 00000000..281b390b --- /dev/null +++ b/slo/playground/configs/prometheus/prometheus.yml @@ -0,0 +1,8 @@ +global: + scrape_interval: 1s + evaluation_interval: 1s + +scrape_configs: + - job_name: 'slo' + static_configs: + - targets: ['prometheus-pushgateway:9091'] diff --git a/slo/playground/docker-compose.yml b/slo/playground/docker-compose.yml new file mode 100644 index 00000000..dabdb5ba --- /dev/null +++ b/slo/playground/docker-compose.yml @@ -0,0 +1,124 @@ +version: '2.1' + +networks: + monitor-net: + driver: bridge + +services: + prometheus: + image: prom/prometheus:v2.44.0 + container_name: prometheus + user: "$UID:$GID" + volumes: + - 
./configs/prometheus:/etc/prometheus + - ../data/prometheus:/prometheus + command: + - '--config.file=/etc/prometheus/prometheus.yml' + - '--storage.tsdb.path=/prometheus' + - '--storage.tsdb.retention.time=200h' + - '--web.enable-lifecycle' + restart: unless-stopped + ports: + - "9090:9090" + networks: + - monitor-net + + prometheus-pushgateway: + image: prom/pushgateway:v1.6.0 + container_name: prometheus-pushgateway + ports: + - "9091:9091" + networks: + - monitor-net + + grafana: + image: grafana/grafana:9.5.3 + container_name: grafana + user: "$UID:$GID" + volumes: + - ./configs/grafana/provisioning:/etc/grafana/provisioning + - ../data/grafana:/var/lib/grafana + environment: + - GF_SECURITY_ADMIN_USER=admin + - GF_SECURITY_ADMIN_PASSWORD=passw0rD + restart: unless-stopped + ports: + - "3000:3000" + networks: + - monitor-net + + ydb: + image: cr.yandex/yc/yandex-docker-local-ydb:23.1 + container_name: ydb + environment: + - GRPC_TLS_PORT=2135 + - GRPC_PORT=2136 + - MON_PORT=8765 + - YDB_USE_IN_MEMORY_PDISKS=true + - YDB_DEFAULT_LOG_LEVEL=NOTICE + ports: + - "2135:2135" + - "2136:2136" + - "8765:8765" + volumes: + - ../data/ydb_certs:/ydb_certs + networks: + - monitor-net + + slo-create: + build: + context: ../.. + dockerfile: slo/src/Dockerfile + command: + - 'create' + - 'http://ydb:2136' + - '/local' + - '--table-name' + - 'slo-dotnet' + - '--min-partitions-count' + - '6' + - '--max-partitions-count' + - '1000' + - '--partition-size' + - '1' + - '--initial-data-count' + - '1000' + networks: + - monitor-net + depends_on: + ydb: + condition: service_healthy + + slo-run: + build: + context: ../.. + dockerfile: slo/src/Dockerfile + command: + - 'run' + - 'http://ydb:2136' + - '/local' + - '--prom-pgw' + - 'http://prometheus-pushgateway:9091' + - '--table-name' + - 'slo-dotnet' + networks: + - monitor-net + depends_on: + slo-create: + condition: service_completed_successfully + + slo-cleanup: + build: + context: ../.. 
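+      # Same image as slo-create and slo-run, built from the repository root;
+      # this service drops the test table once slo-run has completed.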
+ dockerfile: slo/src/Dockerfile + command: + - 'cleanup' + - 'http://ydb:2136' + - '/local' + - '--table-name' + - 'slo-dotnet' + networks: + - monitor-net + depends_on: + slo-run: + condition: service_completed_successfully diff --git a/slo/src/Cli/Cli.cs b/slo/src/Cli/Cli.cs new file mode 100644 index 00000000..f19dfc63 --- /dev/null +++ b/slo/src/Cli/Cli.cs @@ -0,0 +1,148 @@ +using System.CommandLine; + +namespace slo.Cli; + +internal static class Cli +{ + private static readonly Argument EndpointArgument = new( + "endpoint", + "YDB endpoint to connect to"); + + private static readonly Argument DbArgument = new( + "db", + "YDB database to connect to"); + + private static readonly Option TableOption = new( + new[] { "-t", "--table-name" }, + () => "testingTable", + "table name to create\n "); + + private static readonly Option WriteTimeoutOption = new( + "--write-timeout", + () => 10000, + "write timeout milliseconds"); + + + private static readonly Option MinPartitionsCountOption = new( + "--min-partitions-count", + () => 6, + "minimum amount of partitions in table"); + + private static readonly Option MaxPartitionsCountOption = new( + "--max-partitions-count", + () => 1000, + "maximum amount of partitions in table"); + + private static readonly Option PartitionSizeOption = new( + "--partition-size", + () => 1, + "partition size in mb"); + + private static readonly Option InitialDataCountOption = new( + new[] { "-c", "--initial-data-count" }, + () => 1000, + "amount of initially created rows"); + + + private static readonly Option PromPgwOption = new( + "--prom-pgw", + "minimum amount of partitions in table") { IsRequired = true }; + + private static readonly Option ReportPeriodOption = new( + "--report-period", + () => 250, + "prometheus push period in milliseconds"); + + private static readonly Option ReadRpsOption = new( + "--read-rps", + () => 1000, + "read RPS"); + + private static readonly Option ReadTimeoutOption = new( + "--read-timeout", + () => 10000, + "read timeout milliseconds"); + + private static readonly Option WriteRpsOption = new( + "--write-rps", + () => 100, + "write RPS"); + + private static readonly Option TimeOption = new( + "--time", + () => 140, + "run time in seconds"); + + private static readonly Option ShutdownTimeOption = new( + "--shutdown-time", + () => 30, + "time to wait before force kill workers"); + + private static readonly Command CreateCommand = new( + "create", + "creates table in database") + { + EndpointArgument, + DbArgument, + TableOption, + MinPartitionsCountOption, + MaxPartitionsCountOption, + PartitionSizeOption, + InitialDataCountOption, + WriteTimeoutOption + }; + + + private static readonly Command CleanupCommand = new( + "cleanup", + "drops table in database") + { + EndpointArgument, + DbArgument, + TableOption, + WriteTimeoutOption + }; + + private static readonly Command RunCommand = new( + "run", + "runs workload (read and write to table with sets RPS)") + { + EndpointArgument, + DbArgument, + TableOption, + InitialDataCountOption, + PromPgwOption, + ReportPeriodOption, + ReadRpsOption, + ReadTimeoutOption, + WriteRpsOption, + WriteTimeoutOption, + TimeOption, + ShutdownTimeOption + }; + + private static readonly RootCommand RootCommand = new("SLO app") + { + CreateCommand, CleanupCommand, RunCommand + }; + + internal static async Task Run(string[] args) + { + CreateCommand.SetHandler( + async createConfig => { await CliCommands.Create(createConfig); }, + new CreateConfigBinder(EndpointArgument, DbArgument, TableOption, 
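+                // CreateConfigBinder (see ConfigBinders.cs) packs the parsed arguments and
+                // options into a CreateConfig record for CliCommands.Create.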
MinPartitionsCountOption, + MaxPartitionsCountOption, PartitionSizeOption, InitialDataCountOption, WriteTimeoutOption) + ); + + CleanupCommand.SetHandler( + async cleanUpConfig => { await CliCommands.CleanUp(cleanUpConfig); }, + new CleanUpConfigBinder(EndpointArgument, DbArgument, TableOption, WriteTimeoutOption) + ); + + RunCommand.SetHandler(async runConfig => { await CliCommands.Run(runConfig); }, + new RunConfigBinder(EndpointArgument, DbArgument, TableOption, InitialDataCountOption, PromPgwOption, + ReportPeriodOption, ReadRpsOption, ReadTimeoutOption, WriteRpsOption, WriteTimeoutOption, TimeOption, + ShutdownTimeOption)); + return await RootCommand.InvokeAsync(args); + } +} \ No newline at end of file diff --git a/slo/src/Cli/CliCommands.cs b/slo/src/Cli/CliCommands.cs new file mode 100644 index 00000000..f7251be3 --- /dev/null +++ b/slo/src/Cli/CliCommands.cs @@ -0,0 +1,96 @@ +using Prometheus; +using slo.Jobs; + +namespace slo.Cli; + +public static class CliCommands +{ + internal static async Task Create(CreateConfig config) + { + Console.WriteLine(config); + + await using var client = await Client.CreateAsync(config.Endpoint, config.Db, config.TableName); + + const int maxCreateAttempts = 10; + for (var i = 0; i < maxCreateAttempts; i++) + { + try + { + await client.Init(config.InitialDataCount, + config.PartitionSize, + config.MinPartitionsCount, + config.MaxPartitionsCount, + TimeSpan.FromMilliseconds(config.WriteTimeout)); + break; + } + catch (Exception e) + { + Console.WriteLine(e); + Thread.Sleep(millisecondsTimeout: 1000); + } + } + } + + internal static async Task CleanUp(CleanUpConfig config) + { + Console.WriteLine(config); + + await using var client = await Client.CreateAsync(config.Endpoint, config.Db, config.TableName); + + await client.CleanUp(TimeSpan.FromMilliseconds(config.WriteTimeout)); + } + + internal static async Task Run(RunConfig config) + { + var promPgwEndpoint = $"{config.PromPgw}/metrics"; + const string job = "workload-dotnet"; + + await using var client = await Client.CreateAsync(config.Endpoint, config.Db, config.TableName); + + await client.Init(config.InitialDataCount, 1, 6, 1000, TimeSpan.FromMilliseconds(config.WriteTimeout)); + + Console.WriteLine(config.PromPgw); + + await MetricReset(promPgwEndpoint, job); + using var prometheus = new MetricPusher(promPgwEndpoint, job, intervalMilliseconds: config.ReportPeriod); + + prometheus.Start(); + + var duration = TimeSpan.FromSeconds(config.Time); + + var readJob = new ReadJob( + client, + new RateLimitedCaller( + config.ReadRps, + duration + ), + TimeSpan.FromMilliseconds(config.ReadTimeout)); + + var writeJob = new WriteJob( + client, + new RateLimitedCaller( + config.WriteRps, + duration + ), + TimeSpan.FromMilliseconds(config.WriteTimeout)); + + var readThread = new Thread(readJob.Start); + var writeThread = new Thread(writeJob.Start); + + readThread.Start(); + writeThread.Start(); + await Task.Delay(duration + TimeSpan.FromSeconds(config.ShutdownTime)); + readThread.Join(); + writeThread.Join(); + + await prometheus.StopAsync(); + await MetricReset(promPgwEndpoint, job); + } + + private static async Task MetricReset(string promPgwEndpoint, string job) + { + var deleteUri = $"{promPgwEndpoint}/job/{job}"; + using var httpClient = new HttpClient(); + await httpClient.DeleteAsync(deleteUri); + } +} \ No newline at end of file diff --git a/slo/src/Cli/ConfigBinders.cs b/slo/src/Cli/ConfigBinders.cs new file mode 100644 index 00000000..a4b32493 --- /dev/null +++ b/slo/src/Cli/ConfigBinders.cs @@ -0,0 
+1,125 @@ +using System.CommandLine; +using System.CommandLine.Binding; + +namespace slo.Cli; + +internal class CreateConfigBinder : BinderBase +{ + private readonly Argument _dbArgument; + private readonly Argument _endpointArgument; + private readonly Option _initialDataCountOption; + private readonly Option _maxPartitionsCountOption; + private readonly Option _minPartitionsCountOption; + private readonly Option _partitionSizeOption; + private readonly Option _tableOption; + private readonly Option _writeTimeoutOption; + + public CreateConfigBinder(Argument endpointArgument, Argument dbArgument, + Option tableOption, Option minPartitionsCountOption, Option maxPartitionsCountOption, + Option partitionSizeOption, Option initialDataCountOption, Option writeTimeoutOption) + { + _endpointArgument = endpointArgument; + _dbArgument = dbArgument; + _tableOption = tableOption; + _minPartitionsCountOption = minPartitionsCountOption; + _maxPartitionsCountOption = maxPartitionsCountOption; + _partitionSizeOption = partitionSizeOption; + _initialDataCountOption = initialDataCountOption; + _writeTimeoutOption = writeTimeoutOption; + } + + protected override CreateConfig GetBoundValue(BindingContext bindingContext) + { + return new CreateConfig( + bindingContext.ParseResult.GetValueForArgument(_endpointArgument), + bindingContext.ParseResult.GetValueForArgument(_dbArgument), + bindingContext.ParseResult.GetValueForOption(_tableOption)!, + bindingContext.ParseResult.GetValueForOption(_minPartitionsCountOption), + bindingContext.ParseResult.GetValueForOption(_maxPartitionsCountOption), + bindingContext.ParseResult.GetValueForOption(_partitionSizeOption), + bindingContext.ParseResult.GetValueForOption(_initialDataCountOption), + bindingContext.ParseResult.GetValueForOption(_writeTimeoutOption) + ); + } +} + +internal class CleanUpConfigBinder : BinderBase +{ + private readonly Argument _dbArgument; + private readonly Argument _endpointArgument; + private readonly Option _tableOption; + private readonly Option _writeTimeoutOption; + + public CleanUpConfigBinder(Argument endpointArgument, Argument dbArgument, + Option tableOption, Option writeTimeoutOption) + { + _endpointArgument = endpointArgument; + _dbArgument = dbArgument; + _tableOption = tableOption; + _writeTimeoutOption = writeTimeoutOption; + } + + protected override CleanUpConfig GetBoundValue(BindingContext bindingContext) + { + return new CleanUpConfig( + bindingContext.ParseResult.GetValueForArgument(_endpointArgument), + bindingContext.ParseResult.GetValueForArgument(_dbArgument), + bindingContext.ParseResult.GetValueForOption(_tableOption)!, + bindingContext.ParseResult.GetValueForOption(_writeTimeoutOption) + ); + } +} + +internal class RunConfigBinder : BinderBase +{ + private readonly Argument _dbArgument; + private readonly Argument _endpointArgument; + private readonly Option _initialDataCountOption; + private readonly Option _promPgwOption; + private readonly Option _readRpsOption; + private readonly Option _readTimeoutOption; + private readonly Option _reportPeriodOption; + private readonly Option _shutdownTimeOption; + private readonly Option _tableOption; + private readonly Option _timeOption; + private readonly Option _writeRpsOption; + private readonly Option _writeTimeoutOption; + + public RunConfigBinder(Argument endpointArgument, Argument dbArgument, + Option tableOption, Option initialDataCountOption, Option promPgwOption, + Option reportPeriodOption, Option readRpsOption, Option readTimeoutOption, + Option writeRpsOption, 
Option writeTimeoutOption, Option timeOption, + Option shutdownTimeOption) + { + _endpointArgument = endpointArgument; + _dbArgument = dbArgument; + _tableOption = tableOption; + _initialDataCountOption = initialDataCountOption; + _promPgwOption = promPgwOption; + _reportPeriodOption = reportPeriodOption; + _readRpsOption = readRpsOption; + _readTimeoutOption = readTimeoutOption; + _writeRpsOption = writeRpsOption; + _writeTimeoutOption = writeTimeoutOption; + _timeOption = timeOption; + _shutdownTimeOption = shutdownTimeOption; + } + + protected override RunConfig GetBoundValue(BindingContext bindingContext) + { + return new RunConfig( + bindingContext.ParseResult.GetValueForArgument(_endpointArgument), + bindingContext.ParseResult.GetValueForArgument(_dbArgument), + bindingContext.ParseResult.GetValueForOption(_tableOption)!, + bindingContext.ParseResult.GetValueForOption(_initialDataCountOption), + bindingContext.ParseResult.GetValueForOption(_promPgwOption)!, + bindingContext.ParseResult.GetValueForOption(_reportPeriodOption), + bindingContext.ParseResult.GetValueForOption(_readRpsOption), + bindingContext.ParseResult.GetValueForOption(_readTimeoutOption), + bindingContext.ParseResult.GetValueForOption(_writeRpsOption), + bindingContext.ParseResult.GetValueForOption(_writeTimeoutOption), + bindingContext.ParseResult.GetValueForOption(_timeOption), + bindingContext.ParseResult.GetValueForOption(_shutdownTimeOption) + ); + } +} \ No newline at end of file diff --git a/slo/src/Cli/Configs.cs b/slo/src/Cli/Configs.cs new file mode 100644 index 00000000..d5ad8d95 --- /dev/null +++ b/slo/src/Cli/Configs.cs @@ -0,0 +1,9 @@ +namespace slo.Cli; + +internal record CreateConfig(string Endpoint, string Db, string TableName, int MinPartitionsCount, + int MaxPartitionsCount, int PartitionSize, int InitialDataCount, int WriteTimeout); + +internal record CleanUpConfig(string Endpoint, string Db, string TableName, int WriteTimeout); + +internal record RunConfig(string Endpoint, string Db, string TableName, int InitialDataCount, string PromPgw, + int ReportPeriod, int ReadRps, int ReadTimeout, int WriteRps, int WriteTimeout, int Time, int ShutdownTime); \ No newline at end of file diff --git a/slo/src/Client.cs b/slo/src/Client.cs new file mode 100644 index 00000000..810ca198 --- /dev/null +++ b/slo/src/Client.cs @@ -0,0 +1,115 @@ +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Logging.Abstractions; +using Ydb.Sdk; +using Ydb.Sdk.Services.Table; + +namespace slo; + +public class Client : IAsyncDisposable +{ + public readonly Executor Executor; + public readonly string TableName; + + private readonly ServiceProvider _serviceProvider; + private readonly Driver _driver; + private readonly TableClient _tableClient; + + private readonly Semaphore _semaphore; + + private Client(string tableName, Executor executor, ServiceProvider serviceProvider, Driver driver, + TableClient tableClient, uint sessionPoolLimit) + { + TableName = tableName; + Executor = executor; + _serviceProvider = serviceProvider; + _driver = driver; + _tableClient = tableClient; + _semaphore = new Semaphore((int)sessionPoolLimit, (int)sessionPoolLimit); + } + + public async Task Init(int initialDataCount, int partitionSize, int minPartitionsCount, int maxPartitionsCount, + TimeSpan timeout) + { + await Executor.ExecuteSchemeQuery( + Queries.GetCreateQuery(TableName, partitionSize, minPartitionsCount, maxPartitionsCount), + timeout); + + await 
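+        // load the current MAX(id) so newly generated rows continue after the existing ones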
DataGenerator.LoadMaxId(TableName, Executor); + + var tasks = new List { Capacity = initialDataCount }; + + for (var i = 0; i < initialDataCount; i++) + { + await CallFuncWithSessionPoolLimit(() => Executor.ExecuteDataQuery( + Queries.GetWriteQuery(TableName), + DataGenerator.GetUpsertData(), + timeout: timeout + )); + } + + await Task.WhenAll(tasks); + } + + public async Task CleanUp(TimeSpan timeout) + { + await Executor.ExecuteSchemeQuery(Queries.GetDropQuery(TableName), timeout); + } + + private static ServiceProvider GetServiceProvider() + { + return new ServiceCollection() + .AddLogging(configure => configure.AddConsole().SetMinimumLevel(LogLevel.Information)) + .BuildServiceProvider(); + } + + public static async Task CreateAsync(string endpoint, string db, string tableName, + uint sessionPoolLimit = 100) + { + var driverConfig = new DriverConfig( + endpoint, + db + ); + + var serviceProvider = GetServiceProvider(); + var loggerFactory = serviceProvider.GetService(); + + loggerFactory ??= NullLoggerFactory.Instance; + var driver = await Driver.CreateInitialized(driverConfig, loggerFactory); + + var tableClient = new TableClient(driver, new TableClientConfig(new SessionPoolConfig(sessionPoolLimit))); + + var executor = new Executor(tableClient); + + var table = new Client(tableName, executor, serviceProvider, driver, tableClient, sessionPoolLimit); + + return table; + } + + public Task CallFuncWithSessionPoolLimit(Func func) + { + _semaphore.WaitOne(); + + async Task FuncWithRelease() + { + try + { + await func(); + } + finally + { + _semaphore.Release(); + } + } + + _ = FuncWithRelease(); + return Task.CompletedTask; + } + + public async ValueTask DisposeAsync() + { + _tableClient.Dispose(); + await _driver.DisposeAsync(); + await _serviceProvider.DisposeAsync(); + } +} \ No newline at end of file diff --git a/slo/src/DataGenerator.cs b/slo/src/DataGenerator.cs new file mode 100644 index 00000000..745b712a --- /dev/null +++ b/slo/src/DataGenerator.cs @@ -0,0 +1,35 @@ +using Ydb.Sdk.Value; + +namespace slo; + +public static class DataGenerator +{ + private static readonly Random Random = new(); + + public static int MaxId { get; private set; } + + public static async Task LoadMaxId(string tableName, Executor executor) + { + var response = await executor.ExecuteDataQuery(Queries.GetLoadMaxIdQuery(tableName)); + var row = response.Result.ResultSets[0].Rows[0]; + var value = row[0]; + MaxId = (int?)value.GetOptionalUint64() ?? 
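+        // MAX(id) is NULL on an empty table, hence the fallback to 0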
0; + } + + public static Dictionary GetUpsertData() + { + MaxId++; + return new Dictionary + { + { "$id", YdbValue.MakeUint64((ulong)MaxId) }, + { + "$payload_str", + YdbValue.MakeUtf8(string.Join("", Enumerable + .Repeat(0, Random.Next(20, 40)) + .Select(_ => (char)new Random().Next(127)))) + }, + { "$payload_double", YdbValue.MakeDouble(Random.NextDouble()) }, + { "$payload_timestamp", YdbValue.MakeTimestamp(DateTime.Now) } + }; + } +} \ No newline at end of file diff --git a/slo/src/Dockerfile b/slo/src/Dockerfile new file mode 100644 index 00000000..3b582784 --- /dev/null +++ b/slo/src/Dockerfile @@ -0,0 +1,20 @@ +FROM mcr.microsoft.com/dotnet/sdk:7.0 AS build + +COPY ../ /src +RUN ls /src + + +WORKDIR /src/slo/src +RUN ls +RUN dotnet restore *.sln +RUN dotnet publish *.sln -c release -o /app --no-restore -f net6.0 + +##################### + +FROM mcr.microsoft.com/dotnet/runtime:6.0 AS run + +WORKDIR /app + +COPY --from=build /app ./ + +ENTRYPOINT ["./slo"] diff --git a/slo/src/Executor.cs b/slo/src/Executor.cs new file mode 100644 index 00000000..590b92ae --- /dev/null +++ b/slo/src/Executor.cs @@ -0,0 +1,71 @@ +using Prometheus; +using Ydb.Sdk.Services.Table; +using Ydb.Sdk.Value; + +namespace slo; + +public class Executor +{ + private readonly TableClient _tableClient; + + public Executor(TableClient tableClient) + { + _tableClient = tableClient; + } + + public async Task ExecuteSchemeQuery(string query, TimeSpan? timeout = null) + { + var response = await _tableClient.SessionExec( + async session => await session.ExecuteSchemeQuery(query, + new ExecuteSchemeQuerySettings { OperationTimeout = timeout, TransportTimeout = timeout * 1.1 })); + response.Status.EnsureSuccess(); + } + + public async Task ExecuteDataQuery( + string query, + Dictionary? parameters = null, + TimeSpan? timeout = null, + Histogram? attemptsHistogram = null, + Gauge? errorsGauge = null) + + { + var txControl = TxControl.BeginSerializableRW().Commit(); + + var querySettings = new ExecuteDataQuerySettings + { OperationTimeout = timeout, TransportTimeout = timeout * 1.1 }; + + var attempts = 0; + + var response = await _tableClient.SessionExec( + async session => + { + attempts++; + var response = parameters == null + ? await session.ExecuteDataQuery( + query, + txControl, + querySettings) + : await session.ExecuteDataQuery( + query, + txControl, + parameters, + querySettings); + if (!response.Status.IsSuccess) + { + errorsGauge?.WithLabels(Utils.GetResonseStatusName(response.Status.StatusCode), "retried").Inc(); + Console.WriteLine(response.Status); + } + + return response; + }); + attemptsHistogram?.WithLabels(response.Status.IsSuccess ? 
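+        // one observation per request: how many SessionExec attempts it took, labelled by final status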
"ok" : "err").Observe(attempts); + if (!response.Status.IsSuccess) + { + errorsGauge?.WithLabels(Utils.GetResonseStatusName(response.Status.StatusCode), "finally").Inc(); + } + + response.Status.EnsureSuccess(); + + return (ExecuteDataQueryResponse)response; + } +} \ No newline at end of file diff --git a/slo/src/Jobs/Job.cs b/slo/src/Jobs/Job.cs new file mode 100644 index 00000000..59cc2be2 --- /dev/null +++ b/slo/src/Jobs/Job.cs @@ -0,0 +1,107 @@ +using System.Diagnostics; +using Prometheus; +using Ydb.Sdk; + +namespace slo.Jobs; + +public abstract class Job +{ + private readonly Gauge _inFlightGauge; + + private readonly Gauge _okGauge; + private readonly Gauge _notOkGauge; + + private readonly Summary _latencySummary; + + private readonly RateLimitedCaller _rateLimitedCaller; + protected readonly TimeSpan Timeout; + + protected readonly Histogram AttemptsHistogram; + protected readonly Gauge ErrorsGauge; + protected readonly Random Random = new(); + + protected readonly Client Client; + + protected Job(Client client, RateLimitedCaller rateLimitedCaller, string jobName, TimeSpan timeout) + { + Client = client; + _rateLimitedCaller = rateLimitedCaller; + Timeout = timeout; + + var metricFactory = Metrics.WithLabels(new Dictionary + { + { "jobName", jobName }, + { "sdk", "dotnet" }, + { "sdkVersion", Environment.Version.ToString() } + }); + + _okGauge = metricFactory.CreateGauge("oks", "Count of OK"); + _notOkGauge = metricFactory.CreateGauge("not_oks", "Count of not OK"); + _inFlightGauge = metricFactory.CreateGauge("in_flight", "amount of requests in flight"); + + _latencySummary = metricFactory.CreateSummary( + "latency", + "Latencies (OK)", + new[] { "status" }, + new SummaryConfiguration + { + MaxAge = TimeSpan.FromSeconds(15), + Objectives = new QuantileEpsilonPair[] + { + new(0.5, 0.05), + new(0.99, 0.005), + new(0.999, 0.0005) + } + } + ); + + AttemptsHistogram = metricFactory.CreateHistogram( + "attempts", + "summary of amount for request", + new[] { "status" }, + new HistogramConfiguration { Buckets = Histogram.LinearBuckets(1, 1, 10) }); + + ErrorsGauge = metricFactory.CreateGauge("errors", "amount of errors", new[] { "class", "in" }); + + foreach (var statusCode in Enum.GetValues()) + { + ErrorsGauge.WithLabels(Utils.GetResonseStatusName(statusCode), "retried").IncTo(0); + ErrorsGauge.WithLabels(Utils.GetResonseStatusName(statusCode), "finally").IncTo(0); + } + } + + public async void Start() + { + await _rateLimitedCaller.StartCalling( + () => Client.CallFuncWithSessionPoolLimit( + async () => await DoJob()), + _inFlightGauge); + } + + private async Task DoJob() + { + _inFlightGauge.Inc(); + var sw = Stopwatch.StartNew(); + try + { + await PerformQuery(); + sw.Stop(); + + _latencySummary.WithLabels("ok").Observe(sw.ElapsedMilliseconds); + _okGauge.Inc(); + _inFlightGauge.Dec(); + } + catch (Exception e) + { + Console.WriteLine(e); + sw.Stop(); + + _latencySummary.WithLabels("err").Observe(sw.ElapsedMilliseconds); + _notOkGauge.Inc(); + _inFlightGauge.Dec(); + throw; + } + } + + protected abstract Task PerformQuery(); +} \ No newline at end of file diff --git a/slo/src/Jobs/ReadJob.cs b/slo/src/Jobs/ReadJob.cs new file mode 100644 index 00000000..4508c781 --- /dev/null +++ b/slo/src/Jobs/ReadJob.cs @@ -0,0 +1,28 @@ +using Ydb.Sdk.Value; + +namespace slo.Jobs; + +internal class ReadJob : Job +{ + public ReadJob(Client client, RateLimitedCaller rateLimitedCaller, TimeSpan timeout) : + base(client, rateLimitedCaller, "read", timeout) + { + } + + + protected override async Task 
PerformQuery() + { + var parameters = new Dictionary + { + { "$id", YdbValue.MakeUint64((ulong)Random.Next(DataGenerator.MaxId)) } + }; + + await Client.Executor.ExecuteDataQuery( + Queries.GetReadQuery(Client.TableName), + parameters, + Timeout, + AttemptsHistogram, + ErrorsGauge + ); + } +} \ No newline at end of file diff --git a/slo/src/Jobs/WriteJob.cs b/slo/src/Jobs/WriteJob.cs new file mode 100644 index 00000000..45c4861e --- /dev/null +++ b/slo/src/Jobs/WriteJob.cs @@ -0,0 +1,23 @@ +namespace slo.Jobs; + +internal class WriteJob : Job +{ + public WriteJob(Client client, RateLimitedCaller rateLimitedCaller, TimeSpan timeout) : + base(client, rateLimitedCaller, "write", timeout) + { + } + + + protected override async Task PerformQuery() + { + var parameters = DataGenerator.GetUpsertData(); + + await Client.Executor.ExecuteDataQuery( + Queries.GetWriteQuery(Client.TableName), + parameters, + Timeout, + AttemptsHistogram, + ErrorsGauge + ); + } +} \ No newline at end of file diff --git a/slo/src/Program.cs b/slo/src/Program.cs new file mode 100644 index 00000000..9f41679d --- /dev/null +++ b/slo/src/Program.cs @@ -0,0 +1,3 @@ +using slo.Cli; + +return await Cli.Run(args); \ No newline at end of file diff --git a/slo/src/Queries.cs b/slo/src/Queries.cs new file mode 100644 index 00000000..26d71c4c --- /dev/null +++ b/slo/src/Queries.cs @@ -0,0 +1,58 @@ +namespace slo; + +public static class Queries +{ + public static string GetCreateQuery(string tableName, int partitionSize, int minPartitionsCount, + int maxPartitionsCount) + { + return $@" +CREATE TABLE `{tableName}` ( + `hash` UINT64, + `id` UINT64, + `payload_str` UTF8, + `payload_double` DOUBLE, + `payload_timestamp` TIMESTAMP, + `payload_hash` UINT64, + PRIMARY KEY (`hash`, `id`) +) +WITH ( + AUTO_PARTITIONING_BY_SIZE = ENABLED, + AUTO_PARTITIONING_PARTITION_SIZE_MB = {partitionSize}, + AUTO_PARTITIONING_MIN_PARTITIONS_COUNT = {minPartitionsCount}, + AUTO_PARTITIONING_MAX_PARTITIONS_COUNT = {maxPartitionsCount} +); +"; + } + + public static string GetDropQuery(string tableName) + { + return $"DROP TABLE `{tableName}`"; + } + + public static string GetLoadMaxIdQuery(string tableName) + { + return $"SELECT MAX(id) as max_id FROM `{tableName}`"; + } + + public static string GetReadQuery(string tableName) + { + return $@" +DECLARE $id AS Uint64; +SELECT id, payload_str, payload_double, payload_timestamp, payload_hash +FROM `{tableName}` +WHERE id = $id AND hash = Digest::NumericHash($id) +"; + } + + public static string GetWriteQuery(string tableName) + { + return $@" +DECLARE $id AS Uint64; +DECLARE $payload_str AS Utf8; +DECLARE $payload_double AS Double; +DECLARE $payload_timestamp AS Timestamp; +INSERT INTO `{tableName}` (id, hash, payload_str, payload_double, payload_timestamp) +VALUES ($id, Digest::NumericHash($id), $payload_str, $payload_double, $payload_timestamp) +"; + } +} \ No newline at end of file diff --git a/slo/src/README.md b/slo/src/README.md new file mode 100644 index 00000000..7ce80dbd --- /dev/null +++ b/slo/src/README.md @@ -0,0 +1,130 @@ +# SLO workload + +SLO is the type of test where app based on ydb-sdk is tested against falling YDB cluster nodes, tablets, network +(that is possible situations for distributed DBs with hundreds of nodes) + +### Usage: + +It has 3 commands: + +- `create` - creates table in database +- `cleanup` - drops table in database +- `run` - runs workload (read and write to table with sets RPS) + +### Run examples with all arguments: + +create: + +`slo create 
grpcs://ydb.cool.example.com:2135 /some/folder -t tableName
+--min-partitions-count 6 --max-partitions-count 1000 --partition-size 1 -c 1000
+--write-timeout 10000`
+
+cleanup:
+
+`slo cleanup grpcs://ydb.cool.example.com:2135 /some/folder -t tableName`
+
+run:
+
+`slo run grpcs://ydb.cool.example.com:2135 /some/folder -t tableName
+--prom-pgw http://prometheus-pushgateway:9091 --report-period 250
+--read-rps 1000 --read-timeout 10000
+--write-rps 100 --write-timeout 10000
+--time 600 --shutdown-time 30`
+
+## Arguments for commands:
+
+### create
+`slo create [<endpoint> [<db>]] [options]`
+
+```
+Arguments:
+  <endpoint>  YDB endpoint to connect to
+  <db>        YDB database to connect to
+
+Options:
+  -t, --table-name <table-name>                  table name to create
+                                                 [default: testingTable]
+  --min-partitions-count <min-partitions-count>  minimum amount of partitions in table [default: 6]
+  --max-partitions-count <max-partitions-count>  maximum amount of partitions in table [default: 1000]
+  --partition-size <partition-size>              partition size in mb [default: 1]
+  -c, --initial-data-count <initial-data-count>  amount of initially created rows [default: 1000]
+  --write-timeout <write-timeout>                write timeout milliseconds [default: 10000]
+```
+
+### cleanup
+`slo cleanup [<endpoint> [<db>]] [options]`
+
+```
+Arguments:
+  <endpoint>  YDB endpoint to connect to
+  <db>        YDB database to connect to
+
+Options:
+  -t, --table-name <table-name>    table name to create
+                                   [default: testingTable]
+  --write-timeout <write-timeout>  write timeout milliseconds [default: 10000]
+```
+
+### run
+`slo run [<endpoint> [<db>]] [options]`
+
+```
+Arguments:
+  <endpoint>  YDB endpoint to connect to
+  <db>        YDB database to connect to
+
+Options:
+  -t, --table-name <table-name>      table name to create
+                                     [default: testingTable]
+  --prom-pgw <prom-pgw> (REQUIRED)   prometheus pushgateway address
+  --report-period <report-period>    prometheus push period in milliseconds [default: 250]
+  --read-rps <read-rps>              read RPS [default: 1000]
+  --read-timeout <read-timeout>      read timeout milliseconds [default: 10000]
+  --write-rps <write-rps>            write RPS [default: 100]
+  --time