diff --git a/.github/scripts/algolia.py b/.github/scripts/algolia.py
new file mode 100644
index 00000000000..5071ea58006
--- /dev/null
+++ b/.github/scripts/algolia.py
@@ -0,0 +1,118 @@
+import os
+import sys
+import json
+from bs4 import BeautifulSoup
+from algoliasearch.search_client import SearchClient
+
+url = "docs.dapr.io"
+if len(sys.argv) > 1:
+    starting_directory = os.path.join(os.getcwd(), str(sys.argv[1]))
+else:
+    starting_directory = os.getcwd()
+
+ALGOLIA_APP_ID = os.getenv('ALGOLIA_APP_ID')
+ALGOLIA_API_KEY = os.getenv('ALGOLIA_API_WRITE_KEY')
+ALGOLIA_INDEX_NAME = os.getenv('ALGOLIA_INDEX_NAME')
+
+client = SearchClient.create(ALGOLIA_APP_ID, ALGOLIA_API_KEY)
+index = client.init_index(ALGOLIA_INDEX_NAME)
+
+excluded_files = [
+    "404.html",
+]
+
+excluded_directories = [
+    "zh-hans",
+]
+
+rankings = {
+    "Getting started": 0,
+    "Concepts": 100,
+    "Developing applications": 200,
+    "Operations": 300,
+    "Reference": 400,
+    "Contributing": 500,
+    "Home": 600
+}
+
+def scan_directory(directory: str, pages: list):
+    # Walk the generated site, collecting every indexable HTML page.
+    if os.path.basename(directory) in excluded_directories:
+        print(f'Skipping directory: {directory}')
+        return
+    for file in os.listdir(directory):
+        path = os.path.join(directory, file)
+        if os.path.isfile(path):
+            if file.endswith(".html") and file not in excluded_files:
+                # Pages marked noindex are hidden from search.
+                if '<meta name="robots" content="noindex">' not in open(path, encoding="utf8").read():
+                    print(f'Indexing: {path}')
+                    pages.append(path)
+                else:
+                    print(f'Skipping hidden page: {path}')
+        else:
+            scan_directory(path, pages)
+
+def parse_file(path: str):
+    # Extract a page's title, description, URL, and body text into an
+    # Algolia record, ranked by its top-level docs section.
+    data = {}
+    data["hierarchy"] = {}
+    data["rank"] = 999
+    data["subrank"] = 99
+    data["type"] = "lvl2"
+    data["lvl0"] = ""
+    data["lvl1"] = ""
+    data["lvl2"] = ""
+    data["lvl3"] = ""
+    text = ""
+    subrank = 0
+    with open(path, "r", errors='ignore') as file:
+        content = file.read()
+    soup = BeautifulSoup(content, "html.parser")
+    for meta in soup.find_all("meta"):
+        if meta.get("name") == "description":
+            data["lvl2"] = meta.get("content")
+            data["hierarchy"]["lvl1"] = meta.get("content")
+        elif meta.get("property") == "og:title":
+            data["lvl0"] = meta.get("content")
+            data["hierarchy"]["lvl0"] = meta.get("content")
+            data["hierarchy"]["lvl2"] = meta.get("content")
+        elif meta.get("property") == "og:url":
+            data["url"] = meta.get("content")
+            data["path"] = meta.get("content").split(url)[1]
+            data["objectID"] = meta.get("content").split(url)[1]
+    breadcrumbs = soup.find_all("li", class_="breadcrumb-item")
+    try:
+        subrank = len(breadcrumbs)
+        data["subrank"] = subrank
+    except Exception:
+        subrank = 99
+        data["subrank"] = 99
+    for bc in breadcrumbs:
+        section = bc.text.strip()
+        data["lvl1"] = section
+        data["hierarchy"]["lvl0"] = section
+        try:
+            data["rank"] = rankings[section] + subrank
+        except KeyError:
+            print(f"Rank not found for section {section}")
+            data["rank"] = 998
+        break
+    for p in soup.find_all("p"):
+        if p.text != "":
+            text = text + p.text
+    data["text"] = text
+    return data
+
+def index_payload(payload):
+    # Replace the entire index with the freshly scraped records.
+    res = index.replace_all_objects(payload)
+    res.wait()
+
+
+if __name__ == "__main__":
+    pages = []
+    payload = []
+    scan_directory(starting_directory, pages)
+    for page in pages:
+        data = parse_file(page)
+        if "objectID" in data:
+            payload.append(data)
+    index_payload(payload)
diff --git a/.github/workflows/website-root.yml b/.github/workflows/website-root.yml
index ceaad7f627a..1f8e503e4c2 100644
--- a/.github/workflows/website-root.yml
+++ b/.github/workflows/website-root.yml
@@ -1,43 +1,75 @@
 name: Azure Static Web App Root
 on:
+  workflow_dispatch:
   push:
     branches:
-
- v1.10 + - v1.11 pull_request: types: [opened, synchronize, reopened, closed] branches: - - v1.10 + - v1.11 + +concurrency: + # Cancel the previously triggered build for only PR build. + group: website-${{ github.event.pull_request.number || github.sha }} + cancel-in-progress: true jobs: build_and_deploy_job: - if: github.event_name == 'push' || (github.event_name == 'pull_request' && github.event.action != 'closed') + name: Build Hugo Website + if: github.event.action != 'closed' runs-on: ubuntu-latest - name: Build and Deploy Job + env: + SWA_BASE: 'proud-bay-0e9e0e81e' + HUGO_ENV: production steps: - - uses: actions/checkout@v3 + - name: Checkout docs repo + uses: actions/checkout@v3 + with: + submodules: true + - name: Setup Node + uses: actions/setup-node@v2 + with: + node-version: '14' + - name: Setup Hugo + uses: peaceiris/actions-hugo@v2.5.0 with: - submodules: recursive - fetch-depth: 0 + hugo-version: 0.102.3 + extended: true - name: Setup Docsy - run: cd daprdocs && git submodule update --init --recursive && sudo npm install -D --save autoprefixer && sudo npm install -D --save postcss-cli - - name: Build And Deploy - id: builddeploy + run: | + cd daprdocs + git submodule update --init --recursive + sudo npm install -D --save autoprefixer + sudo npm install -D --save postcss-cli + - name: Build Hugo Website + run: | + cd daprdocs + git config --global --add safe.directory /github/workspace + if [ $GITHUB_EVENT_NAME == 'pull_request' ]; then + STAGING_URL="https://${SWA_BASE}-${{github.event.number}}.westus2.azurestaticapps.net/" + fi + hugo ${STAGING_URL+-b "$STAGING_URL"} + - name: Deploy docs site uses: Azure/static-web-apps-deploy@v1 - env: - HUGO_ENV: production - HUGO_VERSION: "0.100.2" with: azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_API_TOKEN_PROUD_BAY_0E9E0E81E }} - skip_deploy_on_missing_secrets: true - repo_token: ${{ secrets.GITHUB_TOKEN }} # Used for Github integrations (i.e. 
PR comments) + repo_token: ${{ secrets.GITHUB_TOKEN }} action: "upload" - app_location: "/daprdocs" - app_build_command: "git config --global --add safe.directory /github/workspace && hugo" - output_location: "public" - skip_api_build: true + app_location: "daprdocs/public/" + api_location: "daprdocs/public/" + output_location: "" + skip_app_build: true + skip_deploy_on_missing_secrets: true + - name: Upload Hugo artifacts + uses: actions/upload-artifact@v3 + with: + name: hugo_build + path: ./daprdocs/public/ + if-no-files-found: error - close_pull_request_job: + close_staging_site: if: github.event_name == 'pull_request' && github.event.action == 'closed' runs-on: ubuntu-latest name: Close Pull Request Job @@ -48,3 +80,30 @@ jobs: with: azure_static_web_apps_api_token: ${{ secrets.AZURE_STATIC_WEB_APPS_API_TOKEN_PROUD_BAY_0E9E0E81E }} action: "close" + skip_deploy_on_missing_secrets: true + + algolia_index: + name: Index site for Algolia + if: github.event_name == 'push' + needs: ['build_and_deploy_job'] + runs-on: ubuntu-latest + env: + ALGOLIA_APP_ID: ${{ secrets.ALGOLIA_APP_ID }} + ALGOLIA_API_WRITE_KEY: ${{ secrets.ALGOLIA_API_WRITE_KEY }} + ALGOLIA_INDEX_NAME: daprdocs + steps: + - name: Checkout docs repo + uses: actions/checkout@v2 + with: + submodules: false + - name: Download Hugo artifacts + uses: actions/download-artifact@v3 + with: + name: hugo_build + path: site/ + - name: Install Python packages + run: | + pip install --upgrade bs4 + pip install --upgrade 'algoliasearch>=2.0,<3.0' + - name: Index site + run: python ./.github/scripts/algolia.py ./site diff --git a/README.md b/README.md index 2dd61a89ea9..11ec2756e4d 100644 --- a/README.md +++ b/README.md @@ -14,8 +14,8 @@ The following branches are currently maintained: | Branch | Website | Description | | ------------------------------------------------------------ | -------------------------- | ------------------------------------------------------------------------------------------------ | -| [v1.10](https://github.com/dapr/docs) (primary) | https://docs.dapr.io | Latest Dapr release documentation. Typo fixes, clarifications, and most documentation goes here. | -| [v1.11](https://github.com/dapr/docs/tree/v1.11) (pre-release) | https://v1-11.docs.dapr.io/ | Pre-release documentation. Doc updates that are only applicable to v1.11+ go here. | +| [v1.11](https://github.com/dapr/docs) (primary) | https://docs.dapr.io | Latest Dapr release documentation. Typo fixes, clarifications, and most documentation goes here. | +| [v1.12](https://github.com/dapr/docs/tree/v1.12) (pre-release) | https://v1-12.docs.dapr.io/ | Pre-release documentation. Doc updates that are only applicable to v1.12+ go here. | For more information visit the [Dapr branch structure](https://docs.dapr.io/contributing/docs-contrib/contributing-docs/#branch-guidance) document. diff --git a/daprdocs/assets/scss/_code.scss b/daprdocs/assets/scss/_code.scss index dd05e51bf1b..49ad9c8b36a 100644 --- a/daprdocs/assets/scss/_code.scss +++ b/daprdocs/assets/scss/_code.scss @@ -1,38 +1,12 @@ // Code formatting. 
-.copy-code-button { - color: #272822; - background-color: #FFF; - border-color: #0D2192; - border: 2px solid; - border-radius: 3px 3px 0px 0px; - - /* right-align */ - display: block; - margin-left: auto; - margin-right: 0; - - margin-bottom: -2px; - padding: 3px 8px; - font-size: 0.8em; +.highlight .copy-icon { + position: absolute; + right: 20px; + top: 18px; + opacity: 0.7; } -.copy-code-button:hover { - cursor: pointer; - background-color: #F2F2F2; -} - -.copy-code-button:focus { - /* Avoid an ugly focus outline on click in Chrome, - but darken the button for accessibility. - See https://stackoverflow.com/a/25298082/1481479 */ - background-color: #E6E6E6; - outline: 0; -} - -.copy-code-button:active { - background-color: #D9D9D9; -} .highlight pre { /* Avoid pushing up the copy buttons. */ @@ -40,25 +14,31 @@ } .td-content { - // Highlighted code. + + // Highlighted code. .highlight { @extend .card; - + margin: 0rem 0; padding: 0rem; margin-bottom: 2rem; max-width: 100%; - + + border: none; + pre { margin: 0; padding: 1rem; + border-radius: 10px; } } // Inline code - p code, li > code, table code { + p code, + li>code, + table code { color: inherit; padding: 0.2em 0.4em; margin: 0; @@ -78,11 +58,11 @@ word-wrap: normal; background-color: $gray-100; padding: $spacer; - + max-width: 100%; - > code { - background-color: inherit !important; + >code { + background-color: inherit !important; padding: 0; margin: 0; font-size: 100%; diff --git a/daprdocs/config.toml b/daprdocs/config.toml index ca415054a66..8090c7b7da8 100644 --- a/daprdocs/config.toml +++ b/daprdocs/config.toml @@ -1,5 +1,5 @@ # Site Configuration -baseURL = "https://v1-11.docs.dapr.io" +baseURL = "https://docs.dapr.io" title = "Dapr Docs" theme = "docsy" disableFastRender = true @@ -171,17 +171,20 @@ github_subdir = "daprdocs" github_branch = "v1.11" # Versioning -version_menu = "v1.11 (preview)" +version_menu = "v1.11 (latest)" version = "v1.11" archived_version = false url_latest_version = "https://docs.dapr.io" [[params.versions]] - version = "v1.11 (preview)" + version = "v1.12 (preview)" url = "#" [[params.versions]] - version = "v1.10 (latest)" + version = "v1.11 (latest)" url = "https://docs.dapr.io" +[[params.versions]] + version = "v1.10" + url = "https://v1-10.docs.dapr.io" [[params.versions]] version = "v1.9" url = "https://v1-9.docs.dapr.io" @@ -203,27 +206,6 @@ url_latest_version = "https://docs.dapr.io" [[params.versions]] version = "v1.3" url = "https://v1-3.docs.dapr.io" -[[params.versions]] - version = "v1.2" - url = "https://v1-2.docs.dapr.io" -[[params.versions]] - version = "v1.1" - url = "https://v1-1.docs.dapr.io" -[[params.versions]] - version = "v1.0" - url = "https://v1-0.docs.dapr.io" -[[params.versions]] - version = "v0.11" - url = "https://v0-11.docs.dapr.io" -[[params.versions]] - version = "v0.10" - url = "https://github.com/dapr/docs/tree/v0.10.0" -[[params.versions]] - version = "v0.9" - url = "https://github.com/dapr/docs/tree/v0.9.0" -[[params.versions]] - version = "v0.8" - url = "https://github.com/dapr/docs/tree/v0.8.0" # UI Customization [params.ui] diff --git a/daprdocs/content/en/concepts/building-blocks-concept.md b/daprdocs/content/en/concepts/building-blocks-concept.md index 9ab26a3695c..4719626f3c6 100644 --- a/daprdocs/content/en/concepts/building-blocks-concept.md +++ b/daprdocs/content/en/concepts/building-blocks-concept.md @@ -6,15 +6,15 @@ weight: 200 description: "Modular best practices accessible over standard HTTP or gRPC APIs" --- -A [building block]({{< ref 
building-blocks >}}) is an HTTP or gRPC API that can be called from your code and uses one or more Dapr components. - -Building blocks address common challenges in building resilient, microservices applications and codify best practices and patterns. Dapr consists of a set of building blocks, with extensibility to add new building blocks. +A [building block]({{< ref building-blocks >}}) is an HTTP or gRPC API that can be called from your code and uses one or more Dapr components. Dapr consists of a set of API building blocks, with extensibility to add new building blocks. Dapr's building blocks: +- Address common challenges in building resilient, microservices applications +- Codify best practices and patterns The diagram below shows how building blocks expose a public API that is called from your code, using components to implement the building blocks' capability. -The following are the building blocks provided by Dapr: +Dapr provides the following building blocks: @@ -25,7 +25,6 @@ The following are the building blocks provided by Dapr: | [**Publish and subscribe**]({{< ref "pubsub-overview.md" >}}) | `/v1.0/publish` `/v1.0/subscribe`| Pub/Sub is a loosely coupled messaging pattern where senders (or publishers) publish messages to a topic, to which subscribers subscribe. Dapr supports the pub/sub pattern between applications. | [**Bindings**]({{< ref "bindings-overview.md" >}}) | `/v1.0/bindings` | A binding provides a bi-directional connection to an external cloud/on-premise service or system. Dapr allows you to invoke the external service through the Dapr binding API, and it allows your application to be triggered by events sent by the connected service. | [**Actors**]({{< ref "actors-overview.md" >}}) | `/v1.0/actors` | An actor is an isolated, independent unit of compute and state with single-threaded execution. Dapr provides an actor implementation based on the virtual actor pattern which provides a single-threaded programming model and where actors are garbage collected when not in use. -| [**Observability**]({{< ref "observability-concept.md" >}}) | `N/A` | Dapr system components and runtime emit metrics, logs, and traces to debug, operate and monitor Dapr system services, components and user applications. | [**Secrets**]({{< ref "secrets-overview.md" >}}) | `/v1.0/secrets` | Dapr provides a secrets building block API and integrates with secret stores such as public cloud stores, local stores and Kubernetes to store the secrets. Services can call the secrets API to retrieve secrets, for example to get a connection string to a database. | [**Configuration**]({{< ref "configuration-api-overview.md" >}}) | `/v1.0/configuration` | The Configuration API enables you to retrieve and subscribe to application configuration items for supported configuration stores. This enables an application to retrieve specific configuration information, for example, at start up or when configuration changes are made in the store. | [**Distributed lock**]({{< ref "distributed-lock-api-overview.md" >}}) | `/v1.0-alpha1/lock` | The distributed lock API enables you to take a lock on a resource so that multiple instances of an application can access the resource without conflicts and provide consistency guarantees. 
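Because each building block is exposed as an HTTP (or gRPC) endpoint on the sidecar, any language can exercise one with a plain HTTP client. A minimal sketch of the state API, assuming a local sidecar on port 3500 and a state store component named `statestore` (illustrative defaults, not part of this changeset):

```python
import requests

DAPR_BASE = "http://localhost:3500/v1.0"

# Save a key/value pair through the state building block...
requests.post(
    f"{DAPR_BASE}/state/statestore",
    json=[{"key": "order_1", "value": {"status": "pending"}}],
).raise_for_status()

# ...and read it back; the component behind the API can change without
# touching this code.
resp = requests.get(f"{DAPR_BASE}/state/statestore/order_1")
print(resp.json())
```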
diff --git a/daprdocs/content/en/concepts/components-concept.md b/daprdocs/content/en/concepts/components-concept.md
index c54b1a82b7d..b7248d65684 100644
--- a/daprdocs/content/en/concepts/components-concept.md
+++ b/daprdocs/content/en/concepts/components-concept.md
@@ -11,7 +11,7 @@ Dapr uses a modular design where functionality is delivered as a component. Each
 You can contribute implementations and extend Dapr's component interfaces capabilities via:
 - The [components-contrib repository](https://github.com/dapr/components-contrib)
-- [Pluggable components]({{}}).
+- [Pluggable components]({{< ref "components-concept.md#built-in-and-pluggable-components" >}}).
 
 A building block can use any combination of components. For example, the [actors]({{< ref "actors-overview.md" >}}) and the [state management]({{< ref "state-management-overview.md" >}}) building blocks both use [state components](https://github.com/dapr/components-contrib/tree/master/state).
 
@@ -19,6 +19,10 @@ As another example, the [pub/sub]({{< ref "pubsub-overview.md" >}}) building blo
 You can get a list of current components available in the hosting environment using the `dapr components` CLI command.
 
+{{% alert title="Note" color="primary" %}}
+For any component that returns data to the app, it is recommended to set the memory capacity of the Dapr sidecar accordingly (process or container) to avoid potential OOM panics. For example, in Docker, use the `--memory` option. For Kubernetes, use the `dapr.io/sidecar-memory-limit` annotation. For processes, this depends on the OS and/or process orchestration tools.
+{{% /alert %}}
+
 ## Component specification
 
 Each component has a specification (or spec) that it conforms to. Components are configured at design-time with a YAML file which is stored in either:
diff --git a/daprdocs/content/en/concepts/dapr-services/sidecar.md b/daprdocs/content/en/concepts/dapr-services/sidecar.md
index 77997387a87..1d783b78f14 100644
--- a/daprdocs/content/en/concepts/dapr-services/sidecar.md
+++ b/daprdocs/content/en/concepts/dapr-services/sidecar.md
@@ -49,25 +49,31 @@ For a detailed list of all available arguments run `daprd --help` or see this [t
    daprd --app-id myapp
    ```
 
-2. Specify the port your application is listening to
+1. Specify the port your application is listening to
 
    ```bash
   daprd --app-id <app-id> --app-port 5000
    ```
 
-3. If you are using several custom resources and want to specify the location of the resource definition files, use the `--resources-path` argument:
+1. If you are using several custom resources and want to specify the location of the resource definition files, use the `--resources-path` argument:
 
    ```bash
   daprd --app-id myapp --resources-path <directory-path>
    ```
 
-4. Enable collection of Prometheus metrics while running your app
+1. If you've organized your components and other resources (for example, resiliency policies, subscriptions, or configuration) into separate folders or a shared folder, you can specify multiple resource paths:
+
+   ```bash
+   daprd --app-id myapp --resources-path <directory-1-path> --resources-path <directory-2-path>
+   ```
+
+1. Enable collection of Prometheus metrics while running your app
 
    ```bash
   daprd --app-id myapp --enable-metrics
    ```
 
-5. Listen to IPv4 and IPv6 loopback only
+1. 
Listen to IPv4 and IPv6 loopback only ```bash daprd --app-id myapp --dapr-listen-addresses '127.0.0.1,[::1]' diff --git a/daprdocs/content/en/concepts/observability-concept.md b/daprdocs/content/en/concepts/observability-concept.md index 270be27cbf6..f7635236c11 100644 --- a/daprdocs/content/en/concepts/observability-concept.md +++ b/daprdocs/content/en/concepts/observability-concept.md @@ -7,42 +7,68 @@ description: > Observe applications through tracing, metrics, logs and health --- -When building an application, understanding how the system is behaving is an important part of operating it - this includes having the ability to observe the internal calls of an application, gauging its performance and becoming aware of problems as soon as they occur. This is challenging for any system, but even more so for a distributed system comprised of multiple microservices where a flow, made of several calls, may start in one microservice but continue in another. Observability is critical in production environments, but also useful during development to understand bottlenecks, improve performance and perform basic debugging across the span of microservices. +When building an application, understanding the system behavior is an important, yet challenging part of operating it, such as: +- Observing the internal calls of an application +- Gauging its performance +- Becoming aware of problems as soon as they occur -While some data points about an application can be gathered from the underlying infrastructure (for example memory consumption, CPU usage), other meaningful information must be collected from an "application-aware" layer–one that can show how an important series of calls is executed across microservices. This usually means a developer must add some code to instrument an application for this purpose. Often, instrumentation code is simply meant to send collected data such as traces and metrics to observability tools or services that can help store, visualize and analyze all this information. +This can be particularly challenging for a distributed system comprised of multiple microservices, where a flow made of several calls may start in one microservice and continue in another. -Having to maintain this code, which is not part of the core logic of the application, is a burden on the developer, sometimes requiring understanding the observability tools' APIs, using additional SDKs etc. This instrumentation may also add to the portability challenges of an application, which may require different instrumentation depending on where the application is deployed. For example, different cloud providers offer different observability tools and an on-premises deployment might require a self-hosted solution. +Observability into your application is critical in production environments, and can be useful during development to: +- Understand bottlenecks +- Improve performance +- Perform basic debugging across the span of microservices + +While some data points about an application can be gathered from the underlying infrastructure (memory consumption, CPU usage), other meaningful information must be collected from an "application-aware" layer – one that can show how an important series of calls is executed across microservices. Typically, you'd add some code to instrument an application, which simply sends collected data (such as traces and metrics) to observability tools or services that can help store, visualize, and analyze all this information. 
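For a sense of what that hand-rolled instrumentation looks like, here is a minimal sketch using the OpenTelemetry Python SDK; the console exporter, tracer name, and span attributes are placeholder choices, not part of this changeset:

```python
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter

# Wire up a tracer provider and exporter by hand, the kind of
# boilerplate every instrumented service has to carry.
provider = TracerProvider()
provider.add_span_processor(BatchSpanProcessor(ConsoleSpanExporter()))
trace.set_tracer_provider(provider)

tracer = trace.get_tracer("checkout-service")
with tracer.start_as_current_span("charge-order") as span:
    span.set_attribute("order.id", "1234")
    # ... business logic ...
```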
+
+Maintaining this instrumentation code, which is not part of the core logic of the application, requires understanding the observability tools' APIs, using additional SDKs, etc. This instrumentation may also present portability challenges for your application, requiring different instrumentation depending on where the application is deployed. For example:
+- Different cloud providers offer different observability tools
+- An on-premises deployment might require a self-hosted solution
 
 ## Observability for your application with Dapr
 
-When building an application which leverages Dapr API building blocks to perform service-to-service calls and pub/sub messaging, Dapr offers an advantage with respect to [distributed tracing]({{}}). Because this inter-service communication flows through the Dapr runtime (or "sidecar"), Dapr is in a unique position to offload the burden of application-level instrumentation.
+When you leverage Dapr API building blocks to perform service-to-service calls, pub/sub messaging, and other APIs, Dapr offers an advantage with respect to [distributed tracing]({{< ref tracing >}}). Since this inter-service communication flows through the Dapr runtime (or "sidecar"), Dapr is in a unique position to offload the burden of application-level instrumentation.
 
 ### Distributed tracing
 
-Dapr can be [configured to emit tracing data]({{}}), and because Dapr does so using the widely adopted protocols of [Open Telemetry (OTEL)](https://opentelemetry.io/) and [Zipkin](https://zipkin.io), it can be easily integrated with multiple observability tools.
+Dapr can be [configured to emit tracing data]({{< ref setup-tracing.md >}}) using the widely adopted protocols of [Open Telemetry (OTEL)](https://opentelemetry.io/) and [Zipkin](https://zipkin.io). This means Dapr can be easily integrated with multiple observability tools.
 
 Distributed tracing with Dapr
 
 ### Automatic tracing context generation
 
-Dapr uses [W3C tracing]({{}}) specification for tracing context, included as part Open Telemetry (OTEL), to generate and propagate the context header for the application or propagate user-provided context headers. This means that you get tracing by default with Dapr.
+Dapr uses the [W3C tracing]({{< ref tracing >}}) specification for tracing context, included as part of Open Telemetry (OTEL), to generate and propagate the context header for the application or propagate user-provided context headers. This means that you get tracing by default with Dapr.
 
 ## Observability for the Dapr sidecar and control plane
 
-You also want to be able to observe Dapr itself, by collecting metrics on performance, throughput and latency and logs emitted by the Dapr sidecar, as well as the Dapr control plane services. Dapr sidecars have a health endpoint that can be probed to indicate their health status.
+You can also observe Dapr itself by:
+- Generating logs emitted by the Dapr sidecar and the Dapr control plane services
+- Collecting metrics on performance, throughput, and latency
+- Using health endpoint probes to indicate the Dapr sidecar health status
 
 Dapr sidecar metrics, logs and health checks
 
 ### Logging
 
-Dapr generates [logs]({{}}) to provide visibility into sidecar operation and to help users identify issues and perform debugging. Log events contain warning, error, info, and debug messages produced by Dapr system services.
Dapr can also be configured to send logs to collectors such as [Fluentd]({{< ref fluentd.md >}}), [Azure Monitor]({{< ref azure-monitor.md >}}), and other observability tools, so that logs can be searched and analyzed to provide insights. +Dapr generates [logs]({{< ref logs.md >}}) to: +- Provide visibility into sidecar operation +- Help users identify issues and perform debugging + +Log events contain warning, error, info, and debug messages produced by Dapr system services. You can also configure Dapr to send logs to collectors, such as [Open Telemetry Collector]({{< ref otel-collector >}}), [Fluentd]({{< ref fluentd.md >}}), [New Relic]({{< ref "operations/observability/logging/newrelic.md" >}}), [Azure Monitor]({{< ref azure-monitor.md >}}), and other observability tools, so that logs can be searched and analyzed to provide insights. ### Metrics -Metrics are the series of measured values and counts that are collected and stored over time. [Dapr metrics]({{}}) provide monitoring capabilities to understand the behavior of the Dapr sidecar and control plane. For example, the metrics between a Dapr sidecar and the user application show call latency, traffic failures, error rates of requests, etc. Dapr [control plane metrics](https://github.com/dapr/dapr/blob/master/docs/development/dapr-metrics.md) show sidecar injection failures and the health of control plane services, including CPU usage, number of actor placements made, etc. +Metrics are a series of measured values and counts collected and stored over time. [Dapr metrics]({{< ref metrics >}}) provide monitoring capabilities to understand the behavior of the Dapr sidecar and control plane. For example, the metrics between a Dapr sidecar and the user application show call latency, traffic failures, error rates of requests, etc. + +Dapr [control plane metrics](https://github.com/dapr/dapr/blob/master/docs/development/dapr-metrics.md) show sidecar injection failures and the health of control plane services, including CPU usage, number of actor placements made, etc. ### Health checks -The Dapr sidecar exposes an HTTP endpoint for [health checks]({{}}). With this API, user code or hosting environments can probe the Dapr sidecar to determine its status and identify issues with sidecar readiness. +The Dapr sidecar exposes an HTTP endpoint for [health checks]({{< ref sidecar-health.md >}}). With this API, user code or hosting environments can probe the Dapr sidecar to determine its status and identify issues with sidecar readiness. + +Conversely, Dapr can be configured to probe for the [health of your application]({{< ref app-health.md >}}), and react to changes in the app's health, including stopping pub/sub subscriptions and short-circuiting service invocation calls. + +## Next steps -Conversely, Dapr can be configured to probe for the [health of your application]({{}}), and react to changes in the app's health, including stopping pub/sub subscriptions and short-circuiting service invocation calls. 
+- [Learn more about observability in developing with Dapr]({{< ref tracing >}}) +- [Learn more about observability in operating with Dapr]({{< ref tracing >}}) \ No newline at end of file diff --git a/daprdocs/content/en/concepts/overview.md b/daprdocs/content/en/concepts/overview.md index b7eac28ab49..56624053722 100644 --- a/daprdocs/content/en/concepts/overview.md +++ b/daprdocs/content/en/concepts/overview.md @@ -7,7 +7,7 @@ description: > Introduction to the Distributed Application Runtime --- -Dapr is a portable, event-driven runtime that makes it easy for any developer to build resilient, stateless and stateful applications that run on the cloud and edge and embraces the diversity of languages and developer frameworks. +Dapr is a portable, event-driven runtime that makes it easy for any developer to build resilient, stateless, and stateful applications that run on the cloud and edge and embraces the diversity of languages and developer frameworks.
@@ -15,23 +15,32 @@ Dapr is a portable, event-driven runtime that makes it easy for any developer to
 
 ## Any language, any framework, anywhere
 
-
+
 
-Today we are experiencing a wave of cloud adoption. Developers are comfortable with web + database application architectures, for example classic 3-tier designs, but not with microservice application architectures which are inherently distributed. It’s hard to become a distributed systems expert, nor should you have to. Developers want to focus on business logic, while leaning on the platforms to imbue their applications with scale, resiliency, maintainability, elasticity and the other attributes of cloud-native architectures.
+With the current wave of cloud adoption, web + database application architectures (such as classic 3-tier designs) are trending more toward microservice application architectures, which are inherently distributed. You shouldn't have to become a distributed systems expert just to create microservices applications.
 
-This is where Dapr comes in. Dapr codifies the *best practices* for building microservice applications into open, independent APIs called building blocks, that enable you to build portable applications with the language and framework of your choice. Each building block is completely independent and you can use one, some, or all of them in your application.
+This is where Dapr comes in. Dapr codifies the *best practices* for building microservice applications into open, independent APIs called [building blocks]({{< ref "#microservice-building-blocks-for-cloud-and-edge" >}}). Dapr's building blocks:
+- Enable you to build portable applications using the language and framework of your choice
+- Are completely independent
+- Have no limit to how many you use in your application
 
-Using Dapr you can incrementally migrate your existing applications to a microservices architecture, thereby adopting cloud native patterns such scale out/in, resiliency and independent deployments.
+Using Dapr, you can incrementally migrate your existing applications to a microservices architecture, thereby adopting cloud native patterns such as scale out/in, resiliency, and independent deployments.
 
-In addition, Dapr is platform agnostic, meaning you can run your applications locally, on any Kubernetes cluster, on virtual or physical machines and in other hosting environments that Dapr integrates with. This enables you to build microservice applications that can run on the cloud and edge.
+Dapr is platform agnostic, meaning you can run your applications:
+- Locally
+- On any Kubernetes cluster
+- On virtual or physical machines
+- In other hosting environments that Dapr integrates with
+
+This enables you to build microservice applications that can run on the cloud and edge.
 
 ## Microservice building blocks for cloud and edge
 
-
+
 
-There are many considerations when architecting microservices applications. Dapr provides best practices for common capabilities when building microservice applications that developers can use in a standard way, and deploy to any environment. It does this by providing distributed system building blocks.
+Dapr provides distributed system building blocks for you to build microservice applications in a standard way and to deploy to any environment.
 
-Each of these building block APIs is independent, meaning that you can use one, some, or all of them in your application.
The following building blocks are available: +Each of these building block APIs is independent, meaning that you can use any number of them in your application. | Building Block | Description | |----------------|-------------| @@ -40,13 +49,22 @@ Each of these building block APIs is independent, meaning that you can use one, | [**Publish and subscribe**]({{< ref "pubsub-overview.md" >}}) | Publishing events and subscribing to topics between services enables event-driven architectures to simplify horizontal scalability and make them resilient to failure. Dapr provides at-least-once message delivery guarantee, message TTL, consumer groups and other advance features. | [**Resource bindings**]({{< ref "bindings-overview.md" >}}) | Resource bindings with triggers builds further on event-driven architectures for scale and resiliency by receiving and sending events to and from any external source such as databases, queues, file systems, etc. | [**Actors**]({{< ref "actors-overview.md" >}}) | A pattern for stateful and stateless objects that makes concurrency simple, with method and state encapsulation. Dapr provides many capabilities in its actor runtime, including concurrency, state, and life-cycle management for actor activation/deactivation, and timers and reminders to wake up actors. -| [**Observability**]({{< ref "observability-concept.md" >}}) | Dapr emits metrics, logs, and traces to debug and monitor both Dapr and user applications. Dapr supports distributed tracing to easily diagnose and serve inter-service calls in production using the W3C Trace Context standard and Open Telemetry to send to different monitoring tools. | [**Secrets**]({{< ref "secrets-overview.md" >}}) | The secrets management API integrates with public cloud and local secret stores to retrieve the secrets for use in application code. | [**Configuration**]({{< ref "configuration-api-overview.md" >}}) | The configuration API enables you to retrieve and subscribe to application configuration items from configuration stores. | [**Distributed lock**]({{< ref "distributed-lock-api-overview.md" >}}) | The distributed lock API enables your application to acquire a lock for any resource that gives it exclusive access until either the lock is released by the application, or a lease timeout occurs. | [**Workflows**]({{< ref "workflow-overview.md" >}}) | The workflow API can be combined with other Dapr building blocks to define long running, persistent processes or data flows that span multiple microservices using Dapr workflows or workflow components. | [**Cryptography**]({{< ref "cryptography-overview.md" >}}) | The cryptography API provides an abstraction layer on top of security infrastructure such as key vaults. It contains APIs that allow you to perform cryptographic operations, such as encrypting and decrypting messages, without exposing keys to your applications. +### Cross-cutting APIs + +Alongside its building blocks, Dapr provides cross-cutting APIs that apply across all the build blocks you use. + +| Building Block | Description | +|----------------|-------------| +| [**Resiliency**]({{< ref "resiliency-concept.md" >}}) | Dapr provides the capability to define and apply fault tolerance resiliency policies via a resiliency spec. Supported specs define policies for resiliency patterns such as timeouts, retries/back-offs, and circuit breakers. +| [**Observability**]({{< ref "observability-concept.md" >}}) | Dapr emits metrics, logs, and traces to debug and monitor both Dapr and user applications. 
Dapr supports distributed tracing to easily diagnose and serve inter-service calls in production using the W3C Trace Context standard and Open Telemetry to send to different monitoring tools. +| [**Security**]({{< ref "security-concept.md" >}}) | Dapr supports in-transit encryption of communication between Dapr instances using the Dapr control plane, Sentry service. You can bring in your own certificates, or let Dapr automatically create and persist self-signed root and issuer certificates. + ## Sidecar architecture Dapr exposes its HTTP and gRPC APIs as a sidecar architecture, either as a container or as a process, not requiring the application code to include any Dapr runtime code. This makes integration with Dapr easy from other runtimes, as well as providing separation of the application logic for improved supportability. @@ -55,33 +73,41 @@ Dapr exposes its HTTP and gRPC APIs as a sidecar architecture, either as a conta ## Hosting environments -Dapr can be hosted in multiple environments, including self-hosted on a Windows/Linux/macOS machines for local development and on Kubernetes or clusters of physical or virtual machines in production. +Dapr can be hosted in multiple environments, including: +- Self-hosted on a Windows/Linux/macOS machine for local development +- On Kubernetes or clusters of physical or virtual machines in production ### Self-hosted local development -In [self-hosted mode]({{< ref self-hosted-overview.md >}}) Dapr runs as a separate sidecar process which your service code can call via HTTP or gRPC. Each running service has a Dapr runtime process (or sidecar) which is configured to use state stores, pub/sub, binding components and the other building blocks. +In [self-hosted mode]({{< ref self-hosted-overview.md >}}), Dapr runs as a separate sidecar process, which your service code can call via HTTP or gRPC. Each running service has a Dapr runtime process (or sidecar) configured to use state stores, pub/sub, binding components, and the other building blocks. -You can use the [Dapr CLI](https://github.com/dapr/cli#launch-dapr-and-your-app) to run a Dapr-enabled application on your local machine. The diagram below show Dapr's local development environment when configured with the CLI `init` command. Try this out with the [getting started samples]({{< ref getting-started >}}). +You can use the [Dapr CLI](https://github.com/dapr/cli#launch-dapr-and-your-app) to run a Dapr-enabled application on your local machine. In the following diagram, Dapr's local development environment gets configured with the CLI `init` command. Try this out with the [getting started samples]({{< ref getting-started >}}). Architecture diagram of Dapr in self-hosted mode ### Kubernetes -Kubernetes can be used for either local development (for example with [minikube](https://minikube.sigs.k8s.io/docs/), [k3S](https://k3s.io/)) or in [production]({{< ref kubernetes >}}). In container hosting environments such as Kubernetes, Dapr runs as a sidecar container with the application container in the same pod. +Kubernetes can be used for either: +- Local development (for example, with [minikube](https://minikube.sigs.k8s.io/docs/) and [k3S](https://k3s.io/)), or +- In [production]({{< ref kubernetes >}}). + +In container hosting environments such as Kubernetes, Dapr runs as a sidecar container with the application container in the same pod. -Dapr has control plane services. 
The `dapr-sidecar-injector` and `dapr-operator` services provide first-class integration to launch Dapr as a sidecar container in the same pod as the service container and provide notifications of Dapr component updates provisioned in the cluster. +Dapr's `dapr-sidecar-injector` and `dapr-operator` control plane services provide first-class integration to: +- Launch Dapr as a sidecar container in the same pod as the service container +- Provide notifications of Dapr component updates provisioned in the cluster The `dapr-sentry` service is a certificate authority that enables mutual TLS between Dapr sidecar instances for secure data encryption, as well as providing identity via [Spiffe](https://spiffe.io/). For more information on the `Sentry` service, read the [security overview]({{< ref "security-concept.md#dapr-to-dapr-communication" >}}) -Deploying and running a Dapr-enabled application into your Kubernetes cluster is as simple as adding a few annotations to the deployment schemes. Visit the [Dapr on Kubernetes docs]({{< ref kubernetes >}}) +Deploying and running a Dapr-enabled application into your Kubernetes cluster is as simple as adding a few annotations to the deployment schemes. Visit the [Dapr on Kubernetes docs]({{< ref kubernetes >}}). Architecture diagram of Dapr in Kubernetes mode ### Clusters of physical or virtual machines -The Dapr control plane services can be deployed in High Availability (HA) mode to clusters of physical or virtual machines in production, for example, as shown in the diagram below. Here the Actor `Placement` and `Sentry` services are started on three different VMs to provide HA control plane. In order to provide name resolution using DNS for the applications running in the cluster, Dapr uses [Hashicorp Consul service]({{< ref setup-nr-consul >}}), also running in HA mode. +The Dapr control plane services can be deployed in high availability (HA) mode to clusters of physical or virtual machines in production. In the diagram below, the Actor `Placement` and security `Sentry` services are started on three different VMs to provide HA control plane. In order to provide name resolution using DNS for the applications running in the cluster, Dapr uses [Hashicorp Consul service]({{< ref setup-nr-consul >}}), also running in HA mode. Architecture diagram of Dapr control plane and Consul deployed to VMs in high availability mode @@ -91,17 +117,15 @@ Dapr offers a variety of SDKs and frameworks to make it easy to begin developing ### Dapr SDKs -To make using Dapr more natural for different languages, it also includes [language specific SDKs]({{}}) for: -- C++ +To make using Dapr more natural for different languages, it also includes [language specific SDKs]({{< ref sdks >}}) for: - Go - Java - JavaScript - .NET - PHP - Python -- Rust -These SDKs expose the functionality of the Dapr building blocks through a typed language API, rather than calling the http/gRPC API. This enables you to write a combination of stateless and stateful functions and actors all in the language of your choice. And because these SDKs share the Dapr runtime, you get cross-language actor and function support. +These SDKs expose the functionality of the Dapr building blocks through a typed language API, rather than calling the http/gRPC API. This enables you to write a combination of stateless and stateful functions and actors all in the language of your choice. Since these SDKs share the Dapr runtime, you get cross-language actor and function support. 
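For illustration (not part of this changeset), a state round-trip with the Python SDK reduces to a couple of typed calls, assuming a running sidecar and a state store component named `statestore`:

```python
from dapr.clients import DaprClient

with DaprClient() as client:
    # The SDK talks to the sidecar for you; no hand-built HTTP/gRPC requests.
    client.save_state(store_name="statestore", key="order_1", value="pending")
    state = client.get_state(store_name="statestore", key="order_1")
    print(state.data)
```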
### Developer frameworks
 
@@ -120,7 +144,7 @@ Dapr can be used from any developer framework. Here are some that have been inte
 #### Integrations and extensions
 
 Visit the [integrations]({{< ref integrations >}}) page to learn about some of the first-class support Dapr has for various frameworks and external products, including:
-- Public cloud services
+- Public cloud services, like Azure and AWS
 - Visual Studio Code
 - GitHub
 
@@ -128,6 +152,6 @@ Visit the [integrations]({{< ref integrations >}}) page to learn about some of t
 Dapr is designed for [operations]({{< ref operations >}}) and security. The Dapr sidecars, runtime, components, and configuration can all be managed and deployed easily and securely to match your organization's needs.
 
-The [dashboard](https://github.com/dapr/dashboard), installed via the Dapr CLI, provides a web-based UI enabling you to see information, view logs and more for running Dapr applications.
+The [dashboard](https://github.com/dapr/dashboard), installed via the Dapr CLI, provides a web-based UI enabling you to see information, view logs, and more for running Dapr applications.
 
-The [monitoring tools support]({{< ref monitoring >}}) provides deeper visibility into the Dapr system services and side-cars and the [observability capabilities]({{}}) of Dapr provide insights into your application such as tracing and metrics.
+Dapr supports [monitoring tools]({{< ref observability >}}) for deeper visibility into the Dapr system services and sidecars, while the [observability capabilities]({{< ref "observability-concept.md" >}}) of Dapr provide insights into your application, such as tracing and metrics.
diff --git a/daprdocs/content/en/concepts/security-concept.md b/daprdocs/content/en/concepts/security-concept.md
index 9c484668534..d7ceef443b4 100644
--- a/daprdocs/content/en/concepts/security-concept.md
+++ b/daprdocs/content/en/concepts/security-concept.md
@@ -211,6 +211,36 @@ The Dapr threat model is below.
 ## Security audit
 
+### September 2023
+
+In September 2023, Dapr completed a security audit done by Ada Logics.
+
+The audit was a holistic security audit with the following goals:
+
+- Formalize a threat model of Dapr
+- Perform manual code review
+- Evaluate Dapr's fuzzing suite against the formalized threat model
+- Carry out a SLSA review of Dapr
+
+You can find the full report [here](/docs/Dapr-september-2023-security-audit-report.pdf).
+
+The audit found 7 issues, none of which were of high or critical severity. One CVE was assigned for an issue in a 3rd-party dependency of Dapr Components Contrib.
+
+### June 2023
+
+In June 2023, Dapr completed a fuzzing audit done by Ada Logics.
+
+The audit achieved the following:
+
+- OSS-Fuzz integration
+- 39 new fuzzers for Dapr
+- Fuzz test coverage for Dapr Runtime, Kit, and Components-contrib
+- All fuzzers running continuously after the audit completed
+
+You can find the full report [here](/docs/Dapr-june-2023-fuzzing-audit-report.pdf).
+
+Three issues were found during the audit.
+
 ### February 2021
 
 In February 2021, Dapr went through a 2nd security audit targeting its 1.0 release by Cure53.
@@ -255,4 +285,4 @@ Visit [this page]({{< ref support-security-issues.md >}}) to report a security i ## Related links -[Operational Security]({{< ref "security.md" >}}) \ No newline at end of file +[Operational Security]({{< ref "security.md" >}}) diff --git a/daprdocs/content/en/contributing/docs-contrib/contributing-docs.md b/daprdocs/content/en/contributing/docs-contrib/contributing-docs.md index dd3d6c900c2..85d16302b70 100644 --- a/daprdocs/content/en/contributing/docs-contrib/contributing-docs.md +++ b/daprdocs/content/en/contributing/docs-contrib/contributing-docs.md @@ -39,11 +39,11 @@ Style and tone conventions should be followed throughout all Dapr documentation ## Diagrams and images -Diagrams and images are invaluable visual aids for documentation pages. Diagrams are kept in a [Dapr Diagrams Deck](https://github.com/dapr/docs/tree/v1.10/daprdocs/static/presentations), which includes guidance on style and icons. +Diagrams and images are invaluable visual aids for documentation pages. Diagrams are kept in a [Dapr Diagrams Deck](https://github.com/dapr/docs/tree/v1.11/daprdocs/static/presentations), which includes guidance on style and icons. As you create diagrams for your documentation: -- Save them as high-res PNG files into the [images folder](https://github.com/dapr/docs/tree/v1.10/daprdocs/static/images). +- Save them as high-res PNG files into the [images folder](https://github.com/dapr/docs/tree/v1.11/daprdocs/static/images). - Name your PNG files using the convention of a concept or building block so that they are grouped. - For example: `service-invocation-overview.png`. - For more information on calling out images using shortcode, see the [Images guidance](#images) section below. @@ -458,4 +458,4 @@ Steps to add a language: ## Next steps -Get started by copying and working from one of [the Dapr doc templates]({{< ref docs-templates >}}). \ No newline at end of file +Get started by copying and working from one of [the Dapr doc templates]({{< ref docs-templates >}}). diff --git a/daprdocs/content/en/developing-applications/building-blocks/_index.md b/daprdocs/content/en/developing-applications/building-blocks/_index.md index d0df0b4aac7..c1b10bbd475 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/_index.md +++ b/daprdocs/content/en/developing-applications/building-blocks/_index.md @@ -8,4 +8,5 @@ description: "Dapr capabilities that solve common development challenges for dis Get a high-level [overview of Dapr building blocks]({{< ref building-blocks-concept >}}) in the **Concepts** section. -Diagram showing the different Dapr API building blocks \ No newline at end of file +Diagram showing the different Dapr API building blocks + diff --git a/daprdocs/content/en/developing-applications/building-blocks/actors/_index.md b/daprdocs/content/en/developing-applications/building-blocks/actors/_index.md index d3ae80d3262..8a56cf4847c 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/actors/_index.md +++ b/daprdocs/content/en/developing-applications/building-blocks/actors/_index.md @@ -5,3 +5,10 @@ linkTitle: "Actors" weight: 50 description: Encapsulate code and data in reusable actor objects as a common microservices design pattern --- + +{{% alert title="More about Dapr Actors" color="primary" %}} + Learn more about how to use Dapr Actors: + - Try the [Actors quickstart]({{< ref actors-quickstart.md >}}). + - Explore actors via any of the [Dapr SDKs]({{< ref sdks >}}). 
+ - Review the [Actors API reference documentation]({{< ref actors_api.md >}}). +{{% /alert %}} diff --git a/daprdocs/content/en/developing-applications/building-blocks/actors/actors-overview.md b/daprdocs/content/en/developing-applications/building-blocks/actors/actors-overview.md index e74e9cc4d0b..f0454d71405 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/actors/actors-overview.md +++ b/daprdocs/content/en/developing-applications/building-blocks/actors/actors-overview.md @@ -20,7 +20,11 @@ Dapr includes a runtime that specifically implements the [Virtual Actor pattern] Every actor is defined as an instance of an actor type, identical to the way an object is an instance of a class. For example, there may be an actor type that implements the functionality of a calculator and there could be many actors of that type that are distributed on various nodes across a cluster. Each such actor is uniquely identified by an actor ID. - + + +[The following overview video and demo](https://www.youtube.com/live/0y7ne6teHT4?si=dWNgtsp61f3Sjq0n&t=10797) demonstrates how actors in Dapr work. + + ## Actor types and IDs @@ -109,6 +113,10 @@ The functionality of timers and reminders is very similar. The main difference i This distinction allows users to trade off between light-weight but stateless timers vs. more resource-demanding but stateful reminders. +[The following overview video and demo](https://www.youtube.com/live/0y7ne6teHT4?si=2_xX6mkU3UCy2Plr&t=6607) demonstrates how actor timers and reminders work. + + + - [Learn more about actor timers.]({{< ref "actors-features-concepts.md#timers" >}}) - [Learn more about actor reminders.]({{< ref "actors-features-concepts.md#reminders" >}}) - [Learn more about timer and reminder error handling and failover.]({{< ref "actors-features-concepts.md#timers-and-reminders-error-handling" >}}) diff --git a/daprdocs/content/en/developing-applications/building-blocks/bindings/_index.md b/daprdocs/content/en/developing-applications/building-blocks/bindings/_index.md index 578cfe0a9c7..d15682d7bd4 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/bindings/_index.md +++ b/daprdocs/content/en/developing-applications/building-blocks/bindings/_index.md @@ -5,3 +5,12 @@ linkTitle: "Bindings" weight: 40 description: Interface with or be triggered from external systems --- + + +{{% alert title="More about Dapr Bindings" color="primary" %}} + Learn more about how to use Dapr Bindings: + - Try the [Bindings quickstart]({{< ref bindings-quickstart.md >}}). + - Explore input and output bindings via any of the supporting [Dapr SDKs]({{< ref sdks >}}). + - Review the [Bindings API reference documentation]({{< ref bindings_api.md >}}). + - Browse the supported [input and output bindings component specs]({{< ref supported-bindings >}}). +{{% /alert %}} \ No newline at end of file diff --git a/daprdocs/content/en/developing-applications/building-blocks/bindings/bindings-overview.md b/daprdocs/content/en/developing-applications/building-blocks/bindings/bindings-overview.md index e98b5440edd..980a39f79a3 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/bindings/bindings-overview.md +++ b/daprdocs/content/en/developing-applications/building-blocks/bindings/bindings-overview.md @@ -15,14 +15,18 @@ Using Dapr's bindings API, you can trigger your app with events coming in from e - Switch between bindings at runtime. - Build portable applications with environment-specific bindings set-up and no required code changes. 
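To make the API shape concrete: invoking an output binding is one call to the sidecar's bindings endpoint, as the how-to guides later in this change demonstrate. A minimal sketch, assuming a local sidecar on port 3500 and the `checkout` binding configured in those guides:

```python
import requests

# Ask the sidecar to run the "create" operation on the checkout binding.
resp = requests.post(
    "http://localhost:3500/v1.0/bindings/checkout",
    json={"data": {"orderId": 42}, "operation": "create"},
)
resp.raise_for_status()
```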
-For example, with bindings, your microservice can respond to incoming Twilio/SMS messages without: +For example, with bindings, your application can respond to incoming Twilio/SMS messages without: - Adding or configuring a third-party Twilio SDK - Worrying about polling from Twilio (or using WebSockets, etc.) -{{% alert title="Note" color="primary" %}} +Diagram showing bindings + +In the above diagram: +- The input binding triggers a method on your application. +- Execute output binding operations on the component, such as `"create"`. + Bindings are developed independently of Dapr runtime. You can [view and contribute to the bindings](https://github.com/dapr/components-contrib/tree/master/bindings). -{{% /alert %}} {{% alert title="Note" color="primary" %}} If you are using the HTTP Binding, then it is preferable to use [service invocation]({{< ref service_invocation_api.md >}}) instead. Read [How-To: Invoke Non-Dapr Endpoints using HTTP]({{< ref "howto-invoke-non-dapr-endpoints.md" >}}) for more information. @@ -32,6 +36,10 @@ If you are using the HTTP Binding, then it is preferable to use [service invocat With input bindings, you can trigger your application when an event from an external resource occurs. An optional payload and metadata may be sent with the request. +[The following overview video and demo](https://www.youtube.com/live/0y7ne6teHT4?si=wlmAi7BJBWS8KNK7&t=8261) demonstrates how Dapr input binding works. + + + To receive events from an input binding: 1. Define the component YAML that describes the binding type and its metadata (connection info, etc.). @@ -50,13 +58,36 @@ Read the [Create an event-driven app using input bindings guide]({{< ref howto-t With output bindings, you can invoke external resources. An optional payload and metadata can be sent with the invocation request. +[The following overview video and demo](https://www.youtube.com/live/0y7ne6teHT4?si=PoA4NEqL5mqNj6Il&t=7668) demonstrates how Dapr output binding works. + + + To invoke an output binding: 1. Define the component YAML that describes the binding type and its metadata (connection info, etc.). -2. Use the HTTP endpoint or gRPC method to invoke the binding with an optional payload. +1. Use the HTTP endpoint or gRPC method to invoke the binding with an optional payload. +1. Specify an output operation. Output operations depend on the binding component you use, and can include: + - `"create"` + - `"update"` + - `"delete"` + - `"exec"` Read the [Use output bindings to interface with external resources guide]({{< ref howto-bindings.md >}}) to get started with output bindings. +## Binding directions (optional) + +You can provide the `direction` metadata field to indicate the direction(s) supported by the binding component. In doing so, the Dapr sidecar avoids the `"wait for the app to become ready"` state reducing the lifecycle dependency between the Dapr sidecar and the application: + +- `"input"` +- `"output"` +- `"input, output"` + +{{% alert title="Note" color="primary" %}} +It is highly recommended that all bindings should include the `direction` property. 
+{{% /alert %}} + +[See a full example of the bindings `direction` metadata.]({{< ref "bindings_api.md#binding-direction-optional" >}}) + ## Try out bindings ### Quickstarts and tutorials diff --git a/daprdocs/content/en/developing-applications/building-blocks/bindings/howto-bindings.md b/daprdocs/content/en/developing-applications/building-blocks/bindings/howto-bindings.md index 7a26103546f..1822f543a6d 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/bindings/howto-bindings.md +++ b/daprdocs/content/en/developing-applications/building-blocks/bindings/howto-bindings.md @@ -32,6 +32,8 @@ Create a new binding component named `checkout`. Within the `metadata` section, - The topic to which you'll publish the message - The broker +When creating the binding component, [specify the supported `direction` of the binding]({{< ref "bindings_api.md#binding-direction-optional" >}}). + {{< tabs "Self-Hosted (CLI)" Kubernetes >}} {{% codetab %}} @@ -59,7 +61,9 @@ spec: - name: publishTopic value: sample - name: authRequired - value: "false" + value: false + - name: direction + value: output ``` {{% /codetab %}} @@ -89,7 +93,9 @@ spec: - name: publishTopic value: sample - name: authRequired - value: "false" + value: false + - name: direction + value: output ``` {{% /codetab %}} diff --git a/daprdocs/content/en/developing-applications/building-blocks/bindings/howto-triggers.md b/daprdocs/content/en/developing-applications/building-blocks/bindings/howto-triggers.md index 215ffd05d06..56a24b0aece 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/bindings/howto-triggers.md +++ b/daprdocs/content/en/developing-applications/building-blocks/bindings/howto-triggers.md @@ -37,6 +37,8 @@ Create a new binding component named `checkout`. Within the `metadata` section, - The topic to which you'll publish the message - The broker +When creating the binding component, [specify the supported `direction` of the binding]({{< ref "bindings_api.md#binding-direction-optional" >}}). + {{< tabs "Self-Hosted (CLI)" Kubernetes >}} {{% codetab %}} @@ -64,7 +66,9 @@ spec: - name: publishTopic value: sample - name: authRequired - value: "false" + value: false + - name: direction + value: input ``` {{% /codetab %}} @@ -94,7 +98,9 @@ spec: - name: publishTopic value: sample - name: authRequired - value: "false" + value: false + - name: direction + value: input ``` {{% /codetab %}} @@ -256,15 +262,15 @@ async function start() { {{< /tabs >}} -### ACK-ing an event +### ACK an event Tell Dapr you've successfully processed an event in your application by returning a `200 OK` response from your HTTP handler. -### Rejecting an event +### Reject an event Tell Dapr the event was not processed correctly in your application and schedule it for redelivery by returning any response other than `200 OK`. For example, a `500 Error`. -### Specifying a custom route +### Specify a custom route By default, incoming events will be sent to an HTTP endpoint that corresponds to the name of the input binding. 
You can override this by setting the following metadata property in `binding.yaml`: diff --git a/daprdocs/content/en/developing-applications/building-blocks/configuration/_index.md b/daprdocs/content/en/developing-applications/building-blocks/configuration/_index.md index 59acf29b9ff..3974707d05c 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/configuration/_index.md +++ b/daprdocs/content/en/developing-applications/building-blocks/configuration/_index.md @@ -5,3 +5,11 @@ linkTitle: "Configuration" weight: 80 description: Manage and be notified of application configuration changes --- + +{{% alert title="More about Dapr Configuration" color="primary" %}} + Learn more about how to use Dapr Configuration: + - Try the [Configuration quickstart]({{< ref configuration-quickstart.md >}}). + - Explore configuration via any of the supporting [Dapr SDKs]({{< ref sdks >}}). + - Review the [Configuration API reference documentation]({{< ref configuration_api.md >}}). + - Browse the supported [configuration component specs]({{< ref supported-configuration-stores >}}). +{{% /alert %}} \ No newline at end of file diff --git a/daprdocs/content/en/developing-applications/building-blocks/cryptography/_index.md b/daprdocs/content/en/developing-applications/building-blocks/cryptography/_index.md index 7b62e737c83..664e94b6aee 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/cryptography/_index.md +++ b/daprdocs/content/en/developing-applications/building-blocks/cryptography/_index.md @@ -4,4 +4,11 @@ title: "Cryptography" linkTitle: "Cryptography" weight: 110 description: "Perform cryptographic operations without exposing keys to your application" ---- \ No newline at end of file +--- + +{{% alert title="More about Dapr Cryptography" color="primary" %}} + Learn more about how to use Dapr Cryptography: + - Try the [Cryptography quickstart]({{< ref cryptography-quickstart.md >}}). + - Explore cryptography via any of the supporting [Dapr SDKs]({{< ref sdks >}}). + - Browse the supported [cryptography component specs]({{< ref supported-cryptography >}}). +{{% /alert %}} \ No newline at end of file diff --git a/daprdocs/content/en/developing-applications/building-blocks/distributed-lock/_index.md b/daprdocs/content/en/developing-applications/building-blocks/distributed-lock/_index.md index e9364c57384..9b99245089b 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/distributed-lock/_index.md +++ b/daprdocs/content/en/developing-applications/building-blocks/distributed-lock/_index.md @@ -5,3 +5,10 @@ linkTitle: "Distributed lock" weight: 90 description: Distributed locks provide mutually exclusive access to shared resources from an application. --- + +{{% alert title="More about Dapr Distributed Lock" color="primary" %}} + Learn more about how to use Dapr Distributed Lock: + - Explore distributed locks via any of the supporting [Dapr SDKs]({{< ref sdks >}}). + - Review the [Distributed Lock API reference documentation]({{< ref distributed_lock_api.md >}}). + - Browse the supported [distributed locks component specs]({{< ref supported-locks >}}). 
+{{% /alert %}} \ No newline at end of file diff --git a/daprdocs/content/en/developing-applications/building-blocks/distributed-lock/howto-use-distributed-lock.md b/daprdocs/content/en/developing-applications/building-blocks/distributed-lock/howto-use-distributed-lock.md index 21dac83e2af..4a2745c3484 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/distributed-lock/howto-use-distributed-lock.md +++ b/daprdocs/content/en/developing-applications/building-blocks/distributed-lock/howto-use-distributed-lock.md @@ -31,6 +31,7 @@ metadata: name: lockstore spec: type: lock.redis + version: v1 metadata: - name: redisHost value: localhost:6379 diff --git a/daprdocs/content/en/developing-applications/building-blocks/observability/_index.md b/daprdocs/content/en/developing-applications/building-blocks/observability/_index.md deleted file mode 100644 index 7de51a0a49e..00000000000 --- a/daprdocs/content/en/developing-applications/building-blocks/observability/_index.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -type: docs -title: "Observability" -linkTitle: "Observability" -weight: 60 -description: See and measure the message calls to components and between networked services ---- - -This section includes guides for developers in the context of observability. See other sections for a [general overview of the observability concept]({{< ref observability-concept >}}) in Dapr and for [operations guidance on monitoring]({{< ref monitoring >}}). diff --git a/daprdocs/content/en/developing-applications/building-blocks/observability/tracing-overview.md b/daprdocs/content/en/developing-applications/building-blocks/observability/tracing-overview.md deleted file mode 100644 index 38ac85d258e..00000000000 --- a/daprdocs/content/en/developing-applications/building-blocks/observability/tracing-overview.md +++ /dev/null @@ -1,118 +0,0 @@ ---- -type: docs -title: "Distributed tracing" -linkTitle: "Distributed tracing" -weight: 100 -description: "Use tracing to get visibility into your application" ---- - -Dapr uses the Open Telemetry (OTEL) and Zipkin protocols for distributed traces. OTEL is the industry standard and is the recommended trace protocol to use. - - Most observability tools support OTEL. For example [Google Cloud Operations](https://cloud.google.com/products/operations), [New Relic](https://newrelic.com), [Azure Monitor](https://azure.microsoft.com/services/monitor/), [Datadog](https://www.datadoghq.com), Instana, [Jaeger](https://www.jaegertracing.io/), and [SignalFX](https://www.signalfx.com/). - -## Scenarios -Tracing is used with service invocaton and pub/sub APIs. You can flow trace context between services that uses these APIs. - -There are two scenarios for how tracing is used: - - 1. Dapr generates the trace context and you propagate the trace context to another service. - 2. You generate the trace context and Dapr propagates the trace context to a service. - -### Propagating sequential service calls - -Dapr takes care of creating the trace headers. However, when there are more than two services, you're responsible for propagating the trace headers between them. Let's go through the scenarios with examples: - -1. Single service invocation call (`service A -> service B`) - - Dapr generates the trace headers in service A, which are then propagated from service A to service B. No further propagation is needed. - -2. 
Multiple sequential service invocation calls ( `service A -> service B -> service C`) - - Dapr generates the trace headers at the beginning of the request in service A, which are then propagated to service B. You are now responsible for taking the headers and propagating them to service C, since this is specific to your application. - - `service A -> service B -> propagate trace headers to -> service C` and so on to further Dapr-enabled services. - - In other words, if the app is calling to Dapr and wants to trace with an existing span (trace header), it must always propagate to Dapr (from service B to service C in this case). Dapr always propagates trace spans to an application. - -{{% alert title="Note" color="primary" %}} -There are no helper methods exposed in Dapr SDKs to propagate and retrieve trace context. You need to use HTTP/gRPC clients to propagate and retrieve trace headers through HTTP headers and gRPC metadata. -{{% /alert %}} - -3. Request is from external endpoint (for example, `from a gateway service to a Dapr-enabled service A`) - - An external gateway ingress calls Dapr, which generates the trace headers and calls service A. Service A then calls service B and further Dapr-enabled services. You must propagate the headers from service A to service B: `Ingress -> service A -> propagate trace headers -> service B`. This is similar to case 2 above. - -4. Pub/sub messages - Dapr generates the trace headers in the published message topic. These trace headers are propagated to any services listening on that topic. - -### Propagating multiple different service calls - -In the following scenarios, Dapr does some of the work for you and you need to either create or propagate trace headers. - -1. Multiple service calls to different services from single service - - When you are calling multiple services from a single service (see example below), you need to propagate the trace headers: - - ``` - service A -> service B - [ .. some code logic ..] - service A -> service C - [ .. some code logic ..] - service A -> service D - [ .. some code logic ..] - ``` - - In this case, when service A first calls service B, Dapr generates the trace headers in service A, which are then propagated to service B. These trace headers are returned in the response from service B as part of response headers. You then need to propagate the returned trace context to the next services, service C and service D, as Dapr does not know you want to reuse the same header. - -### Generating your own trace context headers from non-Daprized applications - -You may have chosen to generate your own trace context headers. -Generating your own trace context headers is more unusual and typically not required when calling Dapr. However, there are scenarios where you could specifically choose to add W3C trace headers into a service call; for example, you have an existing application that does not use Dapr. In this case, Dapr still propagates the trace context headers for you. If you decide to generate trace headers yourself, there are three ways this can be done: - -1. You can use the industry standard [OpenTelemetry SDKs](https://opentelemetry.io/docs/instrumentation/) to generate trace headers and pass these trace headers to a Dapr-enabled service. This is the preferred method. - -2. You can use a vendor SDK that provides a way to generate W3C trace headers and pass them to a Dapr-enabled service. - -3. 
You can handcraft a trace context following [W3C trace context specifications](https://www.w3.org/TR/trace-context/) and pass them to a Dapr-enabled service. - -## W3C trace context - -Dapr uses the standard W3C trace context headers. - -- For HTTP requests, Dapr uses `traceparent` header. -- For gRPC requests, Dapr uses `grpc-trace-bin` header. - -When a request arrives without a trace ID, Dapr creates a new one. Otherwise, it passes the trace ID along the call chain. - -Read [trace context overview]({{< ref w3c-tracing-overview >}}) for more background on W3C trace context. - -## W3C trace headers -These are the specific trace context headers that are generated and propagated by Dapr for HTTP and gRPC. - -### Trace context HTTP headers format -When propagating a trace context header from an HTTP response to an HTTP request, you copy these headers. - -#### Traceparent header -The traceparent header represents the incoming request in a tracing system in a common format, understood by all vendors. -Here’s an example of a traceparent header. - -`traceparent: 00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01` - - Find the traceparent fields detailed [here](https://www.w3.org/TR/trace-context/#traceparent-header). - -#### Tracestate header -The tracestate header includes the parent in a potentially vendor-specific format: - -`tracestate: congo=t61rcWkgMzE` - -Find the tracestate fields detailed [here](https://www.w3.org/TR/trace-context/#tracestate-header). - -### Trace context gRPC headers format -In the gRPC API calls, trace context is passed through `grpc-trace-bin` header. - -## Related Links - -- [Observability concepts]({{< ref observability-concept.md >}}) -- [W3C Trace Context for distributed tracing]({{< ref w3c-tracing-overview >}}) -- [W3C Trace Context specification](https://www.w3.org/TR/trace-context/) -- [Observability quickstart](https://github.com/dapr/quickstarts/tree/master/tutorials/observability) diff --git a/daprdocs/content/en/developing-applications/building-blocks/observability/w3c-tracing-overview.md b/daprdocs/content/en/developing-applications/building-blocks/observability/w3c-tracing-overview.md deleted file mode 100644 index fe168c75301..00000000000 --- a/daprdocs/content/en/developing-applications/building-blocks/observability/w3c-tracing-overview.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -type: docs -title: "Trace context" -linkTitle: "Trace context" -weight: 4000 -description: Background and scenarios for using W3C tracing with Dapr -type: docs ---- - -Dapr uses the [Open Telemetry protocol](https://opentelemetry.io/), which in turn uses the [W3C trace context](https://www.w3.org/TR/trace-context/) for distributed tracing for both service invocation and pub/sub messaging. Dapr generates and propagates the trace context information, which can be sent to observability tools for visualization and querying. - -## Background -Distributed tracing is a methodology implemented by tracing tools to follow, analyze, and debug a transaction across multiple software components. Typically, a distributed trace traverses more than one service which requires it to be uniquely identifiable. Trace context propagation passes along this unique identification. - -In the past, trace context propagation has typically been implemented individually by each different tracing vendor. In multi-vendor environments, this causes interoperability problems, such as: - -- Traces that are collected by different tracing vendors cannot be correlated as there is no shared unique identifier. 
-- Traces that cross boundaries between different tracing vendors can not be propagated as there is no forwarded, uniformly agreed set of identification. -- Vendor-specific metadata might be dropped by intermediaries. -- Cloud platform vendors, intermediaries, and service providers cannot guarantee to support trace context propagation as there is no standard to follow. - -In the past, these problems did not have a significant impact, as most applications were monitored by a single tracing vendor and stayed within the boundaries of a single platform provider. Today, an increasing number of applications are distributed and leverage multiple middleware services and cloud platforms. - -This transformation of modern applications called for a distributed tracing context propagation standard. The [W3C trace context specification](https://www.w3.org/TR/trace-context/) defines a universally agreed-upon format for the exchange of trace context propagation data - referred to as trace context. Trace context solves the problems described above by: - -* Providing a unique identifier for individual traces and requests, allowing trace data of multiple providers to be linked together. -* Providing an agreed-upon mechanism to forward vendor-specific trace data and avoid broken traces when multiple tracing tools participate in a single transaction. -* Providing an industry standard that intermediaries, platforms, and hardware providers can support. - -A unified approach for propagating trace data improves visibility into the behavior of distributed applications, facilitating problem and performance analysis. - -## Related Links -- [W3C Trace Context specification](https://www.w3.org/TR/trace-context/) diff --git a/daprdocs/content/en/developing-applications/building-blocks/pubsub/_index.md b/daprdocs/content/en/developing-applications/building-blocks/pubsub/_index.md index a6c894a5a10..df3fb8b7a08 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/pubsub/_index.md +++ b/daprdocs/content/en/developing-applications/building-blocks/pubsub/_index.md @@ -5,3 +5,11 @@ linkTitle: "Publish & subscribe" weight: 30 description: Secure, scalable messaging between services --- + +{{% alert title="More about Dapr Pub/sub" color="primary" %}} + Learn more about how to use Dapr Pub/sub: + - Try the [Pub/sub quickstart]({{< ref pubsub-quickstart.md >}}). + - Explore pub/sub via any of the supporting [Dapr SDKs]({{< ref sdks >}}). + - Review the [Pub/sub API reference documentation]({{< ref pubsub_api.md >}}). + - Browse the supported [pub/sub component specs]({{< ref supported-pubsub >}}). +{{% /alert %}} \ No newline at end of file diff --git a/daprdocs/content/en/developing-applications/building-blocks/pubsub/howto-publish-subscribe.md b/daprdocs/content/en/developing-applications/building-blocks/pubsub/howto-publish-subscribe.md index f771a292bd3..c3ceb433307 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/pubsub/howto-publish-subscribe.md +++ b/daprdocs/content/en/developing-applications/building-blocks/pubsub/howto-publish-subscribe.md @@ -658,6 +658,12 @@ dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-g In order to tell Dapr that a message was processed successfully, return a `200 OK` response. If Dapr receives any other return status code than `200`, or if your app crashes, Dapr will attempt to redeliver the message following at-least-once semantics. 
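To make the at-least-once behavior above concrete, here is a minimal sketch of a subscriber endpoint that either ACKs or rejects a delivery. It assumes a Flask app subscribed to an `orders` topic on an `/orders` route, with a hypothetical `process_order()` function standing in for your business logic; none of these names come from the docs in this change:

```python
from flask import Flask, jsonify, request

app = Flask(__name__)

def process_order(data):
    # Hypothetical business logic; raising an exception simulates a failure
    print(f"Processing order: {data}")

# Dapr delivers topic events as POST requests to the subscribed route
@app.route('/orders', methods=['POST'])
def orders():
    event = request.json  # CloudEvent envelope; the payload is under "data"
    try:
        process_order(event['data'])
        return jsonify(success=True), 200   # 200 OK: Dapr marks the message as processed
    except Exception:
        return jsonify(success=False), 500  # any non-200: Dapr redelivers (at-least-once)

if __name__ == '__main__':
    app.run(port=6001)
```

Because any non-`200` response triggers redelivery, handlers like this should be idempotent: the same message may arrive more than once.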
+## Demo video + +Watch [this demo video](https://youtu.be/1dqe1k-FXJQ?si=s3gvWxRxeOsmXuE1) to learn more about pub/sub messaging with Dapr. + + + ## Next steps - Try the [pub/sub tutorial](https://github.com/dapr/quickstarts/tree/master/tutorials/pub-sub). diff --git a/daprdocs/content/en/developing-applications/building-blocks/pubsub/pubsub-cloudevents.md b/daprdocs/content/en/developing-applications/building-blocks/pubsub/pubsub-cloudevents.md index 251d3f234c1..b6f46bb2970 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/pubsub/pubsub-cloudevents.md +++ b/daprdocs/content/en/developing-applications/building-blocks/pubsub/pubsub-cloudevents.md @@ -14,9 +14,15 @@ Dapr uses CloudEvents to provide additional context to the event payload, enabli - Content-type for proper deserialization of event data - Verification of sender application -## CloudEvents example +You can choose any of three methods for publishing a CloudEvent via pub/sub: -A publish operation to Dapr results in a cloud event envelope containing the following fields: +1. Send a pub/sub event, which is then wrapped by Dapr in a CloudEvent envelope. +1. Override specific CloudEvents attributes provided by Dapr with your own values. +1. Write your own CloudEvent envelope as part of the pub/sub event. + +## Dapr-generated CloudEvents example + +Sending a publish operation to Dapr automatically wraps it in a CloudEvent envelope containing the following fields: - `id` - `source` @@ -30,7 +36,9 @@ A publish operation to Dapr results in a cloud event envelope containing the fol - `time` - `datacontenttype` (optional) -The following example demonstrates a cloud event generated by Dapr for a publish operation to the `orders` topic that includes a W3C `traceid` unique to the message, the `data` and the fields for the CloudEvent where the data content is serialized as JSON. +The following example demonstrates a CloudEvent generated by Dapr for a publish operation to the `orders` topic that includes: +- A W3C `traceid` unique to the message +- The `data` and the fields for the CloudEvent where the data content is serialized as JSON ```json { @@ -55,20 +63,112 @@ As another example of a v1.0 CloudEvent, the following shows data as XML content ```json { - "specversion" : "1.0", - "type" : "xml.message", - "source" : "https://example.com/message", - "subject" : "Test XML Message", - "id" : "id-1234-5678-9101", - "time" : "2020-09-23T06:23:21Z", - "datacontenttype" : "text/xml", - "data" : "User1user2hi" + "topic": "orders", + "pubsubname": "order_pub_sub", + "traceid": "00-113ad9c4e42b27583ae98ba698d54255-e3743e35ff56f219-01", + "tracestate": "", + "data" : "<note><to>user2</to><message>Order</message></note>", + "id" : "id-1234-5678-9101", + "specversion" : "1.0", + "datacontenttype" : "text/xml", + "subject" : "Test XML Message", + "source" : "https://example.com/message", + "type" : "xml.message", + "time" : "2020-09-23T06:23:21Z" +} +``` + +## Replace Dapr-generated CloudEvents values + +Dapr automatically generates several CloudEvent properties. You can replace these generated CloudEvent properties by providing the following optional metadata key/value: + +- `cloudevent.id`: overrides `id` +- `cloudevent.source`: overrides `source` +- `cloudevent.type`: overrides `type` +- `cloudevent.traceid`: overrides `traceid` +- `cloudevent.tracestate`: overrides `tracestate` +- `cloudevent.traceparent`: overrides `traceparent` + +The ability to replace CloudEvents properties using these metadata properties applies to all pub/sub components.
+ +### Example + +For example, to replace the `source` and `id` values from [the CloudEvent example above]({{< ref "#dapr-generated-cloudevents-example" >}}) in code: + +{{< tabs "Python" ".NET" >}} + +{{% codetab %}} + +```python +with DaprClient() as client: + order = {'orderId': i} + # Publish an event/message using Dapr PubSub + result = client.publish_event( + pubsub_name='order_pub_sub', + topic_name='orders', + data=json.dumps(order), + publish_metadata={'cloudevent.id': 'd99b228f-6c73-4e78-8c4d-3f80a043d317', 'cloudevent.source': 'payment'} + ) +``` + +{{% /codetab %}} + + +{{% codetab %}} + +```csharp +var order = new Order(i); +using var client = new DaprClientBuilder().Build(); + +// Override cloudevent metadata +var metadata = new Dictionary<string, string>() { + { "cloudevent.source", "payment" }, + { "cloudevent.id", "d99b228f-6c73-4e78-8c4d-3f80a043d317" } +}; + +// Publish an event/message using Dapr PubSub +await client.PublishEventAsync("order_pub_sub", "orders", order, metadata); +Console.WriteLine("Published data: " + order); + +await Task.Delay(TimeSpan.FromSeconds(1)); ``` +{{% /codetab %}} + +{{< /tabs >}} + + +The JSON payload then reflects the new `source` and `id` values: + + +```json +{ + "topic": "orders", + "pubsubname": "order_pub_sub", + "traceid": "00-113ad9c4e42b27583ae98ba698d54255-e3743e35ff56f219-01", + "tracestate": "", + "data": { + "orderId": 1 + }, + "id": "d99b228f-6c73-4e78-8c4d-3f80a043d317", + "specversion": "1.0", + "datacontenttype": "application/json; charset=utf-8", + "source": "payment", + "type": "com.dapr.event.sent", + "time": "2020-09-23T06:23:21Z", + "traceparent": "00-113ad9c4e42b27583ae98ba698d54255-e3743e35ff56f219-01" +} +``` + +{{% alert title="Important" color="warning" %}} +While you can replace `traceid`/`traceparent` and `tracestate`, doing this may interfere with tracing events and report inconsistent results in tracing tools. It's recommended to use OpenTelemetry for distributed traces. [Learn more about distributed tracing.]({{< ref tracing-overview.md >}}) + +{{% /alert %}} + + ## Publish your own CloudEvent If you want to use your own CloudEvent, make sure to specify the [`datacontenttype`]({{< ref "pubsub-overview.md#setting-message-content-types" >}}) as `application/cloudevents+json`. + If the CloudEvent that was authored by the app does not contain the [minimum required fields](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#required-attributes) in the CloudEvent specification, the message is rejected.
Dapr adds the following fields to the CloudEvent if they are missing: - `time` @@ -92,7 +192,7 @@ You can add additional fields to a custom CloudEvent that are not part of the of Publish a CloudEvent to the `orders` topic: ```bash -dapr publish --publish-app-id orderprocessing --pubsub order-pub-sub --topic orders --data '{"specversion" : "1.0", "type" : "com.dapr.cloudevent.sent", "source" : "testcloudeventspubsub", "subject" : "Cloud Events Test", "id" : "someCloudEventId", "time" : "2021-08-02T09:00:00Z", "datacontenttype" : "application/cloudevents+json", "data" : {"orderId": "100"}}' +dapr publish --publish-app-id orderprocessing --pubsub order-pub-sub --topic orders --data '{"orderId": "100"}' ``` {{% /codetab %}} diff --git a/daprdocs/content/en/developing-applications/building-blocks/pubsub/pubsub-deadletter.md b/daprdocs/content/en/developing-applications/building-blocks/pubsub/pubsub-deadletter.md index 1f46bf62b6e..97c9a0db405 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/pubsub/pubsub-deadletter.md +++ b/daprdocs/content/en/developing-applications/building-blocks/pubsub/pubsub-deadletter.md @@ -25,13 +25,14 @@ The diagram below is an example of how dead letter topics work. First a message The following YAML shows how to configure a subscription with a dead letter topic named `poisonMessages` for messages consumed from the `orders` topic. This subscription is scoped to an app with a `checkout` ID. ```yaml -apiVersion: dapr.io/v1alpha1 +apiVersion: dapr.io/v2alpha1 kind: Subscription metadata: name: order spec: topic: orders - route: /checkout + routes: + default: /checkout pubsubname: pubsub deadLetterTopic: poisonMessages scopes: @@ -86,13 +87,16 @@ spec: Remember to now configure a subscription to handling the dead letter topics. For example you can create another declarative subscription to receive these on the same or a different application. The example below shows the checkout application subscribing to the `poisonMessages` topic with another subscription and sending these to be handled by the `/failedmessages` endpoint. ```yaml -apiVersion: dapr.io/v1alpha1 +apiVersion: dapr.io/v2alpha1 kind: Subscription metadata: name: deadlettertopics spec: topic: poisonMessages - route: /failedMessages + routes: + rules: + - match: + path: /failedMessages pubsubname: pubsub scopes: - checkout diff --git a/daprdocs/content/en/developing-applications/building-blocks/pubsub/pubsub-overview.md b/daprdocs/content/en/developing-applications/building-blocks/pubsub/pubsub-overview.md index fa633486846..41c9ac23b6c 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/pubsub/pubsub-overview.md +++ b/daprdocs/content/en/developing-applications/building-blocks/pubsub/pubsub-overview.md @@ -13,7 +13,7 @@ Publish and subscribe (pub/sub) enables microservices to communicate with each o An intermediary message broker copies each message from a publisher's input channel to an output channel for all subscribers interested in that message. This pattern is especially useful when you need to decouple microservices from one another. - +

@@ -32,15 +32,17 @@ When using pub/sub in Dapr: 1. The pub/sub building block makes calls into a Dapr pub/sub component that encapsulates a specific message broker. 1. To receive messages on a topic, Dapr subscribes to the pub/sub component on behalf of your service with a topic and delivers the messages to an endpoint on your service when they arrive. +[The following overview video and demo](https://www.youtube.com/live/0y7ne6teHT4?si=FMg2Y7bRuljKism-&t=5384) demonstrates how Dapr pub/sub works. + + + In the diagram below, a "shipping" service and an "email" service have both subscribed to topics published by a "cart" service. Each service loads pub/sub component configuration files that point to the same pub/sub message broker component; for example: Redis Streams, NATS Streaming, Azure Service Bus, or GCP pub/sub. - -

+ In the diagram below, the Dapr API posts an "order" topic from the publishing "cart" service to "order" endpoints on the "shipping" and "email" subscribing services. - -

+ [View the complete list of pub/sub components that Dapr supports]({{< ref supported-pubsub >}}). @@ -100,16 +102,29 @@ Dapr solves multi-tenancy at-scale with [namespaces for consumer groups]({{< ref ### At-least-once guarantee -Dapr guarantees at-least-once semantics for message delivery. When an application publishes a message to a topic using the pub/sub API, Dapr ensures the message is delivered *at least once* to every subscriber. +Dapr guarantees at-least-once semantics for message delivery. When an application publishes a message to a topic using the pub/sub API, Dapr ensures the message is delivered *at least once* to every subscriber. + +Even if the message fails to deliver, or your application crashes, Dapr attempts to redeliver the message until successful delivery. + +All Dapr pub/sub components support the at-least-once guarantee. ### Consumer groups and competing consumers pattern -Dapr automatically handles the burden of dealing with concepts like consumer groups and competing consumers pattern. The competing consumers pattern refers to multiple application instances using a single consumer group. When multiple instances of the same application (running same Dapr app ID) subscribe to a topic, Dapr delivers each message to *only one instance of **that** application*. This concept is illustrated in the diagram below. +Dapr handles the burden of dealing with consumer groups and the competing consumers pattern. In the competing consumers pattern, multiple application instances using a single consumer group compete for the message. Dapr enforces the competing consumers pattern when replicas use the same `app-id` without explicit consumer group overrides. + +When multiple instances of the same application (with the same `app-id`) subscribe to a topic, Dapr delivers each message to *only one instance of **that** application*. This concept is illustrated in the diagram below.

-Similarly, if two different applications (with different app-IDs) subscribe to the same topic, Dapr delivers each message to *only one instance of **each** application*. +Similarly, if two different applications (with different `app-id` values) subscribe to the same topic, Dapr delivers each message to *only one instance of **each** application*. + +Not all Dapr pub/sub components support the competing consumers pattern. Currently, the following (non-exhaustive) pub/sub components support this: + +- [Apache Kafka]({{< ref setup-apache-kafka >}}) +- [Azure Service Bus Queues]({{< ref setup-azure-servicebus-queues >}}) +- [RabbitMQ]({{< ref setup-rabbitmq >}}) +- [Redis Streams]({{< ref setup-redis-pubsub >}}) ### Scoping topics for added security diff --git a/daprdocs/content/en/developing-applications/building-blocks/pubsub/pubsub-raw.md b/daprdocs/content/en/developing-applications/building-blocks/pubsub/pubsub-raw.md index d5a0fbe61b0..6e518fa963a 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/pubsub/pubsub-raw.md +++ b/daprdocs/content/en/developing-applications/building-blocks/pubsub/pubsub-raw.md @@ -141,13 +141,14 @@ $app->start(); Similarly, you can subscribe to raw events declaratively by adding the `rawPayload` metadata entry to your subscription specification. ```yaml -apiVersion: dapr.io/v1alpha1 +apiVersion: dapr.io/v2alpha1 kind: Subscription metadata: name: myevent-subscription spec: topic: deathStarStatus - route: /dsstatus + routes: + default: /dsstatus pubsubname: pubsub metadata: rawPayload: "true" diff --git a/daprdocs/content/en/developing-applications/building-blocks/pubsub/subscription-methods.md b/daprdocs/content/en/developing-applications/building-blocks/pubsub/subscription-methods.md index 965647b92b4..0b9e2ca2b35 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/pubsub/subscription-methods.md +++ b/daprdocs/content/en/developing-applications/building-blocks/pubsub/subscription-methods.md @@ -22,13 +22,14 @@ The examples below demonstrate pub/sub messaging between a `checkout` app and an You can subscribe declaratively to a topic using an external component file. This example uses a YAML component file named `subscription.yaml`: ```yaml -apiVersion: dapr.io/v1alpha1 +apiVersion: dapr.io/v2alpha1 kind: Subscription metadata: name: order spec: topic: orders - route: /checkout + routes: + default: /checkout pubsubname: pubsub scopes: - orderprocessing @@ -186,7 +187,11 @@ The `/checkout` endpoint matches the `route` defined in the subscriptions and th ### Programmatic subscriptions -The programmatic approach returns the `routes` JSON structure within the code, unlike the declarative approach's `route` YAML structure. In the example below, you define the values found in the [declarative YAML subscription](#declarative-subscriptions) above within the application code. +The programmatic approach returns the `routes` JSON structure within the code, unlike the declarative approach's `route` YAML structure. + +> **Note:** Programmatic subscriptions are only read once during application start-up. You cannot _dynamically_ add new programmatic subscriptions, only add new ones at compile time. + +In the example below, you define the values found in the [declarative YAML subscription](#declarative-subscriptions) above within the application code.
{{< tabs ".NET" Java Python JavaScript Go>}} @@ -218,7 +223,7 @@ Both of the handlers defined above also need to be mapped to configure the `dapr app.UseEndpoints(endpoints => { endpoints.MapSubscribeHandler(); -} +}); ``` {{% /codetab %}} @@ -316,6 +321,7 @@ app.listen(port, () => console.log(`consumer app listening on port ${port}!`)) ```go package main +import ( "encoding/json" "fmt" "log" diff --git a/daprdocs/content/en/developing-applications/building-blocks/secrets/_index.md b/daprdocs/content/en/developing-applications/building-blocks/secrets/_index.md index 0a82e79c559..43179c9d946 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/secrets/_index.md +++ b/daprdocs/content/en/developing-applications/building-blocks/secrets/_index.md @@ -5,3 +5,11 @@ linkTitle: "Secrets management" weight: 70 description: Securely access secrets from your application --- + +{{% alert title="More about Dapr Secrets" color="primary" %}} + Learn more about how to use Dapr Secrets: + - Try the [Secrets quickstart]({{< ref secrets-quickstart.md >}}). + - Explore secrets via any of the supporting [Dapr SDKs]({{< ref sdks >}}). + - Review the [Secrets API reference documentation]({{< ref secrets_api.md >}}). + - Browse the supported [secrets component specs]({{< ref supported-secret-stores >}}). +{{% /alert %}} \ No newline at end of file diff --git a/daprdocs/content/en/developing-applications/building-blocks/secrets/secrets-overview.md b/daprdocs/content/en/developing-applications/building-blocks/secrets/secrets-overview.md index 856634f216b..717a250e33c 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/secrets/secrets-overview.md +++ b/daprdocs/content/en/developing-applications/building-blocks/secrets/secrets-overview.md @@ -18,6 +18,10 @@ Dapr's dedicated secrets building block API makes it easier for developers to co 1. Retrieve secrets using the Dapr secrets API in the application code. 1. Optionally, reference secrets in Dapr component files. +[The following overview video and demo](https://www.youtube.com/live/0y7ne6teHT4?si=3bmNSSyIEIVSF-Ej&t=9931) demonstrates how Dapr secrets management works. + + + ## Features The secrets management API building block brings several features to your application. diff --git a/daprdocs/content/en/developing-applications/building-blocks/service-invocation/_index.md b/daprdocs/content/en/developing-applications/building-blocks/service-invocation/_index.md index 011c8b2e42f..f59f106c59f 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/service-invocation/_index.md +++ b/daprdocs/content/en/developing-applications/building-blocks/service-invocation/_index.md @@ -5,3 +5,10 @@ linkTitle: "Service invocation" weight: 10 description: Perform direct, secure, service-to-service method calls --- + +{{% alert title="More about Dapr Service Invocation" color="primary" %}} + Learn more about how to use Dapr Service Invocation: + - Try the [Service Invocation quickstart]({{< ref serviceinvocation-quickstart.md >}}). + - Explore service invocation via any of the supporting [Dapr SDKs]({{< ref sdks >}}). + - Review the [Service Invocation API reference documentation]({{< ref service_invocation_api.md >}}). 
+{{% /alert %}} \ No newline at end of file diff --git a/daprdocs/content/en/developing-applications/building-blocks/service-invocation/howto-invoke-discover-services.md b/daprdocs/content/en/developing-applications/building-blocks/service-invocation/howto-invoke-discover-services.md index 6e70fe765dd..c3f78e4a4e0 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/service-invocation/howto-invoke-discover-services.md +++ b/daprdocs/content/en/developing-applications/building-blocks/service-invocation/howto-invoke-discover-services.md @@ -3,7 +3,7 @@ type: docs title: "How-To: Invoke services using HTTP" linkTitle: "How-To: Invoke with HTTP" description: "Call between services using service invocation" -weight: 2000 +weight: 20 --- This article demonstrates how to deploy services each with an unique application ID for other services to discover and call endpoints on them using service invocation over HTTP. @@ -19,26 +19,22 @@ This article demonstrates how to deploy services each with an unique application Dapr allows you to assign a global, unique ID for your app. This ID encapsulates the state for your application, regardless of the number of instances it may have. -{{< tabs Dotnet Java Python Go JavaScript Kubernetes>}} +{{< tabs Python JavaScript ".NET" Java Go Kubernetes >}} {{% codetab %}} ```bash +dapr run --app-id checkout --app-protocol http --dapr-http-port 3500 -- python3 checkout/app.py -dapr run --app-id checkout --app-port 6002 --dapr-http-port 3602 --dapr-grpc-port 60002 dotnet run - -dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-grpc-port 60001 dotnet run - +dapr run --app-id order-processor --app-port 8001 --app-protocol http --dapr-http-port 3501 -- python3 order-processor/app.py ``` If your app uses a TLS, you can tell Dapr to invoke your app over a TLS connection by setting `--app-protocol https`: ```bash +dapr run --app-id checkout --app-protocol https --dapr-http-port 3500 -- python3 checkout/app.py -dapr run --app-id checkout --app-port 6002 --dapr-http-port 3602 --dapr-grpc-port 60002 --app-protocol https dotnet run - -dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-grpc-port 60001 --app-protocol https dotnet run - +dapr run --app-id order-processor --app-port 8001 --app-protocol https --dapr-http-port 3501 -- python3 order-processor/app.py ``` {{% /codetab %}} @@ -46,21 +42,17 @@ dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-g {{% codetab %}} ```bash +dapr run --app-id checkout --app-protocol http --dapr-http-port 3500 -- npm start -dapr run --app-id checkout --app-port 6002 --dapr-http-port 3602 --dapr-grpc-port 60002 mvn spring-boot:run - -dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-grpc-port 60001 mvn spring-boot:run - +dapr run --app-id order-processor --app-port 5001 --app-protocol http --dapr-http-port 3501 -- npm start ``` If your app uses a TLS, you can tell Dapr to invoke your app over a TLS connection by setting `--app-protocol https`: ```bash +dapr run --app-id checkout --dapr-http-port 3500 --app-protocol https -- npm start -dapr run --app-id checkout --app-port 6002 --dapr-http-port 3602 --dapr-grpc-port 60002 --app-protocol https mvn spring-boot:run - -dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-grpc-port 60001 --app-protocol https mvn spring-boot:run - +dapr run --app-id order-processor --app-port 5001 --dapr-http-port 3501 --app-protocol https -- npm start ``` {{% 
/codetab %}} @@ -68,21 +60,17 @@ dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-g {{% codetab %}} ```bash +dapr run --app-id checkout --app-protocol http --dapr-http-port 3500 -- dotnet run -dapr run --app-id checkout --app-port 6002 --dapr-http-port 3602 --dapr-grpc-port 60002 -- python3 CheckoutService.py - -dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-grpc-port 60001 -- python3 OrderProcessingService.py - +dapr run --app-id order-processor --app-port 7001 --app-protocol http --dapr-http-port 3501 -- dotnet run ``` If your app uses a TLS, you can tell Dapr to invoke your app over a TLS connection by setting `--app-protocol https`: ```bash +dapr run --app-id checkout --dapr-http-port 3500 --app-protocol https -- dotnet run -dapr run --app-id checkout --app-port 6002 --dapr-http-port 3602 --dapr-grpc-port 60002 --app-protocol https -- python3 CheckoutService.py - -dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-grpc-port 60001 --app-protocol https -- python3 OrderProcessingService.py - +dapr run --app-id order-processor --app-port 7001 --dapr-http-port 3501 --app-protocol https -- dotnet run ``` {{% /codetab %}} @@ -90,21 +78,17 @@ dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-g {{% codetab %}} ```bash +dapr run --app-id checkout --app-protocol http --dapr-http-port 3500 -- java -jar target/CheckoutService-0.0.1-SNAPSHOT.jar -dapr run --app-id checkout --app-port 6002 --dapr-http-port 3602 --dapr-grpc-port 60002 go run CheckoutService.go - -dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-grpc-port 60001 go run OrderProcessingService.go - +dapr run --app-id order-processor --app-port 9001 --app-protocol http --dapr-http-port 3501 -- java -jar target/OrderProcessingService-0.0.1-SNAPSHOT.jar ``` If your app uses a TLS, you can tell Dapr to invoke your app over a TLS connection by setting `--app-protocol https`: ```bash +dapr run --app-id checkout --dapr-http-port 3500 --app-protocol https -- java -jar target/CheckoutService-0.0.1-SNAPSHOT.jar -dapr run --app-id checkout --app-port 6002 --dapr-http-port 3602 --dapr-grpc-port 60002 --app-protocol https go run CheckoutService.go - -dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-grpc-port 60001 --app-protocol https go run OrderProcessingService.go - +dapr run --app-id order-processor --app-port 9001 --dapr-http-port 3501 --app-protocol https -- java -jar target/OrderProcessingService-0.0.1-SNAPSHOT.jar ``` {{% /codetab %}} @@ -112,21 +96,17 @@ dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-g {{% codetab %}} ```bash +dapr run --app-id checkout --dapr-http-port 3500 -- go run . -dapr run --app-id checkout --app-port 6002 --dapr-http-port 3602 --dapr-grpc-port 60002 npm start - -dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-grpc-port 60001 npm start - +dapr run --app-id order-processor --app-port 6006 --app-protocol http --dapr-http-port 3501 -- go run . ``` If your app uses a TLS, you can tell Dapr to invoke your app over a TLS connection by setting `--app-protocol https`: ```bash +dapr run --app-id checkout --dapr-http-port 3500 --app-protocol https -- go run . 
-dapr run --app-id checkout --app-port 6002 --dapr-http-port 3602 --dapr-grpc-port 60002 --app-protocol https npm start - -dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-grpc-port 60001 --app-protocol https npm start - +dapr run --app-id order-processor --app-port 6006 --dapr-http-port 3501 --app-protocol https -- go run . ``` {{% /codetab %}} @@ -156,7 +136,7 @@ spec: app: -app annotations: dapr.io/enabled: "true" - dapr.io/app-id: "orderprocessingservice" + dapr.io/app-id: "order-processor" dapr.io/app-port: "6001" ... ``` @@ -173,7 +153,69 @@ To invoke an application using Dapr, you can use the `invoke` API on any Dapr in Below are code examples that leverage Dapr SDKs for service invocation. -{{< tabs Dotnet Java Python Go Javascript>}} +{{< tabs Python JavaScript ".NET" Java Go >}} + +{{% codetab %}} + +```python +#dependencies +import json +import random +from time import sleep +import logging +import requests + +#code +logging.basicConfig(level = logging.INFO) +# Call through the Dapr sidecar; the dapr-app-id header routes the request to the target app +base_url = 'http://localhost:3500' +headers = {'dapr-app-id': 'order-processor', 'content-type': 'application/json'} +while True: + sleep(random.randrange(50, 5000) / 1000) + orderId = random.randint(1, 1000) + order = {'orderId': orderId} + #Invoke a service + result = requests.post( + url='%s/orders' % (base_url), + data=json.dumps(order), + headers=headers + ) + logging.info('Order requested: ' + str(orderId)) + logging.info('Result: ' + str(result)) +``` + +{{% /codetab %}} + +{{% codetab %}} + +```javascript +//dependencies +import axios from "axios"; + +//code +const daprHost = "127.0.0.1"; +const daprPort = process.env.DAPR_HTTP_PORT || "3500"; + +//Call through the Dapr sidecar; the dapr-app-id header routes the request to the target app +const axiosConfig = { + headers: { + "dapr-app-id": "order-processor" + } +}; + +var main = async function() { + for(var i=0;i<10;i++) { + await sleep(5000); + var orderId = Math.floor(Math.random() * (1000 - 1) + 1); + await start(orderId).catch((e) => { + console.error(e); + process.exit(1); + }); + } +} + +async function start(orderId) { + //Invoke a service + const result = await axios.post(`http://${daprHost}:${daprPort}/orders`, { orderId: orderId }, axiosConfig); + console.log("Order requested: " + orderId); + console.log("Result: " + JSON.stringify(result.data)); +} + +function sleep(ms) { + return new Promise(resolve => setTimeout(resolve, ms)); +} + +main(); +``` + +{{% /codetab %}} {{% codetab %}} ```csharp //dependencies using System; using System.Collections.Generic; using System.Net.Http; +using System.Net.Http.Json; using System.Net.Http.Headers; using System.Threading.Tasks; using Dapr.Client; using Microsoft.AspNetCore.Mvc; using System.Threading; @@ -196,14 +237,17 @@ namespace EventService static async Task Main(string[] args) { while(true) { - System.Threading.Thread.Sleep(5000); - Random random = new Random(); - int orderId = random.Next(1,1000); - using var client = new DaprClientBuilder().Build(); + await Task.Delay(5000); + var random = new Random(); + var orderId = random.Next(1,1000); //Using Dapr SDK to invoke a method - var result = client.CreateInvokeMethodRequest(HttpMethod.Get, "checkout", "checkout/" + orderId); - await client.InvokeMethodAsync(result); + var order = new Order(orderId.ToString()); + + // CreateInvokeHttpClient resolves the app ID in the URL host via service invocation + var httpClient = DaprClient.CreateInvokeHttpClient(); + var result = await httpClient.PostAsJsonAsync("http://order-processor/orders", order); Console.WriteLine("Order requested: " + orderId); Console.WriteLine("Result: " + result); } @@ -218,39 +262,52 @@ namespace EventService ```java //dependencies -import io.dapr.client.DaprClient; -import io.dapr.client.DaprClientBuilder; -import io.dapr.client.domain.HttpExtension; -import org.springframework.boot.autoconfigure.SpringBootApplication; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import java.io.IOException; +import
java.net.URI; +import java.net.http.HttpClient; +import java.net.http.HttpRequest; +import java.net.http.HttpResponse; +import java.time.Duration; +import java.util.HashMap; +import java.util.Map; +import org.json.JSONObject; import java.util.Random; import java.util.concurrent.TimeUnit; //code @SpringBootApplication -public class OrderProcessingServiceApplication { - - private static final Logger log = LoggerFactory.getLogger(OrderProcessingServiceApplication.class); - - public static void main(String[] args) throws InterruptedException{ - while(true) { - TimeUnit.MILLISECONDS.sleep(5000); - Random random = new Random(); - int orderId = random.nextInt(1000-1) + 1; - DaprClient daprClient = new DaprClientBuilder().build(); - //Using Dapr SDK to invoke a method - var result = daprClient.invokeMethod( - "checkout", - "checkout/" + orderId, - null, - HttpExtension.GET, - String.class - ); - log.info("Order requested: " + orderId); - log.info("Result: " + result); - } - } +public class CheckoutServiceApplication { + private static final HttpClient httpClient = HttpClient.newBuilder() + .version(HttpClient.Version.HTTP_2) + .connectTimeout(Duration.ofSeconds(10)) + .build(); + + public static void main(String[] args) throws InterruptedException, IOException { + while (true) { + TimeUnit.MILLISECONDS.sleep(5000); + Random random = new Random(); + int orderId = random.nextInt(1000 - 1) + 1; + + // Create a Map to represent the request body + Map<String, Object> requestBody = new HashMap<>(); + requestBody.put("orderId", orderId); + // Add other fields to the requestBody Map as needed + + // Dapr sidecar HTTP endpoint; the dapr-app-id header routes the call to the target app + String daprUrl = "http://localhost:3500/orders"; + + HttpRequest request = HttpRequest.newBuilder() + .POST(HttpRequest.BodyPublishers.ofString(new JSONObject(requestBody).toString())) + .uri(URI.create(daprUrl)) + .header("Content-Type", "application/json") + .header("dapr-app-id", "order-processor") + .build(); + + HttpResponse<String> response = httpClient.send(request, HttpResponse.BodyHandlers.ofString()); + + System.out.println("Order requested: " + orderId); + System.out.println("Result: " + response.body()); + + TimeUnit.MILLISECONDS.sleep(1000); + } + } } ``` {{% /codetab %}} {{% codetab %}} -```python -#dependencies -import random -from time import sleep -import logging -from dapr.clients import DaprClient - -#code -logging.basicConfig(level = logging.INFO) -while True: - sleep(random.randrange(50, 5000) / 1000) - orderId = random.randint(1, 1000) - with DaprClient() as daprClient: - #Using Dapr SDK to invoke a method - result = daprClient.invoke_method( - "checkout", - f"checkout/{orderId}", - data=b'', - http_verb="GET" - ) - logging.basicConfig(level = logging.INFO) - logging.info('Order requested: ' + str(orderId)) - logging.info('Result: ' + str(result)) -``` - -{{% /codetab %}} - -{{% codetab %}} - ```go -//dependencies +package main + import ( - "context" + "fmt" + "io" "log" "math/rand" + "net/http" + "os" "time" - "strconv" - dapr "github.com/dapr/go-sdk/client" - ) -//code -type Order struct { - orderName string - orderNum string -} - func main() { + daprHttpPort := os.Getenv("DAPR_HTTP_PORT") + if daprHttpPort == "" { + daprHttpPort = "3500" + } + + client := &http.Client{ + Timeout: 15 * time.Second, + } + for i := 0; i < 10; i++ { time.Sleep(5000) orderId := rand.Intn(1000-1) + 1 - client, err := dapr.NewClient() + + url := fmt.Sprintf("http://localhost:%s/checkout/%v", daprHttpPort, orderId) + req, err := http.NewRequest(http.MethodGet, url, nil) if err != nil { panic(err) } - defer client.Close() - ctx :=
context.Background() - //Using Dapr SDK to invoke a method - result, err := client.InvokeMethod(ctx, "checkout", "checkout/" + strconv.Itoa(orderId), "get") - log.Println("Order requested: " + strconv.Itoa(orderId)) - log.Println("Result: ") - log.Println(result) - } -} -``` - -{{% /codetab %}} - -{{% codetab %}} - -```javascript -//dependencies -import { DaprClient, HttpMethod, CommunicationProtocolEnum } from '@dapr/dapr'; -//code -const daprHost = "127.0.0.1"; + // Adding target app id as part of the header + req.Header.Add("dapr-app-id", "order-processor") -var main = function() { - for(var i=0;i<10;i++) { - sleep(5000); - var orderId = Math.floor(Math.random() * (1000 - 1) + 1); - start(orderId).catch((e) => { - console.error(e); - process.exit(1); - }); - } -} + // Invoking a service + resp, err := client.Do(req) + if err != nil { + log.Fatal(err.Error()) + } -async function start(orderId) { - const client = new DaprClient({ - daprHost: daprHost, - daprPort: process.env.DAPR_HTTP_PORT, - communicationProtocol: CommunicationProtocolEnum.HTTP - }); - - //Using Dapr SDK to invoke a method - const result = await client.invoker.invoke('checkoutservice' , "checkout/" + orderId , HttpMethod.GET); - console.log("Order requested: " + orderId); - console.log("Result: " + result); -} + b, err := io.ReadAll(resp.Body) + if err != nil { + panic(err) + } + resp.Body.Close() -function sleep(ms) { - return new Promise(resolve => setTimeout(resolve, ms)); + fmt.Println(string(b)) + } } - -main(); ``` {{% /codetab %}} @@ -432,4 +433,4 @@ For more information on tracing and logs, see the [observability]({{< ref observ ## Related Links - [Service invocation overview]({{< ref service-invocation-overview.md >}}) -- [Service invocation API specification]({{< ref service_invocation_api.md >}}) \ No newline at end of file +- [Service invocation API specification]({{< ref service_invocation_api.md >}}) diff --git a/daprdocs/content/en/developing-applications/building-blocks/service-invocation/howto-invoke-non-dapr-endpoints.md b/daprdocs/content/en/developing-applications/building-blocks/service-invocation/howto-invoke-non-dapr-endpoints.md index 8196bd0b1e5..4c5e0224dde 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/service-invocation/howto-invoke-non-dapr-endpoints.md +++ b/daprdocs/content/en/developing-applications/building-blocks/service-invocation/howto-invoke-non-dapr-endpoints.md @@ -3,7 +3,7 @@ type: docs title: "How-To: Invoke Non-Dapr Endpoints using HTTP" linkTitle: "How-To: Invoke Non-Dapr Endpoints" description: "Call Non-Dapr endpoints from Dapr applications using service invocation" -weight: 2000 +weight: 40 --- This article demonstrates how to call a non-Dapr endpoint using Dapr over HTTP. @@ -47,7 +47,7 @@ The diagram below is an overview of how Dapr's service invocation works when inv ## Using an HTTPEndpoint resource or FQDN URL for non-Dapr endpoints There are two ways to invoke a non-Dapr endpoint when communicating either to Dapr applications or non-Dapr applications. A Dapr application can invoke a non-Dapr endpoint by providing one of the following: -A named `HTTPEndpoint` resource, including defining an `HTTPEndpoint` resource type. See the [HTTPEndpoint reference]({{< ref httpendpoints-schema.md >}}) guide for an example.
```sh localhost:3500/v1.0/invoke/<HTTPEndpoint-name>/method/<my-method> ``` @@ -70,7 +70,7 @@ There are two ways to invoke a non-Dapr endpoint when communicating either to Da ``` ### Using appId when calling Dapr enabled applications -AppIDs are always used to call Dapr applications with the `appID` and `my-method. Read the [How-To: Invoke services using HTTP]({{< ref howto-invoke-discover-services.md >}}) guide for more information. For example: +AppIDs are always used to call Dapr applications with the `appID` and `my-method`. Read the [How-To: Invoke services using HTTP]({{< ref howto-invoke-discover-services.md >}}) guide for more information. For example: ```sh localhost:3500/v1.0/invoke/<appID>/method/<my-method> ``` curl http://localhost:3602/v1.0/invoke/orderprocessor/method/checkout ## Related Links -[HTTPEndpoint reference]({{< ref httpendpoints-reference.md >}}) +[HTTPEndpoint reference]({{< ref httpendpoints-schema.md >}}) - [Service invocation overview]({{< ref service-invocation-overview.md >}}) - [Service invocation API specification]({{< ref service_invocation_api.md >}}) diff --git a/daprdocs/content/en/developing-applications/building-blocks/service-invocation/howto-invoke-services-grpc.md b/daprdocs/content/en/developing-applications/building-blocks/service-invocation/howto-invoke-services-grpc.md index fc953f6034d..1de970c7d1f 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/service-invocation/howto-invoke-services-grpc.md +++ b/daprdocs/content/en/developing-applications/building-blocks/service-invocation/howto-invoke-services-grpc.md @@ -3,7 +3,7 @@ type: docs title: "How-To: Invoke services using gRPC" linkTitle: "How-To: Invoke with gRPC" description: "Call between services using service invocation" -weight: 3000 +weight: 30 --- This article describe how to use Dapr to connect services using gRPC. diff --git a/daprdocs/content/en/developing-applications/building-blocks/service-invocation/service-invocation-namespaces.md b/daprdocs/content/en/developing-applications/building-blocks/service-invocation/service-invocation-namespaces.md index 6ee966558d1..87370dd1c51 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/service-invocation/service-invocation-namespaces.md +++ b/daprdocs/content/en/developing-applications/building-blocks/service-invocation/service-invocation-namespaces.md @@ -2,7 +2,7 @@ type: docs title: "How to: Service invocation across namespaces" linkTitle: "How to: Service invocation namespaces" -weight: 1000 +weight: 50 description: "Call between services deployed to different namespaces" --- diff --git a/daprdocs/content/en/developing-applications/building-blocks/service-invocation/service-invocation-overview.md b/daprdocs/content/en/developing-applications/building-blocks/service-invocation/service-invocation-overview.md index 4d2ce7a2c43..edd542ef985 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/service-invocation/service-invocation-overview.md +++ b/daprdocs/content/en/developing-applications/building-blocks/service-invocation/service-invocation-overview.md @@ -2,13 +2,13 @@ type: docs title: "Service invocation overview" linkTitle: "Overview" -weight: 900 +weight: 10 description: "Overview of the service invocation API building block" --- Using service invocation, your application can reliably and securely communicate with other applications using the standard [gRPC](https://grpc.io) or [HTTP](https://www.w3.org/Protocols/) protocols.
-In many microservice-based applications multiple services need the ability to communicate with one another. This inter-service communication requires that application developers handle problems like: +In many microservice-based applications, multiple services need the ability to communicate with one another. This inter-service communication requires that application developers handle problems like: - **Service discovery.** How do I discover my different services? - **Standardizing API calls between services.** How do I invoke methods between services? @@ -25,6 +25,10 @@ Dapr uses a sidecar architecture. To invoke an application using Dapr: - Each application communicates with its own instance of Dapr. - The Dapr instances discover and communicate with each other. +[The following overview video and demo](https://www.youtube.com/live/0y7ne6teHT4?si=mtLMrajE5wVXJYz8&t=3598) demonstrates how Dapr service invocation works. + + + The diagram below is an overview of how Dapr's service invocation works between two Dapr-ized applications. Diagram showing the steps of service invocation @@ -61,7 +65,6 @@ In the event of call failures and transient errors, service invocation provides By default, all calls between applications are traced and metrics are gathered to provide insights and diagnostics for applications. This is especially important in production scenarios, providing call graphs and metrics on the calls between your services. For more information read about [observability]({{< ref observability-concept.md >}}). - ### Access control With access policies, applications can control: @@ -83,7 +86,7 @@ Dapr provides round robin load balancing of service invocation requests with the The diagram below shows an example of how this works. If you have 1 instance of an application with app ID `FrontEnd` and 3 instances of application with app ID `Cart` and you call from `FrontEnd` app to `Cart` app, Dapr round robins' between the 3 instances. These instance can be on the same machine or on different machines. . -Diagram showing the steps of service invocation +Diagram showing the steps of service invocation **Note**: App ID is unique per _application_, not application instance. Regardless how many instances of that application exist (due to scaling), all of them will share the same app ID. @@ -97,7 +100,7 @@ Following the above call sequence, suppose you have the applications as describe The diagram below shows sequence 1-7 again on a local machine showing the API calls: - + 1. The Node.js app has a Dapr app ID of `nodeapp`. The python app invokes the Node.js app's `neworder` method by POSTing `http://localhost:3500/v1.0/invoke/nodeapp/method/neworder`, which first goes to the python app's local Dapr sidecar. 2. Dapr discovers the Node.js app's location using name resolution component (in this case mDNS while self-hosted) which runs on your local machine. @@ -135,5 +138,5 @@ For quick testing, try using the Dapr CLI for service invocation: ## Next steps - Read the [service invocation API specification]({{< ref service_invocation_api.md >}}). This reference guide for service invocation describes how to invoke methods on other services. - Understand the [service invocation performance numbers]({{< ref perf-service-invocation.md >}}). -- Take a look at [observability]({{< ref monitoring.md >}}). Here you can dig into Dapr's monitoring tools like tracing, metrics and logging. +- Take a look at [observability]({{< ref observability >}}). 
Here you can dig into Dapr's monitoring tools like tracing, metrics, and logging. - Read up on our [security practices]({{< ref security-concept.md >}}) around mTLS encryption, token authentication, and endpoint authorization. diff --git a/daprdocs/content/en/developing-applications/building-blocks/state-management/_index.md b/daprdocs/content/en/developing-applications/building-blocks/state-management/_index.md index fd6f95bdbe2..5b4a0ecb98a 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/state-management/_index.md +++ b/daprdocs/content/en/developing-applications/building-blocks/state-management/_index.md @@ -5,3 +5,11 @@ linkTitle: "State management" weight: 20 description: Create long running stateful services --- + +{{% alert title="More about Dapr State Management" color="primary" %}} + Learn more about how to use Dapr State Management: + - Try the [State Management quickstart]({{< ref statemanagement-quickstart.md >}}). + - Explore state management via any of the supporting [Dapr SDKs]({{< ref sdks >}}). + - Review the [State Management API reference documentation]({{< ref state_api.md >}}). + - Browse the supported [state management component specs]({{< ref supported-state-stores >}}). +{{% /alert %}} \ No newline at end of file diff --git a/daprdocs/content/en/developing-applications/building-blocks/state-management/state-management-overview.md b/daprdocs/content/en/developing-applications/building-blocks/state-management/state-management-overview.md index a7dacc36194..afc6bd5f1e4 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/state-management/state-management-overview.md +++ b/daprdocs/content/en/developing-applications/building-blocks/state-management/state-management-overview.md @@ -11,7 +11,11 @@ Your application can use Dapr's state management API to save, read, and query ke - Use **HTTP POST** to save or query key/value pairs. - Use **HTTP GET** to read a specific key and have its value returned. - + + +[The following overview video and demo](https://www.youtube.com/live/0y7ne6teHT4?si=2_xX6mkU3UCy2Plr&t=6607) demonstrates how Dapr state management works. + + ## Features diff --git a/daprdocs/content/en/developing-applications/building-blocks/state-management/state-store-ttl.md b/daprdocs/content/en/developing-applications/building-blocks/state-management/state-store-ttl.md index 4717a1889b2..e5b22d3e004 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/state-management/state-store-ttl.md +++ b/daprdocs/content/en/developing-applications/building-blocks/state-management/state-store-ttl.md @@ -16,9 +16,15 @@ When state TTL has native support in the state store component, Dapr forwards th When a TTL is not specified, the default behavior of the state store is retained. -## Persisting state (ignoring an existing TTL) +## Explicit persistence bypassing globally defined TTL -To explicitly persist a state (ignoring any TTLs set for the key), specify a `ttlInSeconds` value of `-1`. +Persisting state applies to all state stores that let you specify a default TTL used for all data, either by: +- Setting a global TTL value via a Dapr component, or +- Creating the state store outside of Dapr and setting a global TTL value. + +When no specific TTL is set on a key, the data expires after that global TTL period of time. This expiration is handled by the state store itself, not by Dapr. + +In addition, all state stores also support the option to _explicitly_ persist data.
This means you can override the default database policy (which may have been set outside of Dapr or via a Dapr component) and retain a given database record indefinitely. You can do this by setting `ttlInSeconds` to `-1`; this value tells the state store to ignore any TTL that would otherwise apply. ## Supported components @@ -71,7 +77,7 @@ using Dapr.Client; await client.SaveStateAsync(storeName, stateKeyName, state, metadata: new Dictionary<string, string>() { { - "metadata.ttlInSeconds", "120" + "ttlInSeconds", "120" } }); ``` diff --git a/daprdocs/content/en/developing-applications/building-blocks/workflow/_index.md b/daprdocs/content/en/developing-applications/building-blocks/workflow/_index.md index a8617ef85ae..a1b87a20ac3 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/workflow/_index.md +++ b/daprdocs/content/en/developing-applications/building-blocks/workflow/_index.md @@ -4,4 +4,11 @@ title: "Workflow" linkTitle: "Workflow" weight: 100 description: "Orchestrate logic across various microservices" ---- \ No newline at end of file +--- + +{{% alert title="More about Dapr Workflow" color="primary" %}} + Learn more about how to use Dapr Workflow: + - Try the [Workflow quickstart]({{< ref workflow-quickstart.md >}}). + - Explore workflow via any of the supporting [Dapr SDKs]({{< ref sdks >}}). + - Review the [Workflow API reference documentation]({{< ref workflow_api.md >}}). +{{% /alert %}} \ No newline at end of file diff --git a/daprdocs/content/en/developing-applications/building-blocks/workflow/workflow-features-concepts.md b/daprdocs/content/en/developing-applications/building-blocks/workflow/workflow-features-concepts.md index 28b1a36b6c3..c8e3de13187 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/workflow/workflow-features-concepts.md +++ b/daprdocs/content/en/developing-applications/building-blocks/workflow/workflow-features-concepts.md @@ -141,12 +141,10 @@ Sometimes workflows will need to wait for events that are raised by external sys External events have a _name_ and a _payload_ and are delivered to a single workflow instance. Workflows can create "_wait for external event_" tasks that subscribe to external events and _await_ those tasks to block execution until the event is received. The workflow can then read the payload of these events and make decisions about which next steps to take. External events can be processed serially or in parallel. External events can be raised by other workflows or by workflow code. -{{% alert title="Note" color="primary" %}} -The ability to raise external events to workflows is not included in the alpha version of Dapr's workflow API. -{{% /alert %}} - Workflows can also wait for multiple external event signals of the same name, in which case they are dispatched to the corresponding workflow tasks in a first-in, first-out (FIFO) manner. If a workflow receives an external event signal but has not yet created a "wait for external event" task, the event will be saved into the workflow's history and consumed immediately after the workflow requests the event.
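+As a minimal sketch of this mechanic (using the Python SDK that appears later in these docs; the workflow and event names here are hypothetical), a workflow blocks on an external event like this:
+
+```python
+import dapr.ext.workflow as wf
+
+
+def wait_for_order_workflow(ctx: wf.DaprWorkflowContext, _):
+    # Create a "wait for external event" task; yielding it blocks this
+    # workflow instance until an event named "order_received" is raised.
+    order = yield ctx.wait_for_external_event("order_received")
+    # The event's payload can drive the next steps of the workflow.
+    return order
+```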
+Learn more about [external system interaction]({{< ref "workflow-patterns.md#external-system-interaction" >}}). + ## Limitations ### Workflow determinism and code restraints diff --git a/daprdocs/content/en/developing-applications/building-blocks/workflow/workflow-overview.md b/daprdocs/content/en/developing-applications/building-blocks/workflow/workflow-overview.md index f5113c69544..9f70500c01f 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/workflow/workflow-overview.md +++ b/daprdocs/content/en/developing-applications/building-blocks/workflow/workflow-overview.md @@ -16,7 +16,7 @@ The durable, resilient Dapr Workflow capability: - Offers a built-in workflow runtime for driving Dapr Workflow execution. - Provides SDKs for authoring workflows in code, using any language. -- Provides HTTP and gRPC APIs for managing workflows (start, query, suspend/resume, terminate). +- Provides HTTP and gRPC APIs for managing workflows (start, query, pause/resume, raise event, terminate, purge). - Integrates with any other workflow runtime via workflow components. Diagram showing basics of Dapr Workflow @@ -56,7 +56,10 @@ Same as Dapr actors, you can schedule reminder-like durable delays for any time When you create an application with workflow code and run it with Dapr, you can call specific workflows that reside in the application. Each individual workflow can be: - Started or terminated through a POST request -- Queried through a GET request +- Triggered to deliver a named event through a POST request +- Paused and then resumed through a POST request +- Purged from your state store through a POST request +- Queried for workflow status through a GET request [Learn more about how to manage a workflow using HTTP calls.]({{< ref workflow_api.md >}}) diff --git a/daprdocs/content/en/developing-applications/building-blocks/workflow/workflow-patterns.md b/daprdocs/content/en/developing-applications/building-blocks/workflow/workflow-patterns.md index 024ab82e10d..4ff10782be4 100644 --- a/daprdocs/content/en/developing-applications/building-blocks/workflow/workflow-patterns.md +++ b/daprdocs/content/en/developing-applications/building-blocks/workflow/workflow-patterns.md @@ -25,9 +25,10 @@ While the pattern is simple, there are many complexities hidden in the implement Dapr Workflow solves these complexities by allowing you to implement the task chaining pattern concisely as a simple function in the programming language of your choice, as shown in the following example. -{{< tabs ".NET" >}} +{{< tabs ".NET" Python >}} {{% codetab %}} + ```csharp // Exponential backoff retry policy that survives long outages @@ -45,7 +46,6 @@ try var result1 = await context.CallActivityAsync<string>("Step1", wfInput, retryOptions); var result2 = await context.CallActivityAsync<string>("Step2", result1, retryOptions); var result3 = await context.CallActivityAsync<string>("Step3", result2, retryOptions); - var result4 = await context.CallActivityAsync<string>("Step4", result3, retryOptions); - return string.Join(", ", result4); + return string.Join(", ", result1, result2, result3); } catch (TaskFailedException) // Task failures are surfaced as TaskFailedException @@ -56,14 +56,61 @@ catch (TaskFailedException) // Task failures are surfaced as TaskFailedException } ``` +{{% alert title="Note" color="primary" %}} +In the example above, `"Step1"`, `"Step2"`, `"Step3"`, and `"MyCompensation"` represent workflow activities, which are functions in your code that actually implement the steps of the workflow. For brevity, these activity implementations are left out of this example.
+{{% /alert %}} + {{% /codetab %}} -{{< /tabs >}} +{{% codetab %}} + + +```python +import dapr.ext.workflow as wf + + +def task_chain_workflow(ctx: wf.DaprWorkflowContext, wf_input: int): + try: + result1 = yield ctx.call_activity(step1, input=wf_input) + result2 = yield ctx.call_activity(step2, input=result1) + result3 = yield ctx.call_activity(step3, input=result2) + except Exception as e: + yield ctx.call_activity(error_handler, input=str(e)) + raise + return [result1, result2, result3] + + +def step1(ctx, activity_input): + print(f'Step 1: Received input: {activity_input}.') + # Do some work + return activity_input + 1 + + +def step2(ctx, activity_input): + print(f'Step 2: Received input: {activity_input}.') + # Do some work + return activity_input * 2 + + +def step3(ctx, activity_input): + print(f'Step 3: Received input: {activity_input}.') + # Do some work + return activity_input ** 2 + + +def error_handler(ctx, error): + print(f'Executing error handler: {error}.') + # Do some compensating work +``` {{% alert title="Note" color="primary" %}} -In the example above, `"Step1"`, `"Step2"`, `"MyCompensation"`, etc. represent workflow activities, which are functions in your code that actually implement the steps of the workflow. For brevity, these activity implementations are left out of this example. +Workflow retry policies will be available in a future version of the Python SDK. {{% /alert %}} +{{% /codetab %}} + +{{< /tabs >}} + As you can see, the workflow is expressed as a simple series of statements in the programming language of your choice. This allows any engineer in the organization to quickly understand the end-to-end flow without necessarily needing to understand the end-to-end system architecture. Behind the scenes, the Dapr Workflow runtime: @@ -88,9 +135,10 @@ In addition to the challenges mentioned in [the previous pattern]({{< ref "workf Dapr Workflows provides a way to express the fan-out/fan-in pattern as a simple function, as shown in the following example: -{{< tabs ".NET" >}} +{{< tabs ".NET" Python >}} {{% codetab %}} + ```csharp // Get a list of N work items to process in parallel. @@ -114,6 +162,46 @@ await context.CallActivityAsync("PostResults", sum); {{% /codetab %}} +{{% codetab %}} + + +```python +import time +from typing import List +import dapr.ext.workflow as wf + + +def batch_processing_workflow(ctx: wf.DaprWorkflowContext, wf_input: int): + # get a batch of N work items to process in parallel + work_batch = yield ctx.call_activity(get_work_batch, input=wf_input) + + # schedule N parallel tasks to process the work items and wait for all to complete + parallel_tasks = [ctx.call_activity(process_work_item, input=work_item) for work_item in work_batch] + outputs = yield wf.when_all(parallel_tasks) + + # aggregate the results and send them to another activity + total = sum(outputs) + yield ctx.call_activity(process_results, input=total) + + +def get_work_batch(ctx, batch_size: int) -> List[int]: + return [i + 1 for i in range(batch_size)] + + +def process_work_item(ctx, work_item: int) -> int: + print(f'Processing work item: {work_item}.') + time.sleep(5) + result = work_item * 2 + print(f'Work item {work_item} processed.
Result: {result}.') + return result + + +def process_results(ctx, final_result: int): + print(f'Final result: {final_result}.') +``` + +{{% /codetab %}} + {{< /tabs >}} The key takeaways from this example are: @@ -214,9 +302,10 @@ Depending on the business needs, there may be a single monitor or there may be m Dapr Workflow supports this pattern natively by allowing you to implement _eternal workflows_. Rather than writing infinite while-loops ([which is an anti-pattern]({{< ref "workflow-features-concepts.md#infinite-loops-and-eternal-workflows" >}})), Dapr Workflow exposes a _continue-as-new_ API that workflow authors can use to restart a workflow function from the beginning with a new input. -{{< tabs ".NET" >}} +{{< tabs ".NET" Python >}} {{% codetab %}} + ```csharp public override async Task RunAsync(WorkflowContext context, MyEntityState myEntityState) @@ -256,6 +345,53 @@ public override async Task RunAsync(WorkflowContext context, MyEntitySta {{% /codetab %}} +{{% codetab %}} + + +```python +from dataclasses import dataclass +from datetime import timedelta +import random +import dapr.ext.workflow as wf + + +@dataclass +class JobStatus: + job_id: str + is_healthy: bool + + +def status_monitor_workflow(ctx: wf.DaprWorkflowContext, job: JobStatus): + # poll a status endpoint associated with this job + status = yield ctx.call_activity(check_status, input=job) + if not ctx.is_replaying: + print(f"Job '{job.job_id}' is {status}.") + + if status == "healthy": + job.is_healthy = True + next_sleep_interval = 60 # check less frequently when healthy + else: + if job.is_healthy: + job.is_healthy = False + yield ctx.call_activity(send_alert, input=f"Job '{job.job_id}' is unhealthy!") + next_sleep_interval = 5 # check more frequently when unhealthy + + yield ctx.create_timer(fire_at=ctx.current_utc_datetime + timedelta(seconds=next_sleep_interval)) + + # restart from the beginning with a new JobStatus input + ctx.continue_as_new(job) + + +def check_status(ctx, _) -> str: + return random.choice(["healthy", "unhealthy"]) + + +def send_alert(ctx, message: str): + print(f'*** Alert: {message}') +``` + +{{% /codetab %}} + {{< /tabs >}} A workflow implementing the monitor pattern can loop forever or it can terminate itself gracefully by not calling _continue-as-new_. @@ -284,9 +420,10 @@ The following diagram illustrates this flow. The following example code shows how this pattern can be implemented using Dapr Workflow. -{{< tabs ".NET" >}} +{{< tabs ".NET" Python >}} {{% codetab %}} + ```csharp public override async Task RunAsync(WorkflowContext context, OrderPayload order) @@ -331,13 +468,73 @@ In the example above, `RequestApprovalActivity` is the name of a workflow activi {{% /codetab %}} +{{% codetab %}} + + +```python +from dataclasses import dataclass +from datetime import timedelta +import dapr.ext.workflow as wf + + +@dataclass +class Order: + cost: float + product: str + quantity: int + + def __str__(self): + return f'{self.product} ({self.quantity})' + + +@dataclass +class Approval: + approver: str + + @staticmethod + def from_dict(dict): + return Approval(**dict) + + +def purchase_order_workflow(ctx: wf.DaprWorkflowContext, order: Order): + # Orders under $1000 are auto-approved + if order.cost < 1000: + return "Auto-approved" + + # Orders of $1000 or more require manager approval + yield ctx.call_activity(send_approval_request, input=order) + + # Approvals must be received within 24 hours or they will be cancelled.
+ approval_event = ctx.wait_for_external_event("approval_received") + timeout_event = ctx.create_timer(timedelta(hours=24)) + winner = yield wf.when_any([approval_event, timeout_event]) + if winner == timeout_event: + return "Cancelled" + + # The order was approved + yield ctx.call_activity(place_order, input=order) + approval_details = Approval.from_dict(approval_event.get_result()) + return f"Approved by '{approval_details.approver}'" + + +def send_approval_request(_, order: Order) -> None: + print(f'*** Sending approval request for order: {order}') + + +def place_order(_, order: Order) -> None: + print(f'*** Placing order: {order}') +``` + +{{% /codetab %}} + {{< /tabs >}} The code that delivers the event to resume the workflow execution is external to the workflow. Workflow events can be delivered to a waiting workflow instance using the [raise event]({{< ref "howto-manage-workflow.md#raise-an-event" >}}) workflow management API, as shown in the following example: -{{< tabs ".NET" >}} +{{< tabs ".NET" Python >}} {{% codetab %}} + ```csharp // Raise the workflow event to the waiting workflow @@ -350,6 +547,23 @@ await daprClient.RaiseWorkflowEventAsync( {{% /codetab %}} +{{% codetab %}} + + +```python +from dapr.clients import DaprClient +from dataclasses import asdict + +with DaprClient() as d: + d.raise_workflow_event( + instance_id=instance_id, + workflow_component="dapr", + event_name="approval_received", + event_data=asdict(Approval("Jane Doe"))) +``` + +{{% /codetab %}} + {{< /tabs >}} External events don't have to be directly triggered by humans. They can also be triggered by other systems. For example, a workflow may need to pause and wait for a payment to be received. In this case, a payment system might publish an event to a pub/sub topic on receipt of a payment, and a listener on that topic can raise an event to the workflow using the raise event workflow API. diff --git a/daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/_index.md b/daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/_index.md index a94e4d7230b..59dce6d2305 100644 --- a/daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/_index.md +++ b/daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/_index.md @@ -3,5 +3,5 @@ type: docs title: "Authenticate to Azure" linkTitle: "Authenticate to Azure" weight: 1600 -description: "Learn about authenticating Azure components using Azure Active Directory or Managed Service Identities" +description: "Learn about authenticating Azure components using Azure Active Directory or Managed Identities" --- \ No newline at end of file diff --git a/daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/authenticating-azure.md b/daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/authenticating-azure.md index 65cc1a24883..b020548eeef 100644 --- a/daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/authenticating-azure.md +++ b/daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/authenticating-azure.md @@ -9,59 +9,74 @@ aliases: weight: 10000 --- -Certain Azure components for Dapr offer support for the *common Azure authentication layer*, which enables applications to access data stored in Azure resources by authenticating with Azure Active Directory (Azure AD). 
Thanks to this: -- Administrators can leverage all the benefits of fine-tuned permissions with Role-Based Access Control (RBAC). -- Applications running on Azure services such as Azure Container Apps, Azure Kubernetes Service, Azure VMs, or any other Azure platform services can leverage [Managed Service Identities (MSI)](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview). +Most Azure components for Dapr support authenticating with Azure AD (Azure Active Directory). Thanks to this: +- Administrators can leverage all the benefits of fine-tuned permissions with Azure Role-Based Access Control (RBAC). +- Applications running on Azure services such as Azure Container Apps, Azure Kubernetes Service, Azure VMs, or any other Azure platform services can leverage [Managed Identities (MI)](https://learn.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview) and [Workload Identity](https://learn.microsoft.com/azure/aks/workload-identity-overview). These offer the ability to authenticate your applications without having to manage sensitive credentials. ## About authentication with Azure AD Azure AD is Azure's identity and access management (IAM) solution, which is used to authenticate and authorize users and services. -Azure AD is built on top of open standards such OAuth 2.0, which allows services (applications) to obtain access tokens to make requests to Azure services, including Azure Storage, Azure Key Vault, Cosmos DB, etc. +Azure AD is built on top of open standards such as OAuth 2.0, which allows services (applications) to obtain access tokens to make requests to Azure services, including Azure Storage, Azure Service Bus, Azure Key Vault, Azure Cosmos DB, Azure Database for Postgres, Azure SQL, etc. > In Azure terminology, an application is also called a "Service Principal". -Some Azure components offer alternative authentication methods, such as systems based on "master keys" or "shared keys". Although both master keys and shared keys are valid and supported by Dapr, you should authenticate your Dapr components using Azure AD. Using Azure AD offers benefits like the following. +Some Azure components offer alternative authentication methods, such as systems based on "shared keys" or "access tokens". Although these are valid and supported by Dapr, you should authenticate your Dapr components using Azure AD whenever possible to take advantage of many benefits, including: -### Managed Service Identities +- [Managed Identities and Workload Identity](#managed-identities-and-workload-identity) +- [Role-Based Access Control](#role-based-access-control) +- [Auditing](#auditing) +- [(Optional) Authentication using certificates](#optional-authentication-using-certificates) -With Managed Service Identities (MSI), your application can authenticate with Azure AD and obtain an access token to make requests to Azure services. When your application is running on a supported Azure service, an identity for your application can be assigned at the infrastructure level. +### Managed Identities and Workload Identity + +With Managed Identities (MI), your application can authenticate with Azure AD and obtain an access token to make requests to Azure services. When your application is running on a supported Azure service (such as Azure VMs, Azure Container Apps, Azure Web Apps, etc.), an identity for your application can be assigned at the infrastructure level.
+ +When using MI, your code doesn't have to deal with credentials, which: -Once using MSI, your code doesn't have to deal with credentials, which: - Removes the challenge of managing credentials safely - Allows greater separation of concerns between development and operations teams - Reduces the number of people with access to credentials - Simplifies operational aspects, especially when multiple environments are used -### Role-based Access Control +Applications running on Azure Kubernetes Service can similarly leverage [Workload Identity](https://learn.microsoft.com/azure/aks/workload-identity-overview) to automatically provide an identity to individual pods. + +### Role-Based Access Control -When using Role-Based Access Control (RBAC) with supported services, permissions given to an application can be fine-tuned. For example, you can restrict access to a subset of data or make it read-only. +When using Azure Role-Based Access Control (RBAC) with supported services, permissions given to an application can be fine-tuned. For example, you can restrict access to a subset of data or make the access read-only. ### Auditing -Using Azure AD provides an improved auditing experience for access. +Using Azure AD provides an improved auditing experience for access. Tenant administrators can consult audit logs to track authentication requests. -### (Optional) Authenticate using certificates +### (Optional) Authentication using certificates -While Azure AD allows you to use MSI or RBAC, you still have the option to authenticate using certificates. +While Azure AD allows you to use MI, you still have the option to authenticate using certificates. ## Support for other Azure environments -By default, Dapr components are configured to interact with Azure resources in the "public cloud". If your application is deployed to another cloud, such as Azure China, Azure Government, or Azure Germany, you can enable that for supported components by setting the `azureEnvironment` metadata property to one of the supported values: +By default, Dapr components are configured to interact with Azure resources in the "public cloud". If your application is deployed to another cloud, such as Azure China or Azure Government ("sovereign clouds"), you can enable that for supported components by setting the `azureEnvironment` metadata property to one of the supported values: -- Azure public cloud (default): `"AZUREPUBLICCLOUD"` -- Azure China: `"AZURECHINACLOUD"` -- Azure Government: `"AZUREUSGOVERNMENTCLOUD"` -- Azure Germany: `"AZUREGERMANCLOUD"` +- Azure public cloud (default): `"AzurePublicCloud"` +- Azure China: `"AzureChinaCloud"` +- Azure Government: `"AzureUSGovernmentCloud"` + +> Support for sovereign clouds is experimental. ## Credentials metadata fields -To authenticate with Azure AD, you will need to add the following credentials as values in the metadata for your [Dapr component]({{< ref "#example-usage-in-a-dapr-component" >}}). +To authenticate with Azure AD, you will need to add the following credentials as values in the metadata for your [Dapr component](#example-usage-in-a-dapr-component). ### Metadata options -Depending on how you've passed credentials to your Dapr services, you have multiple metadata options. +Depending on how you've passed credentials to your Dapr services, you have multiple metadata options.
+ +- [Using client credentials](#authenticating-using-client-credentials) +- [Using a certificate](#authenticating-using-a-certificate) +- [Using Managed Identities (MI)](#authenticating-with-managed-identities-mi) +- [Using Workload Identity on AKS](#authenticating-with-workload-identity-on-aks) +- [Using Azure CLI credentials (development-only)](#authenticating-using-azure-cli-credentials-development-only) #### Authenticating using client credentials @@ -73,7 +88,7 @@ Depending on how you've passed credentials to your Dapr services, you have multi When running on Kubernetes, you can also use references to Kubernetes secrets for any or all of the values above. -#### Authenticating using a PFX certificate +#### Authenticating using a certificate | Field | Required | Details | Example | |--------|--------|--------|--------| @@ -85,27 +100,30 @@ When running on Kubernetes, you can also use references to Kubernetes secrets fo When running on Kubernetes, you can also use references to Kubernetes secrets for any or all of the values above. -#### Authenticating with Managed Service Identities (MSI) +#### Authenticating with Managed Identities (MI) | Field | Required | Details | Example | |-----------------|----------|----------------------------|------------------------------------------| | `azureClientId` | N | Client ID (application ID) | `"c7dd251f-811f-4ba2-a905-acd4d3f8f08b"` | -Using MSI, you're not required to specify any value, although you may pass `azureClientId` if needed. +Using Managed Identities, the `azureClientId` field is generally recommended. The field is optional when using a system-assigned identity, but may be required when using user-assigned identities. + +#### Authenticating with Workload Identity on AKS + +When running on Azure Kubernetes Service (AKS), you can authenticate components using Workload Identity. Refer to the Azure AKS documentation on [enabling Workload Identity](https://learn.microsoft.com/azure/aks/workload-identity-overview) for your Kubernetes resources. + +#### Authenticating using Azure CLI credentials (development-only) + +> **Important:** This authentication method is recommended for **development only**. -### Aliases +This authentication method can be useful while developing on a local machine. You will need: -For backwards-compatibility reasons, the following values in the metadata are supported as aliases. Their use is discouraged. +- The [Azure CLI installed](https://learn.microsoft.com/cli/azure/install-azure-cli) +- To have successfully authenticated using the `az login` command -| Metadata key | Aliases (supported but deprecated) | -|----------------------------|------------------------------------| -| `azureTenantId` | `spnTenantId`, `tenantId` | -| `azureClientId` | `spnClientId`, `clientId` | -| `azureClientSecret` | `spnClientSecret`, `clientSecret` | -| `azureCertificate` | `spnCertificate` | -| `azureCertificateFile` | `spnCertificateFile` | -| `azureCertificatePassword` | `spnCertificatePassword` | +When Dapr is running on a host where there are credentials available for the Azure CLI, components can use those to authenticate automatically if no other authentication method is configured. +Using this authentication method does not require setting any metadata option.
### Example usage in a Dapr component diff --git a/daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/howto-aad.md b/daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/howto-aad.md index f76cf2fc957..d1be027ca98 100644 --- a/daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/howto-aad.md +++ b/daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/howto-aad.md @@ -62,6 +62,7 @@ Save the output values returned; you'll need them for Dapr to authenticate with ``` When adding the returned values to your Dapr component's metadata: + - `appId` is the value for `azureClientId` - `password` is the value for `azureClientSecret` (this was randomly-generated) - `tenant` is the value for `azureTenantId` @@ -93,11 +94,12 @@ Save the output values returned; you'll need them for Dapr to authenticate with ``` When adding the returned values to your Dapr component's metadata: + - `appId` is the value for `azureClientId` - `tenant` is the value for `azureTenantId` - `fileWithCertAndPrivateKey` indicates the location of the self-signed PFX certificate and private key. Use the contents of that file as `azureCertificate` (or write it to a file on the server and use `azureCertificateFile`) -> **Note:** While the generated file has the `.pem` extension, it contains a certificate and private key encoded as _PFX (PKCS#12)_. +> **Note:** While the generated file has the `.pem` extension, it contains a certificate and private key encoded as PFX (PKCS#12). {{% /codetab %}} @@ -122,26 +124,13 @@ Expected output: Service Principal ID: 1d0ccf05-5427-4b5e-8eb4-005ac5f9f163 ``` -The returned value above is the **Service Principal ID**, which is different from the Azure AD application ID (client ID). - -**The Service Principal ID** is: -- Defined within an Azure tenant -- Used to grant access to Azure resources to an application - +The returned value above is the **Service Principal ID**, which is different from the Azure AD application ID (client ID). The Service Principal ID is defined within an Azure tenant and is used to grant access to Azure resources to an application. You'll use the Service Principal ID to grant permissions to an application to access Azure resources. Meanwhile, **the client ID** is used by your application to authenticate. You'll use the client ID in Dapr manifests to configure authentication with Azure services. Keep in mind that the Service Principal that was just created does not have access to any Azure resource by default. Access will need to be granted to each resource as needed, as documented in the docs for the components. -{{% alert title="Note" color="primary" %}} -This step is different from the [official Azure documentation](https://docs.microsoft.com/cli/azure/create-an-azure-service-principal-azure-cli). The short-hand commands included in the official documentation creates a Service Principal that has broad `read-write` access to all Azure resources in your subscription, which: - -- Grants your Service Principal more access than you likely desire. -- Applies _only_ to the Azure management plane (Azure Resource Manager, or ARM), which is irrelevant for Dapr components, which are designed to interact with the data plane of various services.
- -{{% /alert %}} - ## Next steps -{{< button text="Use MSI >>" page="howto-msi.md" >}} +{{< button text="Use Managed Identities >>" page="howto-mi.md" >}} diff --git a/daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/howto-msi.md b/daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/howto-mi.md similarity index 73% rename from daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/howto-msi.md rename to daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/howto-mi.md index bebfa1b784c..5eb6a8f8683 100644 --- a/daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/howto-msi.md +++ b/daprdocs/content/en/developing-applications/integrations/Azure/azure-authentication/howto-mi.md @@ -1,14 +1,16 @@ --- type: docs -title: "How to: Use Managed Service Identities" -linkTitle: "How to: Use MSI" weight: 40000 -description: "Learn how to use Managed Service Identities" +title: "How to: Use Managed Identities" +linkTitle: "How to: Use MI" +aliases: + - "/developing-applications/integrations/azure/azure-authentication/howto-msi/" +description: "Learn how to use Managed Identities" --- -Using MSI, authentication happens automatically by virtue of your application running on top of an Azure service that has an assigned identity. +Using Managed Identities (MI), authentication happens automatically by virtue of your application running on top of an Azure service that has an assigned identity. -For example, let's say you enable a managed service identity for an Azure VM, Azure Container App, or an Azure Kubernetes Service cluster. When you do, an Azure AD application is created for you and automatically assigned to the service. Your Dapr services can then leverage that identity to authenticate with Azure AD, transparently and without you having to specify any credential. +For example, let's say you enable a managed identity for an Azure VM, Azure Container App, or an Azure Kubernetes Service cluster. When you do, an Azure AD application is created for you and automatically assigned to the service. Your Dapr services can then leverage that identity to authenticate with Azure AD, transparently and without you having to specify any credentials. To get started with managed identities, you need to assign an identity to a new or existing Azure resource. The instructions depend on the service you use. Check the following official documentation for the most appropriate instructions: @@ -19,8 +21,9 @@ To get started with managed identities, you need to assign an identity to a new - [Azure Virtual Machines Scale Sets (VMSS)](https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/qs-configure-cli-windows-vmss) - [Azure Container Instance (ACI)](https://docs.microsoft.com/azure/container-instances/container-instances-managed-identity) +Dapr supports both system-assigned and user-assigned identities. -After assigning a managed identity to your Azure resource, you will have credentials such as: +After assigning an identity to your Azure resource, you will have credentials such as: ```json { @@ -31,7 +34,7 @@ After assigning a managed identity to your Azure resource, you will have credent } ``` -From the returned values, take note of **`principalId`**, which is the Service Principal ID that was created. You'll use that to grant access to Azure resources to your Service Principal.
+From the returned values, take note of **`principalId`**, which is the Service Principal ID that was created. You'll use that to grant access to Azure resources to your identity. ## Next steps diff --git a/daprdocs/content/en/developing-applications/integrations/Azure/azure-kubernetes-service-extension.md b/daprdocs/content/en/developing-applications/integrations/Azure/azure-kubernetes-service-extension.md index aae063bffdd..ccfda323f59 100644 --- a/daprdocs/content/en/developing-applications/integrations/Azure/azure-kubernetes-service-extension.md +++ b/daprdocs/content/en/developing-applications/integrations/Azure/azure-kubernetes-service-extension.md @@ -14,4 +14,10 @@ The recommended approach for installing Dapr on AKS is to use the AKS Dapr exten If you install Dapr through the AKS extension, best practice is to continue using the extension for future management of Dapr _instead of the Dapr CLI_. Combining the two tools can cause conflicts and result in undesired behavior. {{% /alert %}} +Prerequisites for using the Dapr extension for AKS: +- [An Azure subscription](https://azure.microsoft.com/free/?WT.mc_id=A261C142F) +- [The latest version of the Azure CLI](https://learn.microsoft.com/cli/azure/install-azure-cli) +- [An existing AKS cluster](https://learn.microsoft.com/azure/aks/tutorial-kubernetes-deploy-cluster) +- [The Azure Kubernetes Service RBAC Admin role](https://learn.microsoft.com/azure/role-based-access-control/built-in-roles#azure-kubernetes-service-rbac-admin) + {{< button text="Learn more about the Dapr extension for AKS" link="https://learn.microsoft.com/azure/aks/dapr" >}} diff --git a/daprdocs/content/en/developing-applications/integrations/gRPC-integration.md b/daprdocs/content/en/developing-applications/integrations/gRPC-integration.md index c7999a63798..cd3380c972d 100644 --- a/daprdocs/content/en/developing-applications/integrations/gRPC-integration.md +++ b/daprdocs/content/en/developing-applications/integrations/gRPC-integration.md @@ -132,7 +132,7 @@ The following steps will show how to create an app that exposes a server for wit "github.com/golang/protobuf/ptypes/empty" commonv1pb "github.com/dapr/dapr/pkg/proto/common/v1" - pb "github.com/dapr/go-sdk/dapr/proto/runtime/v1" + pb "github.com/dapr/dapr/pkg/proto/runtime/v1" "google.golang.org/grpc" ) ``` diff --git a/daprdocs/content/en/developing-applications/local-development/ides/vscode/vscode-how-to-debug-multiple-dapr-apps.md b/daprdocs/content/en/developing-applications/local-development/ides/vscode/vscode-how-to-debug-multiple-dapr-apps.md index a2085158100..86569fb2fe1 100644 --- a/daprdocs/content/en/developing-applications/local-development/ides/vscode/vscode-how-to-debug-multiple-dapr-apps.md +++ b/daprdocs/content/en/developing-applications/local-development/ides/vscode/vscode-how-to-debug-multiple-dapr-apps.md @@ -176,7 +176,7 @@ Below are the supported parameters for VS Code tasks. These parameters are equiv | `appProtocol` | Tells Dapr which protocol your application is using. Valid options are `http`, `grpc`, `https`, `grpcs`, `h2c`. Default is `http`. | No | `"appProtocol": "http"` | `args` | Sets a list of arguments to pass on to the Dapr app | No | "args": [] | `componentsPath` | Path for components directory. If empty, components will not be loaded. 
| No | `"componentsPath": "./components"` -| `config` | Tells Dapr which Configuration CRD to use | No | `"config": "./config"` +| `config` | Tells Dapr which Configuration resource to use | No | `"config": "./config"` | `controlPlaneAddress` | Address for a Dapr control plane | No | `"controlPlaneAddress": "http://localhost:1366/"` | `enableProfiling` | Enable profiling | No | `"enableProfiling": false` | `enableMtls` | Enables automatic mTLS for daprd to daprd communication channels | No | `"enableMtls": false` diff --git a/daprdocs/content/en/developing-applications/local-development/multi-app-dapr-run/multi-app-template.md b/daprdocs/content/en/developing-applications/local-development/multi-app-dapr-run/multi-app-template.md index 6dad64c9f46..8bca3008036 100644 --- a/daprdocs/content/en/developing-applications/local-development/multi-app-dapr-run/multi-app-template.md +++ b/daprdocs/content/en/developing-applications/local-development/multi-app-dapr-run/multi-app-template.md @@ -83,7 +83,7 @@ apps: appProtocol: http appPort: 8080 appHealthCheckPath: "/healthz" - command: ["python3" "app.py"] + command: ["python3", "app.py"] appLogDestination: file # (optional), can be file, console or fileAndConsole. default is fileAndConsole. daprdLogDestination: file # (optional), can be file, console or fileAndConsole. default is file. - appID: backend # optional diff --git a/daprdocs/content/en/developing-applications/sdks/_index.md b/daprdocs/content/en/developing-applications/sdks/_index.md index 64a774f21c0..321d053cd19 100644 --- a/daprdocs/content/en/developing-applications/sdks/_index.md +++ b/daprdocs/content/en/developing-applications/sdks/_index.md @@ -11,34 +11,25 @@ The Dapr SDKs are the easiest way for you to get Dapr into your application. Cho ## SDK packages -- **Client SDK**: The Dapr client allows you to invoke Dapr building block APIs and perform actions such as: - - [Invoke]({{< ref service-invocation >}}) methods on other services - - Store and get [state]({{< ref state-management >}}) - - [Publish and subscribe]({{< ref pubsub >}}) to message topics - - Interact with external resources through input and output [bindings]({{< ref bindings >}}) - - Get [secrets]({{< ref secrets >}}) from secret stores - - Interact with [virtual actors]({{< ref actors >}}) -- **Server extensions**: The Dapr service extensions allow you to create services that can: - - Be [invoked]({{< ref service-invocation >}}) by other services - - [Subscribe]({{< ref pubsub >}}) to topics -- **Actor SDK**: The Dapr Actor SDK allows you to build virtual actors with: - - Methods that can be [invoked]({{< ref "howto-actors.md#actor-method-invocation" >}}) by other services - - [State]({{< ref "howto-actors.md#actor-state-management" >}}) that can be stored and retrieved - - [Timers]({{< ref "howto-actors.md#actor-timers" >}}) with callbacks - - Persistent [reminders]({{< ref "howto-actors.md#actor-reminders" >}}) +Select your [preferred language below]({{< ref "#sdk-languages" >}}) to learn more about client, server, actor, and workflow packages. 
+ +- **Client**: The Dapr client allows you to invoke Dapr building block APIs and perform actions for each building block +- **Server extensions**: The Dapr service extensions allow you to create services that can be invoked by other services and subscribe to topics +- **Actor**: The Dapr Actor SDK allows you to build virtual actors with methods, state, timers, and persistent reminders +- **Workflow**: Dapr Workflow makes it easy for you to write long-running business logic and integrations in a reliable way ## SDK languages -| Language | Status | Client SDK | Server extensions | Actor SDK | -|----------|:------|:----------:|:-----------:|:---------:| -| [.NET]({{< ref dotnet >}}) | Stable | ✔ | [ASP.NET Core](https://github.com/dapr/dotnet-sdk/tree/master/examples/AspNetCore) | ✔ | -| [Python]({{< ref python >}}) | Stable | ✔ | [gRPC]({{< ref python-grpc.md >}})
[FastAPI]({{< ref python-fastapi.md >}})
[Flask]({{< ref python-flask.md >}})| ✔ | -| [Java]({{< ref java >}}) | Stable | ✔ | Spring Boot | ✔ | -| [Go]({{< ref go >}}) | Stable | ✔ | ✔ | ✔ | -| [PHP]({{< ref php >}}) | Stable | ✔ | ✔ | ✔ | -| [Javascript]({{< ref js >}}) | Stable| ✔ | | ✔ | -| [C++](https://github.com/dapr/cpp-sdk) | In development | ✔ | | -| [Rust](https://github.com/dapr/rust-sdk) | In development | ✔ | | | +| Language | Status | Client | Server extensions | Actor | Workflow | +|----------|:------|:----------:|:-----------:|:---------:|:---------:| +| [.NET]({{< ref dotnet >}}) | Stable | ✔ | [ASP.NET Core](https://github.com/dapr/dotnet-sdk/tree/master/examples/AspNetCore) | ✔ | ✔ | +| [Python]({{< ref python >}}) | Stable | ✔ | [gRPC]({{< ref python-grpc.md >}})
[FastAPI]({{< ref python-fastapi.md >}})
[Flask]({{< ref python-flask.md >}})| ✔ | ✔ | +| [Java]({{< ref java >}}) | Stable | ✔ | Spring Boot | ✔ | | +| [Go]({{< ref go >}}) | Stable | ✔ | ✔ | ✔ | | +| [PHP]({{< ref php >}}) | Stable | ✔ | ✔ | ✔ | | +| [Javascript]({{< ref js >}}) | Stable| ✔ | | ✔ | | +| [C++](https://github.com/dapr/cpp-sdk) | In development | ✔ | | | +| [Rust](https://github.com/dapr/rust-sdk) | In development | ✔ | | | | ## Further reading diff --git a/daprdocs/content/en/getting-started/quickstarts/bindings-quickstart.md b/daprdocs/content/en/getting-started/quickstarts/bindings-quickstart.md index bac891b5ae3..b818c434836 100644 --- a/daprdocs/content/en/getting-started/quickstarts/bindings-quickstart.md +++ b/daprdocs/content/en/getting-started/quickstarts/bindings-quickstart.md @@ -189,6 +189,8 @@ spec: metadata: - name: schedule value: "@every 10s" # valid cron schedule + - name: direction + value: "input" # direction of the cron binding ``` **Note:** The `metadata` section of `binding-cron.yaml` contains a [Cron expression]({{< ref cron.md >}}) that specifies how often the binding is invoked. @@ -216,6 +218,8 @@ spec: metadata: - name: url # Required value: "user=postgres password=docker host=localhost port=5432 dbname=orders pool_min_conns=1 pool_max_conns=10" + - name: direction + value: "output" # direction of the postgresql binding ``` In the YAML file: @@ -391,6 +395,8 @@ spec: metadata: - name: schedule value: "@every 10s" # valid cron schedule + - name: direction + value: "input" # direction of the cron binding ``` **Note:** The `metadata` section of `binding-cron.yaml` contains a [Cron expression]({{< ref cron.md >}}) that specifies how often the binding is invoked. @@ -418,6 +424,8 @@ spec: metadata: - name: url # Required value: "user=postgres password=docker host=localhost port=5432 dbname=orders pool_min_conns=1 pool_max_conns=10" + - name: direction + value: "output" # direction of the postgresql binding ``` In the YAML file: @@ -595,6 +603,8 @@ spec: metadata: - name: schedule value: "@every 10s" # valid cron schedule + - name: direction + value: "input" # direction of the cron binding ``` **Note:** The `metadata` section of `binding-cron.yaml` contains a [Cron expression]({{< ref cron.md >}}) that specifies how often the binding is invoked. @@ -622,6 +632,8 @@ spec: metadata: - name: url # Required value: "user=postgres password=docker host=localhost port=5432 dbname=orders pool_min_conns=1 pool_max_conns=10" + - name: direction + value: "output" # direction of the postgresql binding ``` In the YAML file: @@ -805,6 +817,8 @@ spec: metadata: - name: schedule value: "@every 10s" # valid cron schedule + - name: direction + value: "input" # direction of the cron binding ``` **Note:** The `metadata` section of `binding-cron.yaml` contains a [Cron expression]({{< ref cron.md >}}) that specifies how often the binding is invoked. @@ -832,6 +846,8 @@ spec: metadata: - name: url # Required value: "user=postgres password=docker host=localhost port=5432 dbname=orders pool_min_conns=1 pool_max_conns=10" + - name: direction + value: "output" # direction of the postgresql binding ``` In the YAML file: @@ -1017,6 +1033,8 @@ spec: metadata: - name: schedule value: "@every 10s" # valid cron schedule + - name: direction + value: "input" # direction of the cron binding ``` **Note:** The `metadata` section of `binding-cron.yaml` contains a [Cron expression]({{< ref cron.md >}}) that specifies how often the binding is invoked. 
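+On each tick of the schedule, the Dapr sidecar delivers the event to your application over HTTP at a route matching the binding component's `metadata.name`. As a minimal sketch (assuming a Python Flask app and a binding component named `cron`; both are illustrative, not part of this quickstart):
+
+```python
+from flask import Flask
+
+app = Flask(__name__)
+
+
+# Dapr calls POST /<binding-name> on every cron tick;
+# returning HTTP 200 acknowledges the event.
+@app.route('/cron', methods=['POST'])
+def handle_cron_tick():
+    print('cron binding triggered', flush=True)
+    return '', 200
+```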
@@ -1044,6 +1062,8 @@ spec: metadata: - name: url # Required value: "user=postgres password=docker host=localhost port=5432 dbname=orders pool_min_conns=1 pool_max_conns=10" + - name: direction + value: "output" # direction of the postgresql binding ``` In the YAML file: diff --git a/daprdocs/content/en/getting-started/quickstarts/serviceinvocation-quickstart.md b/daprdocs/content/en/getting-started/quickstarts/serviceinvocation-quickstart.md index ba5f56523bb..fc61df703cf 100644 --- a/daprdocs/content/en/getting-started/quickstarts/serviceinvocation-quickstart.md +++ b/daprdocs/content/en/getting-started/quickstarts/serviceinvocation-quickstart.md @@ -600,7 +600,7 @@ go build . Run the `order-processor` service alongside a Dapr sidecar. ```bash -dapr run --app-port 6001 --app-id order-processor --app-protocol http --dapr-http-port 3501 -- go run . +dapr run --app-port 6006 --app-id order-processor --app-protocol http --dapr-http-port 3501 -- go run . ``` Each order is received via an HTTP POST request and processed by the diff --git a/daprdocs/content/en/getting-started/quickstarts/workflow-quickstart.md b/daprdocs/content/en/getting-started/quickstarts/workflow-quickstart.md index 0e428007db0..d139561cb04 100644 --- a/daprdocs/content/en/getting-started/quickstarts/workflow-quickstart.md +++ b/daprdocs/content/en/getting-started/quickstarts/workflow-quickstart.md @@ -12,12 +12,6 @@ The workflow building block is currently in **alpha**. Let's take a look at the Dapr [Workflow building block]({{< ref workflow >}}). In this Quickstart, you'll create a simple console application to demonstrate Dapr's workflow programming model and the workflow management APIs. -The `order-processor` console app starts and manages the lifecycle of the `OrderProcessingWorkflow` workflow that stores and retrieves data in a state store. The workflow consists of four workflow activities, or tasks: -- `NotifyActivity`: Utilizes a logger to print out messages throughout the workflow -- `ReserveInventoryActivity`: Checks the state store to ensure that there is enough inventory for the purchase -- `ProcessPaymentActivity`: Processes and authorizes the payment -- `UpdateInventoryActivity`: Removes the requested items from the state store and updates the store with the new remaining inventory value - In this guide, you'll: - Run the `order-processor` application. @@ -26,13 +20,19 @@ In this guide, you'll: -Currently, you can experience the Dapr Workflow using the .NET SDK. {{< tabs ".NET" "Python" >}} {{% codetab %}} +The `order-processor` console app starts and manages the lifecycle of an order processing workflow that stores and retrieves data in a state store. The workflow consists of four workflow activities, or tasks: +- `NotifyActivity`: Utilizes a logger to print out messages throughout the workflow +- `ReserveInventoryActivity`: Checks the state store to ensure that there is enough inventory for the purchase +- `ProcessPaymentActivity`: Processes and authorizes the payment +- `UpdateInventoryActivity`: Removes the requested items from the state store and updates the store with the new remaining inventory value + + ### Step 1: Pre-requisites For this example, you will need: @@ -259,6 +259,16 @@ The `Activities` directory holds the four workflow activities used by the workfl {{% codetab %}} +The `order-processor` console app starts and manages the `order_processing_workflow`, which simulates purchasing items from a store. 
The workflow consists of five unique workflow activities, or tasks: + +- `notify_activity`: Utilizes a logger to print out messages throughout the workflow. These messages notify you when: + - You have insufficient inventory + - Your payment couldn't be processed, etc. +- `process_payment_activity`: Processes and authorizes the payment. +- `verify_inventory_activity`: Checks the state store to ensure there is enough inventory present for purchase. +- `update_inventory_activity`: Removes the requested items from the state store and updates the store with the new remaining inventory value. +- `request_approval_activity`: Seeks approval from the manager if payment is greater than 50,000 USD. + ### Step 1: Pre-requisites For this example, you will need: diff --git a/daprdocs/content/en/operations/components/setup-pubsub/_index.md b/daprdocs/content/en/operations/components/setup-pubsub/_index.md index aa3255aa528..0c0e4fe6c09 100644 --- a/daprdocs/content/en/operations/components/setup-pubsub/_index.md +++ b/daprdocs/content/en/operations/components/setup-pubsub/_index.md @@ -42,6 +42,9 @@ Even though metadata values can contain secrets in plain text, it is recommended Depending on the pub/sub message bus you are using and how it is configured, topics may be created automatically. Even if the message bus supports automatic topic creation, it is a common governance practice to disable it in production environments. You may still need to use a CLI, admin console, or request form to manually create the topics required by your application. {{% /alert %}} +While all pub/sub components support `consumerID` metadata, the runtime creates a consumer ID if you do not supply one. All component metadata field values can carry [templated metadata values]({{< ref "component-schema.md#templated-metadata-values" >}}), which are resolved on Dapr sidecar startup. +For example, you can use `{namespace}` as the `consumerGroup` so that the same `appId` can be used in different namespaces, with each consuming the same topics independently, as described in [this article]({{< ref "howto-namespace.md#with-namespace-consumer-groups">}}). + Visit [this guide]({{< ref "howto-publish-subscribe.md#step-3-publish-a-topic" >}}) for instructions on configuring and using pub/sub components. ## Related links diff --git a/daprdocs/content/en/operations/components/setup-pubsub/pubsub-namespaces.md b/daprdocs/content/en/operations/components/setup-pubsub/pubsub-namespaces.md index 81099ca5cf5..612d84bce39 100644 --- a/daprdocs/content/en/operations/components/setup-pubsub/pubsub-namespaces.md +++ b/daprdocs/content/en/operations/components/setup-pubsub/pubsub-namespaces.md @@ -30,6 +30,10 @@ The table below shows which resources are deployed to which namespaces: | Python subscriber | X | | | React UI publisher | | X | +{{% alert title="Note" color="primary" %}} +All pub/sub components support limiting pub/sub topics to specific applications using [namespace or component scopes]({{< ref pubsub-scopes.md >}}). +{{% /alert %}} + ## Pre-requisites * [Dapr installed on Kubernetes]({{< ref "kubernetes-deploy.md" >}}) in any namespace since Dapr works at the cluster level.
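+As an illustration of the templated `consumerGroup` metadata described above, a pub/sub component can resolve `{namespace}` when the sidecar starts, letting the same `appId` consume the same topics from different namespaces. A minimal sketch (the Kafka component type and broker address are placeholders, not part of this guide):
+
+```yaml
+apiVersion: dapr.io/v1alpha1
+kind: Component
+metadata:
+  name: pubsub
+spec:
+  type: pubsub.kafka
+  version: v1
+  metadata:
+  - name: brokers
+    value: "my-broker:9092"  # placeholder broker address
+  - name: consumerGroup
+    value: "{namespace}"     # resolved to the app's namespace at sidecar startup
+```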
diff --git a/daprdocs/content/en/operations/configuration/configuration-overview.md b/daprdocs/content/en/operations/configuration/configuration-overview.md index 1f04714d7ba..f900beed05c 100644 --- a/daprdocs/content/en/operations/configuration/configuration-overview.md +++ b/daprdocs/content/en/operations/configuration/configuration-overview.md @@ -18,13 +18,13 @@ A Dapr sidecar can also apply a configuration by using a `--config` flag to the #### Kubernetes sidecar -In Kubernetes mode the Dapr configuration is a Configuration CRD, that is applied to the cluster. For example: +In Kubernetes mode, the Dapr configuration is a Configuration resource that is applied to the cluster. For example: ```bash kubectl apply -f myappconfig.yaml ``` -You can use the Dapr CLI to list the Configuration CRDs +You can use the Dapr CLI to list the Configuration resources ```bash dapr configurations -k ``` @@ -269,11 +269,11 @@ spec: action: allow ``` -## Control-plane configuration +## Control plane configuration There is a single configuration file called `daprsystem` installed with the Dapr control plane system services that applies global settings. This is only set up when Dapr is deployed to Kubernetes. -### Control-plane configuration settings +### Control plane configuration settings A Dapr control plane configuration contains the following sections: diff --git a/daprdocs/content/en/operations/configuration/secret-scope.md b/daprdocs/content/en/operations/configuration/secret-scope.md index a937f56e58a..37ba0ff1873 100644 --- a/daprdocs/content/en/operations/configuration/secret-scope.md +++ b/daprdocs/content/en/operations/configuration/secret-scope.md @@ -3,12 +3,12 @@ type: docs title: "How-To: Limit the secrets that can be read from secret stores" linkTitle: "Limit secret store access" weight: 3000 -description: "To limit the secrets to which the Dapr application has access, users can define secret scopes by augmenting existing configuration CRD with restrictive permissions." +description: "To limit the secrets to which the Dapr application has access, users can define secret scopes by augmenting the existing configuration resource with restrictive permissions." --- In addition to scoping which applications can access a given component, for example a secret store component (see [Scoping components]({{< ref "component-scopes.md">}})), a named secret store component itself can be scoped to one or more secrets for an application. By defining `allowedSecrets` and/or `deniedSecrets` lists, applications can be restricted to access only specific secrets. -Follow [these instructions]({{< ref "configuration-overview.md" >}}) to define a configuration CRD. +Follow [these instructions]({{< ref "configuration-overview.md" >}}) to define a configuration resource. ## Configure secrets access diff --git a/daprdocs/content/en/operations/hosting/kubernetes/cluster/setup-aks.md b/daprdocs/content/en/operations/hosting/kubernetes/cluster/setup-aks.md index 87d27dbc190..8ef25a899f3 100644 --- a/daprdocs/content/en/operations/hosting/kubernetes/cluster/setup-aks.md +++ b/daprdocs/content/en/operations/hosting/kubernetes/cluster/setup-aks.md @@ -1,56 +1,63 @@ --- type: docs -title: "Setup an Azure Kubernetes Service (AKS) cluster" +title: "Set up an Azure Kubernetes Service (AKS) cluster" linkTitle: "Azure Kubernetes Service (AKS)" weight: 2000 description: > - How to setup Dapr on an Azure Kubernetes Cluster.
+ Learn how to set up an Azure Kubernetes Cluster --- -# Set up an Azure Kubernetes Service cluster +This guide walks you through installing an Azure Kubernetes Service (AKS) cluster. If you need more information, refer to [Quickstart: Deploy an AKS cluster using the Azure CLI](https://docs.microsoft.com/azure/aks/kubernetes-walkthrough) ## Prerequisites -- [Docker](https://docs.docker.com/install/) -- [kubectl](https://kubernetes.io/docs/tasks/tools/) -- [Azure CLI](https://docs.microsoft.com/cli/azure/install-azure-cli?view=azure-cli-latest) +- Install: + - [Docker](https://docs.docker.com/install/) + - [kubectl](https://kubernetes.io/docs/tasks/tools/) + - [Azure CLI](https://docs.microsoft.com/cli/azure/install-azure-cli) -## Deploy an Azure Kubernetes Service cluster +## Deploy an AKS cluster -This guide walks you through installing an Azure Kubernetes Service cluster. If you need more information, refer to [Quickstart: Deploy an Azure Kubernetes Service (AKS) cluster using the Azure CLI](https://docs.microsoft.com/azure/aks/kubernetes-walkthrough) +1. In the terminal, log into Azure. -1. Login to Azure + ```bash + az login + ``` -```bash -az login -``` +1. Set your default subscription: -2. Set the default subscription + ```bash + az account set -s [your_subscription_id] + ``` -```bash -az account set -s [your_subscription_id] -``` +1. Create a resource group. -3. Create a resource group + ```bash + az group create --name [your_resource_group] --location [region] + ``` -```bash -az group create --name [your_resource_group] --location [region] -``` +1. Create an AKS cluster. To use a specific version of Kubernetes, use `--kubernetes-version` (1.13.x or newer version required). -4. Create an Azure Kubernetes Service cluster + ```bash + az aks create --resource-group [your_resource_group] --name [your_aks_cluster_name] --node-count 2 --enable-addons http_application_routing --generate-ssh-keys + ``` -> **Note:** To use a specific version of Kubernetes use `--kubernetes-version` (1.13.x or newer version required) +1. Get the access credentials for the AKS cluster. -```bash -az aks create --resource-group [your_resource_group] --name [your_aks_cluster_name] --node-count 2 --enable-addons http_application_routing --generate-ssh-keys -``` + ```bash + az aks get-credentials -n [your_aks_cluster_name] -g [your_resource_group] + ``` -5. Get the access credentials for the Azure Kubernetes cluster +## AKS Edge Essentials +To create a single-machine K8s/K3s Linux-only cluster using Azure Kubernetes Service (AKS) Edge Essentials, you can follow the quickstart guide available at [AKS Edge Essentials quickstart guide](https://learn.microsoft.com/en-us/azure/aks/hybrid/aks-edge-quickstart). -```bash -az aks get-credentials -n [your_aks_cluster_name] -g [your_resource_group] -``` +{{% alert title="Note" color="primary" %}} +AKS Edge Essentials does not come with a default storage class, which may cause issues when deploying Dapr. To avoid this, make sure to enable the **local-path-provisioner** storage class on the cluster before deploying Dapr. If you need more information, refer to [Local Path Provisioner on AKS EE](https://learn.microsoft.com/azure/aks/hybrid/aks-edge-howto-use-storage-local-path). 
+{{% /alert %}} -## Next steps +## Related links -{{< button text="Install Dapr using the AKS Dapr extension >>" page="azure-kubernetes-service-extension" >}} +- Learn more about [the Dapr extension for AKS]({{< ref azure-kubernetes-service-extension >}}) + - [Install the Dapr extension for AKS](https://learn.microsoft.com/azure/aks/dapr) + - [Configure the Dapr extension for AKS](https://learn.microsoft.com/azure/aks/dapr-settings) + - [Deploy and run workflows with the Dapr extension for AKS](https://learn.microsoft.com/azure/aks/dapr-workflow) diff --git a/daprdocs/content/en/operations/hosting/kubernetes/cluster/setup-gke.md b/daprdocs/content/en/operations/hosting/kubernetes/cluster/setup-gke.md index fad705de928..8fb20880dc8 100644 --- a/daprdocs/content/en/operations/hosting/kubernetes/cluster/setup-gke.md +++ b/daprdocs/content/en/operations/hosting/kubernetes/cluster/setup-gke.md @@ -1,55 +1,86 @@ --- type: docs -title: "Setup a Google Kubernetes Engine (GKE) cluster" +title: "Set up a Google Kubernetes Engine (GKE) cluster" linkTitle: "Google Kubernetes Engine (GKE)" weight: 3000 -description: "Setup a Google Kubernetes Engine cluster" +description: "Set up a Google Kubernetes Engine cluster" --- ### Prerequisites -- [kubectl](https://kubernetes.io/docs/tasks/tools/) -- [Google Cloud SDK](https://cloud.google.com/sdk) +- Install: + - [kubectl](https://kubernetes.io/docs/tasks/tools/) + - [Google Cloud SDK](https://cloud.google.com/sdk) ## Create a new cluster + +Create a GKE cluster by running the following: + ```bash $ gcloud services enable container.googleapis.com && \ gcloud container clusters create $CLUSTER_NAME \ --zone $ZONE \ --project $PROJECT_ID ``` -For more options refer to the [Google Cloud SDK docs](https://cloud.google.com/sdk/gcloud/reference/container/clusters/create), or instead create a cluster through the [Cloud Console](https://console.cloud.google.com/kubernetes) for a more interactive experience. +For more options: +- Refer to the [Google Cloud SDK docs](https://cloud.google.com/sdk/gcloud/reference/container/clusters/create). +- Create a cluster through the [Cloud Console](https://console.cloud.google.com/kubernetes) for a more interactive experience. + +## Sidecar injection for private GKE clusters + +_**Sidecar injection for private clusters requires extra steps.**_ -{{% alert title="For private GKE clusters" color="warning" %}} -Sidecar injection will not work for private clusters without extra steps. An automatically created firewall rule for master access does not open port 4000. This is needed for Dapr sidecar injection. +In private GKE clusters, an automatically created firewall rule for master access doesn't open port 4000, which Dapr needs for sidecar injection. + +Review the relevant firewall rule: -To review the relevant firewall rule: ```bash $ gcloud compute firewall-rules list --filter="name~gke-${CLUSTER_NAME}-[0-9a-z]*-master" ``` -To replace the existing rule and allow kubernetes master access to port 4000: +Replace the existing rule and allow Kubernetes master access to port 4000: + ```bash $ gcloud compute firewall-rules update --allow tcp:10250,tcp:443,tcp:4000 ``` -{{% /alert %}} ## Retrieve your credentials for `kubectl` +Run the following command to retrieve your credentials: + ```bash $ gcloud container clusters get-credentials $CLUSTER_NAME \ --zone $ZONE \ --project $PROJECT_ID ``` -## (optional) Install Helm v3 +## Install Helm v3 (optional) -1. 
[Install Helm v3 client](https://helm.sh/docs/intro/install/) +If you are using Helm, install the [Helm v3 client](https://helm.sh/docs/intro/install/). -> **Note:** The latest Dapr helm chart no longer supports Helm v2. Please migrate from helm v2 to helm v3 by following [this guide](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/). +{{% alert title="Important" color="warning" %}} +The latest Dapr Helm chart no longer supports Helm v2. [Migrate from Helm v2 to Helm v3](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/). +{{% /alert %}} + +## Troubleshooting -2. In case you need permissions the kubernetes dashboard (i.e. configmaps is forbidden: User "system:serviceaccount:kube-system:kubernetes-dashboard" cannot list configmaps in the namespace "default", etc.) execute this command +### Kubernetes dashboard permissions + +Let's say you receive an error message similar to the following: + +``` +configmaps is forbidden: User "system:serviceaccount:kube-system:kubernetes-dashboard" cannot list configmaps in the namespace "default" +``` + +Execute this command: ```bash kubectl create clusterrolebinding kubernetes-dashboard -n kube-system --clusterrole=cluster-admin --serviceaccount=kube-system:kubernetes-dashboard ``` + +## Related links +- [Learn more about GKE clusters](https://cloud.google.com/kubernetes-engine/docs) +- [Try out a Dapr quickstart]({{< ref quickstarts.md >}}) +- Learn how to [deploy Dapr on your cluster]({{< ref kubernetes-deploy.md >}}) +- [Upgrade Dapr on Kubernetes]({{< ref kubernetes-upgrade.md >}}) +- [Kubernetes production guidelines]({{< ref kubernetes-production.md >}}) \ No newline at end of file diff --git a/daprdocs/content/en/operations/hosting/kubernetes/cluster/setup-kind.md b/daprdocs/content/en/operations/hosting/kubernetes/cluster/setup-kind.md index 528781b1bfe..00d34d93ac8 100644 --- a/daprdocs/content/en/operations/hosting/kubernetes/cluster/setup-kind.md +++ b/daprdocs/content/en/operations/hosting/kubernetes/cluster/setup-kind.md @@ -4,108 +4,117 @@ title: "Set up a KiND cluster" linkTitle: "KiND" weight: 1100 description: > - How to set up Dapr on a KiND cluster. + How to set up a KiND cluster --- -# Set up a KiND cluster - ## Prerequisites -- [Docker](https://docs.docker.com/install/) -- [kubectl](https://kubernetes.io/docs/tasks/tools/) - -> Note: For Windows, enable Virtualization in BIOS and [install Hyper-V](https://docs.microsoft.com/virtualization/hyper-v-on-windows/quick-start/enable-hyper-v) +- Install: + - [Docker](https://docs.docker.com/install/) + - [kubectl](https://kubernetes.io/docs/tasks/tools/) +- For Windows: + - Enable Virtualization in BIOS + - [Install Hyper-V](https://docs.microsoft.com/virtualization/hyper-v-on-windows/quick-start/enable-hyper-v) ## Install and configure KiND -Make sure you follow one of the [Installation](https://kind.sigs.k8s.io/docs/user/quick-start) options for KiND. +[Refer to the KiND documentation to install.](https://kind.sigs.k8s.io/docs/user/quick-start) -In case you are using Docker Desktop, double-check that you have performed the recommended [settings](https://kind.sigs.k8s.io/docs/user/quick-start#settings-for-docker-desktop) (4 CPUs and 8 GiB of RAM available to Docker Engine). +If you are using Docker Desktop, verify that you have [the recommended settings](https://kind.sigs.k8s.io/docs/user/quick-start#settings-for-docker-desktop). ## Configure and create the KiND cluster 1. 
Create a file named `kind-cluster-config.yaml`, and paste the following: -```yaml -kind: Cluster -apiVersion: kind.x-k8s.io/v1alpha4 -nodes: -- role: control-plane - kubeadmConfigPatches: - - | - kind: InitConfiguration - nodeRegistration: - kubeletExtraArgs: - node-labels: "ingress-ready=true" - extraPortMappings: - - containerPort: 80 - hostPort: 8081 - protocol: TCP - - containerPort: 443 - hostPort: 8443 - protocol: TCP -- role: worker -- role: worker -``` - -This is going to request KiND to spin up a kubernetes cluster comprised of a control plane and two worker nodes. It also allows for future setup of ingresses and exposes container ports to the host machine. - -2. Run the `kind create cluster` providing the cluster configuration file: - -```bash -kind create cluster --config kind-cluster-config.yaml -``` - -Wait until the cluster is created, the output should look like this: - -```md -Creating cluster "kind" ... - ✓ Ensuring node image (kindest/node:v1.21.1) 🖼 - ✓ Preparing nodes 📦 📦 📦 - ✓ Writing configuration 📜 - ✓ Starting control-plane 🕹️ - ✓ Installing CNI 🔌 - ✓ Installing StorageClass 💾 - ✓ Joining worker nodes 🚜 -Set kubectl context to "kind-kind" -You can now use your cluster with: - -kubectl cluster-info --context kind-kind - -Thanks for using kind! 😊 -``` - -## Dapr - -1. Initialize Dapr: -```bash -dapr init --kubernetes -``` - -Once Dapr finishes initializing its core components are ready to be used on the cluster. - -To verify the status of these components run: -```bash -dapr status -k -``` -the output should look like this: - -```md - NAME NAMESPACE HEALTHY STATUS REPLICAS VERSION AGE CREATED - dapr-sentry dapr-system True Running 1 1.5.1 53s 2021-12-10 09:27.17 - dapr-operator dapr-system True Running 1 1.5.1 53s 2021-12-10 09:27.17 - dapr-sidecar-injector dapr-system True Running 1 1.5.1 53s 2021-12-10 09:27.17 - dapr-dashboard dapr-system True Running 1 0.9.0 53s 2021-12-10 09:27.17 - dapr-placement-server dapr-system True Running 1 1.5.1 52s 2021-12-10 09:27.18 -``` - -2. Forward a port to [Dapr dashboard](https://docs.dapr.io/reference/cli/dapr-dashboard/): - -```bash -dapr dashboard -k -p 9999 -``` - -So that you can validate that the setup finished successfully by navigating to `http://localhost:9999`. - -## Next steps -- [Try out a Dapr quickstart]({{< ref quickstarts.md >}}) + ```yaml + kind: Cluster + apiVersion: kind.x-k8s.io/v1alpha4 + nodes: + - role: control-plane + kubeadmConfigPatches: + - | + kind: InitConfiguration + nodeRegistration: + kubeletExtraArgs: + node-labels: "ingress-ready=true" + extraPortMappings: + - containerPort: 80 + hostPort: 8081 + protocol: TCP + - containerPort: 443 + hostPort: 8443 + protocol: TCP + - role: worker + - role: worker + ``` + + This cluster configuration: + - Requests KiND to spin up a Kubernetes cluster comprised of a control plane and two worker nodes. + - Allows for future setup of ingresses. + - Exposes container ports to the host machine. + +1. Run the `kind create cluster` command, providing the cluster configuration file: + + ```bash + kind create cluster --config kind-cluster-config.yaml + ``` + + **Expected output** + + ```md + Creating cluster "kind" ... + ✓ Ensuring node image (kindest/node:v1.21.1) 🖼 + ✓ Preparing nodes 📦 📦 📦 + ✓ Writing configuration 📜 + ✓ Starting control-plane 🕹️ + ✓ Installing CNI 🔌 + ✓ Installing StorageClass 💾 + ✓ Joining worker nodes 🚜 + Set kubectl context to "kind-kind" + You can now use your cluster with: + + kubectl cluster-info --context kind-kind + + Thanks for using kind! 
😊
+   ```
+
+## Initialize and run Dapr
+
+1. Initialize Dapr in Kubernetes.
+
+   ```bash
+   dapr init --kubernetes
+   ```
+
+   Once Dapr finishes initializing, you can use its core components on the cluster.
+
+1. Verify the status of the Dapr components:
+
+   ```bash
+   dapr status -k
+   ```
+
+   **Expected output**
+
+   ```md
+   NAME                   NAMESPACE    HEALTHY  STATUS   REPLICAS  VERSION  AGE  CREATED
+   dapr-sentry            dapr-system  True     Running  1         1.5.1    53s  2021-12-10 09:27.17
+   dapr-operator          dapr-system  True     Running  1         1.5.1    53s  2021-12-10 09:27.17
+   dapr-sidecar-injector  dapr-system  True     Running  1         1.5.1    53s  2021-12-10 09:27.17
+   dapr-dashboard         dapr-system  True     Running  1         0.9.0    53s  2021-12-10 09:27.17
+   dapr-placement-server  dapr-system  True     Running  1         1.5.1    52s  2021-12-10 09:27.18
+   ```
+
+1. Forward a port to [Dapr dashboard](https://docs.dapr.io/reference/cli/dapr-dashboard/):
+
+   ```bash
+   dapr dashboard -k -p 9999
+   ```
+
+1. Navigate to `http://localhost:9999` to validate a successful setup.
+
+## Related links
+- [Try out a Dapr quickstart]({{< ref quickstarts.md >}})
+- Learn how to [deploy Dapr on your cluster]({{< ref kubernetes-deploy.md >}})
+- [Upgrade Dapr on Kubernetes]({{< ref kubernetes-upgrade.md >}})
+- [Kubernetes production guidelines]({{< ref kubernetes-production.md >}})
\ No newline at end of file
diff --git a/daprdocs/content/en/operations/hosting/kubernetes/cluster/setup-minikube.md b/daprdocs/content/en/operations/hosting/kubernetes/cluster/setup-minikube.md
index 3880e4fa95b..78f4c0786a0 100644
--- a/daprdocs/content/en/operations/hosting/kubernetes/cluster/setup-minikube.md
+++ b/daprdocs/content/en/operations/hosting/kubernetes/cluster/setup-minikube.md
@@ -1,60 +1,63 @@
 ---
 type: docs
-title: "Setup an Minikube cluster"
+title: "Set up a Minikube cluster"
 linkTitle: "Minikube"
 weight: 1000
 description: >
-  How to setup Dapr on a Minikube cluster.
+  How to set up a Minikube cluster
 ---
 
-# Set up a Minikube cluster
-
 ## Prerequisites
 
-- [Docker](https://docs.docker.com/install/)
-- [kubectl](https://kubernetes.io/docs/tasks/tools/)
-- [Minikube](https://minikube.sigs.k8s.io/docs/start/)
+- Install:
+  - [Docker](https://docs.docker.com/install/)
+  - [kubectl](https://kubernetes.io/docs/tasks/tools/)
+  - [Minikube](https://minikube.sigs.k8s.io/docs/start/)
+- For Windows:
+  - Enable Virtualization in BIOS
+  - [Install Hyper-V](https://docs.microsoft.com/virtualization/hyper-v-on-windows/quick-start/enable-hyper-v)
 
-> Note: For Windows, enable Virtualization in BIOS and [install Hyper-V](https://docs.microsoft.com/virtualization/hyper-v-on-windows/quick-start/enable-hyper-v)
+{{% alert title="Note" color="primary" %}}
+See [the official Minikube documentation on drivers](https://minikube.sigs.k8s.io/docs/reference/drivers/) for details on supported drivers and how to install plugins.
+{{% /alert %}}
 
 ## Start the Minikube cluster
 
-1. (optional) Set the default VM driver
-
-```bash
-minikube config set vm-driver [driver_name]
-```
-
-> Note: See [DRIVERS](https://minikube.sigs.k8s.io/docs/reference/drivers/) for details on supported drivers and how to install plugins.
+1. If applicable for your project, set the default VM driver:
 
-2. Start the cluster
-Use 1.13.x or newer version of Kubernetes with `--kubernetes-version`
+   ```bash
+   minikube config set vm-driver [driver_name]
+   ```
 
-```bash
-minikube start --cpus=4 --memory=4096
-```
+1. Start the cluster. If necessary, specify version 1.13.x or newer of Kubernetes with `--kubernetes-version`:
 
-3. Enable dashboard and ingress addons
-```bash
-# Enable dashboard
-minikube addons enable dashboard
+   ```bash
+   minikube start --cpus=4 --memory=4096
+   ```
-# Enable ingress
-minikube addons enable ingress
-```
+1. Enable the Minikube dashboard and ingress add-ons.
+
+   ```bash
+   # Enable dashboard
+   minikube addons enable dashboard
+
+   # Enable ingress
+   minikube addons enable ingress
+   ```
-## (optional) Install Helm v3
+## Install Helm v3 (optional)
-1. [Install Helm v3 client](https://helm.sh/docs/intro/install/)
+If you are using Helm, install the [Helm v3 client](https://helm.sh/docs/intro/install/).
-> **Note:** The latest Dapr helm chart no longer supports Helm v2. Please migrate from helm v2 to helm v3 by following [this guide](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/).
+
+{{% alert title="Important" color="warning" %}}
+The latest Dapr Helm chart no longer supports Helm v2. [Migrate from Helm v2 to Helm v3](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/).
+{{% /alert %}}
-### Troubleshooting
+
+## Troubleshooting
-1. The external IP address of load balancer is not shown from `kubectl get svc`
+
+The external IP address of the load balancer is not shown from `kubectl get svc`.
-In Minikube, EXTERNAL-IP in `kubectl get svc` shows `<pending>` state for your service. In this case, you can run `minikube service [service_name]` to open your service without external IP address.
+In Minikube, `EXTERNAL-IP` in `kubectl get svc` shows `<pending>` state for your service. In this case, you can run `minikube service [service_name]` to open your service without an external IP address.
 
```bash
$ kubectl get svc
@@ -72,3 +75,9 @@ $ minikube service calculator-front-end
|-----------|----------------------|-------------|---------------------------|
🎉 Opening kubernetes service default/calculator-front-end in default browser...
```
+
+## Related links
+- [Try out a Dapr quickstart]({{< ref quickstarts.md >}})
+- Learn how to [deploy Dapr on your cluster]({{< ref kubernetes-deploy.md >}})
+- [Upgrade Dapr on Kubernetes]({{< ref kubernetes-upgrade.md >}})
+- [Kubernetes production guidelines]({{< ref kubernetes-production.md >}})
\ No newline at end of file
diff --git a/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-deploy.md b/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-deploy.md
index 7b8dfacf980..bdc60e48928 100644
--- a/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-deploy.md
+++ b/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-deploy.md
@@ -8,99 +8,101 @@ aliases:
 - /getting-started/install-dapr-kubernetes/
 ---
 
-When setting up Kubernetes you can use either the Dapr CLI or Helm.
+When [setting up Dapr on Kubernetes]({{< ref kubernetes-overview.md >}}), you can use either the Dapr CLI or Helm.
 
-For more information on what is deployed to your Kubernetes cluster read the [Kubernetes overview]({{< ref kubernetes-overview.md >}})
+{{% alert title="Hybrid clusters" color="primary" %}}
+Both the Dapr CLI and the Dapr Helm chart automatically deploy with affinity for nodes with the label `kubernetes.io/os=linux`. You can deploy Dapr to Windows nodes if your application requires it. For more information, see [Deploying to a hybrid Linux/Windows Kubernetes cluster]({{< ref kubernetes-hybrid-clusters >}}).
+{{% /alert %}}
 
-## Prerequisites
+{{< tabs "Dapr CLI" "Helm" >}}
+
+{{% codetab %}}
+## Install with Dapr CLI
 
-- Install [Dapr CLI]({{< ref install-dapr-cli.md >}})
-- Install [kubectl](https://kubernetes.io/docs/tasks/tools/)
-- Kubernetes cluster (see below if needed)
+You can install Dapr on a Kubernetes cluster using the [Dapr CLI]({{< ref install-dapr-cli.md >}}).
 
-### Create cluster
+### Prerequisites
 
-You can install Dapr on any Kubernetes cluster. Here are some helpful links:
+- Install:
+  - [Dapr CLI]({{< ref install-dapr-cli.md >}})
+  - [kubectl](https://kubernetes.io/docs/tasks/tools/)
+- Create a Kubernetes cluster with Dapr. Here are some helpful links:
+  - [Set up KiND Cluster]({{< ref setup-kind.md >}})
+  - [Set up Minikube Cluster]({{< ref setup-minikube.md >}})
+  - [Set up Azure Kubernetes Service Cluster]({{< ref setup-aks.md >}})
+  - [Set up GKE cluster]({{< ref setup-gke.md >}})
+  - [Set up Amazon Elastic Kubernetes Service](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html)
 
-- [Setup KiNd Cluster]({{< ref setup-kind.md >}})
-- [Setup Minikube Cluster]({{< ref setup-minikube.md >}})
-- [Setup Azure Kubernetes Service Cluster]({{< ref setup-aks.md >}})
-- [Setup Google Cloud Kubernetes Engine](https://docs.dapr.io/operations/hosting/kubernetes/cluster/setup-gke/)
-- [Setup Amazon Elastic Kubernetes Service](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html)
-{{% alert title="Hybrid clusters" color="primary" %}}
-Both the Dapr CLI and the Dapr Helm chart automatically deploy with affinity for nodes with the label `kubernetes.io/os=linux`. You can deploy Dapr to Windows nodes if your application requires it. For more information see [Deploying to a hybrid Linux/Windows Kubernetes cluster]({{< ref kubernetes-hybrid-clusters >}}).
-{{% /alert %}}
+### Installation options
+You can install Dapr from an official Helm chart or a private chart, using a custom namespace, etc.
 
-## Install with Dapr CLI
-
-You can install Dapr to a Kubernetes cluster using the [Dapr CLI]({{< ref install-dapr-cli.md >}}).
-
-### Install Dapr (from an official Dapr Helm chart)
+#### Install Dapr from an official Dapr Helm chart
 
 The `-k` flag initializes Dapr on the Kubernetes cluster in your current context.
 
-{{% alert title="Ensure correct cluster is set" color="warning" %}}
-Make sure the correct "target" cluster is set. Check `kubectl context (kubectl config get-contexts)` to verify. You can set a different context using `kubectl config use-context <CONTEXT>`.
-{{% /alert %}}
-
-Run the following command on your local machine to init Dapr on your cluster:
+1. Verify the correct "target" cluster is set by running `kubectl config get-contexts`.
+   - You can set a different context using `kubectl config use-context <CONTEXT>`.
 
-```bash
-dapr init -k
-```
+1. Initialize Dapr on your cluster with the following command:
 
-```bash
-⌛ Making the jump to hyperspace...
+   ```bash
+   dapr init -k
+   ```
 
-✅ Deploying the Dapr control plane to your cluster...
-✅ Success! Dapr has been installed to namespace dapr-system. To verify, run "dapr status -k" in your terminal. To get started, go here: https://aka.ms/dapr-getting-started
-```
+   **Expected output**
+
+   ```bash
+   ⌛ Making the jump to hyperspace...
+
+   ✅ Deploying the Dapr control plane to your cluster...
+   ✅ Success! Dapr has been installed to namespace dapr-system. To verify, run "dapr status -k" in your terminal. To get started, go here: https://aka.ms/dapr-getting-started
+   ```
+
+1.
Run the dashboard: -To run the dashboard, run: + ```bash + dapr dashboard -k + ``` -```bash -dapr dashboard -k -``` + If you installed Dapr in a **non-default namespace**, run: + + ```bash + dapr dashboard -k -n + ``` -If you installed Dapr in a non-default namespace, run: +#### Install Dapr from a private Dapr Helm chart -```bash -dapr dashboard -k -n -``` +Installing Dapr from a private Helm chart can be helpful for when you: +- Need more granular control of the Dapr Helm chart +- Have a custom Dapr deployment +- Pull Helm charts from trusted registries that are managed and maintained by your organization -### Install Dapr (a private Dapr Helm chart) -There are some scenarios where it's necessary to install Dapr from a private Helm chart, such as: -- needing more granular control of the Dapr Helm chart -- having a custom Dapr deployment -- pulling Helm charts from trusted registries that are managed and maintained by your organization +Set the following parameters to allow `dapr init -k` to install Dapr images from the configured Helm repository. ``` export DAPR_HELM_REPO_URL="https://helm.custom-domain.com/dapr/dapr" export DAPR_HELM_REPO_USERNAME="username_xxx" export DAPR_HELM_REPO_PASSWORD="passwd_xxx" ``` +#### Install in high availability mode -Setting the above parameters will allow `dapr init -k` to install Dapr images from the configured Helm repository. - -### Install in custom namespace - -The default namespace when initializing Dapr is `dapr-system`. You can override this with the `-n` flag. +You can run Dapr with three replicas of each control plane pod in the `dapr-system` namespace for [production scenarios]({{< ref kubernetes-production.md >}}). ```bash -dapr init -k -n mynamespace +dapr init -k --enable-ha=true ``` -### Install in highly available mode +#### Install in custom namespace -You can run Dapr with 3 replicas of each control plane pod in the dapr-system namespace for [production scenarios]({{< ref kubernetes-production.md >}}). +The default namespace when initializing Dapr is `dapr-system`. You can override this with the `-n` flag. ```bash -dapr init -k --enable-ha=true +dapr init -k -n mynamespace ``` -### Disable mTLS +#### Disable mTLS Dapr is initialized by default with [mTLS]({{< ref "security-concept.md#sidecar-to-sidecar-communication" >}}). You can disable it with: @@ -108,11 +110,9 @@ Dapr is initialized by default with [mTLS]({{< ref "security-concept.md#sidecar- dapr init -k --enable-mtls=false ``` -### Wait for the installation to complete - - You can wait for the installation to complete its deployment with the `--wait` flag. +#### Wait for the installation to complete - The default timeout is 300s (5 min), but can be customized with the `--timeout` flag. +You can wait for the installation to complete its deployment with the `--wait` flag. The default timeout is 300s (5 min), but can be customized with the `--timeout` flag. ```bash dapr init -k --wait --timeout 600 @@ -126,18 +126,33 @@ Run the following command on your local machine to uninstall Dapr on your cluste dapr uninstall -k ``` -## Install with Helm (advanced) +{{% /codetab %}} -You can install Dapr on Kubernetes using a Helm 3 chart. + +{{% codetab %}} + +## Install with Helm + +You can install Dapr on Kubernetes using a Helm v3 chart. + +❗**Important:** The latest Dapr Helm chart no longer supports Helm v2. [Migrate from Helm v2 to Helm v3](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/). 
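+Before adding the chart, it can be worth confirming that the Helm client on your path is v3 (a quick sanity check; the exact output format varies by Helm release):
+
+```bash
+helm version --short
+# example output: v3.11.1+g293b50c
+```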
+
+### Prerequisites
+
+- Install:
+  - [Helm v3](https://helm.sh/docs/intro/install/)
+  - [kubectl](https://kubernetes.io/docs/tasks/tools/)
+- Create a Kubernetes cluster with Dapr. Here are some helpful links:
+  - [Set up KiND Cluster]({{< ref setup-kind.md >}})
+  - [Set up Minikube Cluster]({{< ref setup-minikube.md >}})
+  - [Set up Azure Kubernetes Service Cluster]({{< ref setup-aks.md >}})
+  - [Set up GKE cluster]({{< ref setup-gke.md >}})
+  - [Set up Amazon Elastic Kubernetes Service](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html)
 
-{{% alert title="Ensure you are on Helm v3" color="primary" %}}
-The latest Dapr helm chart no longer supports Helm v2. Please migrate from Helm v2 to Helm v3 by following [this guide](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/).
-{{% /alert %}}
 
 ### Add and install Dapr Helm chart
 
-1. Make sure [Helm 3](https://github.com/helm/helm/releases) is installed on your machine
-1. Add Helm repo and update
+1. Add the Helm repo and update:
 
   ```bash
   // Add the official Dapr Helm chart.
@@ -160,7 +175,7 @@ The latest Dapr helm chart no longer supports Helm v2. Please migrate from Helm
     --wait
   ```
 
-   To install in high availability mode:
+   To install in **high availability** mode:
 
   ```bash
   helm upgrade --install dapr dapr/dapr \
@@ -173,18 +188,7 @@ The latest Dapr helm chart no longer supports Helm v2. Please migrate from Helm
 
 See [Guidelines for production ready deployments on Kubernetes]({{< ref kubernetes-production.md >}}) for more information on installing and upgrading Dapr using Helm.
 
-### Uninstall Dapr on Kubernetes
-
-```bash
-helm uninstall dapr --namespace dapr-system
-```
-
-### More information
-
-- Read [this guide]({{< ref kubernetes-production.md >}}) for recommended Helm chart values for production setups
-- See [this page](https://github.com/dapr/dapr/blob/master/charts/dapr/README.md) for details on Dapr Helm charts.
-
-## Installing the Dapr dashboard as part of the control plane
+### (optional) Install the Dapr dashboard as part of the control plane
 
 If you want to install the Dapr dashboard, use this Helm chart with the additional settings of your choice:
 
@@ -200,9 +204,9 @@ kubectl create namespace dapr-system
 helm install dapr dapr/dapr-dashboard --namespace dapr-system
 ```
 
-## Verify installation
+### Verify installation
 
-Once the installation is complete, verify that the dapr-operator, dapr-placement, dapr-sidecar-injector and dapr-sentry pods are running in the `dapr-system` namespace:
+Once the installation is complete, verify that the `dapr-operator`, `dapr-placement`, `dapr-sidecar-injector`, and `dapr-sentry` pods are running in the `dapr-system` namespace:
 
 ```bash
 kubectl get pods --namespace dapr-system
@@ -217,14 +221,44 @@ dapr-sidecar-injector-8555576b6f-29cqm  1/1     Running   0          40s
 dapr-sentry-9435776c7f-8f7yd            1/1     Running   0          40s
 ```
 
-## Using Mariner-based images
+### Uninstall Dapr on Kubernetes
+
+```bash
+helm uninstall dapr --namespace dapr-system
+```
+
+### More information
+
+- Read [the Kubernetes production guidelines]({{< ref kubernetes-production.md >}}) for recommended Helm chart values for production setups
+- [More details on Dapr Helm charts](https://github.com/dapr/dapr/blob/master/charts/dapr/README.md)
+
+{{% /codetab %}}
+
+{{< /tabs >}}
+
+### Use Mariner-based images
 
-When deploying Dapr, whether on Kubernetes or in Docker self-hosted, the default container images that are pulled are based on [*distroless*](https://github.com/GoogleContainerTools/distroless).
+The default container images pulled on Kubernetes are based on [*distroless*](https://github.com/GoogleContainerTools/distroless). Alternatively, you can use Dapr container images based on Mariner 2 (minimal distroless). [Mariner](https://github.com/microsoft/CBL-Mariner/), officially known as CBL-Mariner, is a free and open-source Linux distribution and container base image maintained by Microsoft. For some Dapr users, leveraging container images based on Mariner can help you meet compliance requirements. To use Mariner-based images for Dapr, you need to add `-mariner` to your Docker tags. For example, while `ghcr.io/dapr/dapr:latest` is the Docker image based on *distroless*, `ghcr.io/dapr/dapr:latest-mariner` is based on Mariner. Tags pinned to a specific version are also available, such as `{{% dapr-latest-version short="true" %}}-mariner`. +{{< tabs "Dapr CLI" "Helm" >}} + +{{% codetab %}} + +In the Dapr CLI, you can switch to using Mariner-based images with the `--image-variant` flag. + +```sh +dapr init --image-variant mariner +``` + +{{% /codetab %}} + + +{{% codetab %}} + With Kubernetes and Helm, you can use Mariner-based images by setting the `global.tag` option and adding `-mariner`. For example: ```sh @@ -236,6 +270,12 @@ helm upgrade --install dapr dapr/dapr \ --wait ``` -## Next steps +{{% /codetab %}} + +{{< /tabs >}} +## Related links +- [Deploy Dapr with Helm parameters and other details]({{< ref "kubernetes-production.md#deploy-dapr-with-helm" >}}) +- [Upgrade Dapr on Kubernetes]({{< ref kubernetes-upgrade.md >}}) +- [Kubernetes production guidelines]({{< ref kubernetes-production.md >}}) - [Configure state store & pubsub message broker]({{< ref "getting-started/tutorials/configure-state-pubsub.md" >}}) diff --git a/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-hybrid-clusters.md b/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-hybrid-clusters.md index ec57289a4f4..b76d682fa0f 100644 --- a/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-hybrid-clusters.md +++ b/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-hybrid-clusters.md @@ -6,24 +6,30 @@ weight: 60000 description: "How to run Dapr apps on Kubernetes clusters with Windows nodes" --- -Dapr supports running on Kubernetes clusters with Windows nodes. You can run your Dapr microservices exclusively on Windows, exclusively on Linux, or a combination of both. This is helpful to users who may be doing a piecemeal migration of a legacy application into a Dapr Kubernetes cluster. +Dapr supports running your microservices on Kubernetes clusters on: +- Windows +- Linux +- A combination of both -Kubernetes uses a concept called node affinity so that you can denote whether you want your application to be launched on a Linux node or a Windows node. When deploying to a cluster which has both Windows and Linux nodes, you must provide affinity rules for your applications, otherwise the Kubernetes scheduler might launch your application on the wrong type of node. +This is especially helpful during a piecemeal migration of a legacy application into a Dapr Kubernetes cluster. -## Pre-requisites +Kubernetes uses a concept called **node affinity** to denote whether you want your application to be launched on a Linux node or a Windows node. When deploying to a cluster which has both Windows and Linux nodes, you must provide affinity rules for your applications, otherwise the Kubernetes scheduler might launch your application on the wrong type of node. 
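+In isolation, the affinity rule that targets Windows nodes looks like the following sketch; the full deployment examples later in this guide show it in context:
+
+```yaml
+affinity:
+  nodeAffinity:
+    requiredDuringSchedulingIgnoredDuringExecution:
+      nodeSelectorTerms:
+      - matchExpressions:
+        - key: kubernetes.io/os
+          operator: In
+          values:
+          - windows
+```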
-You will need a Kubernetes cluster with Windows nodes. Many Kubernetes providers support the automatic provisioning of Windows enabled Kubernetes clusters. +## Prerequisites -1. Follow your preferred provider's instructions for setting up a cluster with Windows enabled +Before you begin, set up a Kubernetes cluster with Windows nodes. Many Kubernetes providers support the automatic provisioning of Windows enabled Kubernetes clusters. -- [Setting up Windows on Azure AKS](https://docs.microsoft.com/azure/aks/windows-container-cli) -- [Setting up Windows on AWS EKS](https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html) -- [Setting up Windows on Google Cloud GKE](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-a-cluster-windows) +1. Follow your preferred provider's instructions for setting up a cluster with Windows enabled. + + - [Setting up Windows on Azure AKS](https://docs.microsoft.com/azure/aks/windows-container-cli) + - [Setting up Windows on AWS EKS](https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html) + - [Setting up Windows on Google Cloud GKE](https://cloud.google.com/kubernetes-engine/docs/how-to/creating-a-cluster-windows) -2. Once you have set up the cluster, you should see that it has both Windows and Linux nodes available +1. Once you have set up the cluster, verify that both Windows and Linux nodes are available. ```bash kubectl get nodes -o wide + NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME aks-nodepool1-11819434-vmss000000 Ready agent 6d v1.17.9 10.240.0.4 Ubuntu 16.04.6 LTS 4.15.0-1092-azure docker://3.0.10+azure aks-nodepool1-11819434-vmss000001 Ready agent 6d v1.17.9 10.240.0.35 Ubuntu 16.04.6 LTS 4.15.0-1092-azure docker://3.0.10+azure @@ -31,29 +37,31 @@ You will need a Kubernetes cluster with Windows nodes. Many Kubernetes providers akswin000000 Ready agent 6d v1.17.9 10.240.0.66 Windows Server 2019 Datacenter 10.0.17763.1339 docker://19.3.5 akswin000001 Ready agent 6d v1.17.9 10.240.0.97 Windows Server 2019 Datacenter 10.0.17763.1339 docker://19.3.5 ``` -## Installing the Dapr control plane -If you are installing using the Dapr CLI or via a helm chart, simply follow the normal deployment procedures: -[Installing Dapr on a Kubernetes cluster]({{< ref "install-dapr-selfhost.md#installing-Dapr-on-a-kubernetes-cluster" >}}) +## Install the Dapr control plane + +If you are installing using the Dapr CLI or via a Helm chart, simply follow the normal deployment procedures: [Installing Dapr on a Kubernetes cluster]({{< ref "install-dapr-selfhost.md#installing-Dapr-on-a-kubernetes-cluster" >}}) Affinity will be automatically set for `kubernetes.io/os=linux`. This will be sufficient for most users, as Kubernetes requires at least one Linux node pool. -> **Note:** Dapr control plane containers are built and tested for both Windows and Linux, however, we generally recommend using the Linux control plane containers. They tend to be smaller and have a much larger user base. +{{% alert title="Note" color="primary" %}} +Dapr control plane containers are built and tested for both Windows and Linux. However, it's recommended to use the Linux control plane containers, which tend to be smaller and have a much larger user base. 
If you understand the above, but want to deploy the Dapr control plane to Windows, you can do so by setting: -``` +```sh helm install dapr dapr/dapr --set global.daprControlPlaneOs=windows ``` +{{% /alert %}} -## Installing Dapr applications +## Install Dapr applications ### Windows applications -In order to launch a Dapr application on Windows, you'll first need to create a Docker container with your application installed. For a step by step guide see [Get started: Prep Windows for containers](https://docs.microsoft.com/virtualization/windowscontainers/quick-start/set-up-environment). Once you have a docker container with your application, create a deployment YAML file with node affinity set to kubernetes.io/os: windows. -1. Create a deployment YAML +1. [Follow the Microsoft documentation to create a Docker Windows container with your application installed](https://learn.microsoft.com/virtualization/windowscontainers/quick-start/set-up-environment?tabs=dockerce). + +1. Once you've created a Docker container with your application, create a deployment YAML file with the node affinity set to `kubernetes.io/os: windows`. In the example `deploy_windows.yaml` deployment file below: - Here is a sample deployment with nodeAffinity set to "windows". Modify as needed for your application. ```yaml apiVersion: apps/v1 kind: Deployment @@ -92,9 +100,8 @@ In order to launch a Dapr application on Windows, you'll first need to create a values: - windows ``` - This deployment yaml will be the same as any other dapr application, with an additional spec.template.spec.affinity section as shown above. - -2. Deploy to your Kubernetes cluster + +1. Deploy the YAML file to your Kubernetes cluster. ```bash kubectl apply -f deploy_windows.yaml @@ -102,11 +109,10 @@ In order to launch a Dapr application on Windows, you'll first need to create a ### Linux applications -If you already have a Dapr application that runs on Linux, you'll still need to add affinity rules as above, but choose Linux affinity instead. +If you already have a Dapr application that runs on Linux, you still need to add affinity rules. -1. Create a deployment YAML +1. Create a deployment YAML file with the node affinity set to `kubernetes.io/os: linux`. In the example `deploy_linux.yaml` deployment file below: - Here is a sample deployment with nodeAffinity set to "linux". Modify as needed for your application. ```yaml apiVersion: apps/v1 kind: Deployment @@ -146,13 +152,17 @@ If you already have a Dapr application that runs on Linux, you'll still need to - linux ``` -2. Deploy to your Kubernetes cluster +1. Deploy the YAML to your Kubernetes cluster. ```bash kubectl apply -f deploy_linux.yaml ``` -## Cleanup +That's it! + +## Clean up + +To remove the deployments from this guide, run the following commands: ```bash kubectl delete -f deploy_linux.yaml diff --git a/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-job.md b/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-job.md index 0b2139fddc7..bd8c8f5eff5 100644 --- a/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-job.md +++ b/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-job.md @@ -7,19 +7,19 @@ description: "Use Dapr API in a Kubernetes Job context" type: docs --- -# Kubernetes Job +The Dapr sidecar is designed to be a long running process. In the context of a [Kubernetes Job](https://kubernetes.io/docs/concepts/workloads/controllers/job/) this behavior can block your job completion. 
-The Dapr sidecar is designed to be a long running process, in the context of a [Kubernetes Job](https://kubernetes.io/docs/concepts/workloads/controllers/job/) this behaviour can block your job completion. -To address this issue the Dapr sidecar has an endpoint to `Shutdown` the sidecar. +To address this issue, the Dapr sidecar has an endpoint to `Shutdown` the sidecar. -When running a basic [Kubernetes Job](https://kubernetes.io/docs/concepts/workloads/controllers/job/) you will need to call the `/shutdown` endpoint for the sidecar to gracefully stop and the job will be considered `Completed`. +When running a basic [Kubernetes Job](https://kubernetes.io/docs/concepts/workloads/controllers/job/), you need to call the `/shutdown` endpoint for the sidecar to gracefully stop and the job to be considered `Completed`. -When a job is finished without calling `Shutdown`, your job will be in a `NotReady` state with only the `daprd` container running endlessly. +When a job is finished without calling `Shutdown`, your job is in a `NotReady` state with only the `daprd` container running endlessly. + +Stopping the Dapr sidecar causes its readiness and liveness probes to fail in your container. -Stopping the dapr sidecar will cause its readiness and liveness probes to fail in your container because the dapr sidecar was shutdown. To prevent Kubernetes from trying to restart your job, set your job's `restartPolicy` to `Never`. -Be sure to use the *POST* HTTP verb when calling the shutdown HTTP API. +Be sure to use the *POST* HTTP verb when calling the shutdown HTTP API. For example: ```yaml apiVersion: batch/v1 @@ -40,7 +40,7 @@ spec: restartPolicy: Never ``` -You can also call the `Shutdown` from any of the Dapr SDKs +You can also call the `Shutdown` from any of the Dapr SDKs. For example, for the Go SDK: ```go package main @@ -63,3 +63,8 @@ func main() { // Job } ``` + +## Related links + +- [Deploy Dapr on Kubernetes]({{< ref kubernetes-deploy.md >}}) +- [Upgrade Dapr on Kubernetes]({{< ref kubernetes-upgrade.md >}}) \ No newline at end of file diff --git a/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-overview.md b/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-overview.md index 44cadef4b9f..7ad299dbe94 100644 --- a/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-overview.md +++ b/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-overview.md @@ -6,23 +6,30 @@ weight: 10000 description: "Overview of how to get Dapr running on your Kubernetes cluster" --- -## Dapr on Kubernetes +Dapr can be configured to run on any supported versions of Kubernetes. To achieve this, Dapr begins by deploying the following Kubernetes services, which provide first-class integration to make running applications with Dapr easy. -Dapr can be configured to run on any supported versions of Kubernetes. To achieve this, Dapr begins by deploying the `dapr-sidecar-injector`, `dapr-operator`, `dapr-placement`, and `dapr-sentry` Kubernetes services. These provide first-class integration to make running applications with Dapr easy. -- **dapr-operator:** Manages [component]({{< ref components >}}) updates and Kubernetes services endpoints for Dapr (state stores, pub/subs, etc.) -- **dapr-sidecar-injector:** Injects Dapr into [annotated](#adding-dapr-to-a-kubernetes-deployment) deployment pods and adds the environment variables `DAPR_HTTP_PORT` and `DAPR_GRPC_PORT` to enable user-defined applications to easily communicate with Dapr without hard-coding Dapr port values. 
-- **dapr-placement:** Used for [actors]({{< ref actors >}}) only. Creates mapping tables that map actor instances to pods -- **dapr-sentry:** Manages mTLS between services and acts as a certificate authority. For more information read the [security overview]({{< ref "security-concept.md" >}}). +| Kubernetes services | Description | +| ------------------- | ----------- | +| `dapr-operator` | Manages [component]({{< ref components >}}) updates and Kubernetes services endpoints for Dapr (state stores, pub/subs, etc.) | +| `dapr-sidecar-injector` | Injects Dapr into [annotated](#adding-dapr-to-a-kubernetes-deployment) deployment pods and adds the environment variables `DAPR_HTTP_PORT` and `DAPR_GRPC_PORT` to enable user-defined applications to easily communicate with Dapr without hard-coding Dapr port values. | +| `dapr-placement` | Used for [actors]({{< ref actors >}}) only. Creates mapping tables that map actor instances to pods | +| `dapr-sentry` | Manages mTLS between services and acts as a certificate authority. For more information read the [security overview]({{< ref "security-concept.md" >}}) | +## Supported versions +Dapr support for Kubernetes is aligned with [Kubernetes Version Skew Policy](https://kubernetes.io/releases/version-skew-policy). + ## Deploying Dapr to a Kubernetes cluster -Read [this guide]({{< ref kubernetes-deploy.md >}}) to learn how to deploy Dapr to your Kubernetes cluster. +Read [Deploy Dapr on a Kubernetes cluster]({{< ref kubernetes-deploy.md >}}) to learn how to deploy Dapr to your Kubernetes cluster. ## Adding Dapr to a Kubernetes deployment -Deploying and running a Dapr enabled application into your Kubernetes cluster is as simple as adding a few annotations to the pods schema. To give your service an `id` and `port` known to Dapr, turn on tracing through configuration and launch the Dapr sidecar container, you annotate your Kubernetes pod like this. For more information check [dapr annotations]({{< ref arguments-annotations-overview.md >}}) +Deploying and running a Dapr-enabled application into your Kubernetes cluster is as simple as adding a few annotations to the pods schema. For example, in the following example, your Kubernetes pod is annotated to: +- Give your service an `id` and `port` known to Dapr +- Turn on tracing through configuration +- Launch the Dapr sidecar container ```yml annotations: @@ -32,20 +39,21 @@ Deploying and running a Dapr enabled application into your Kubernetes cluster is dapr.io/config: "tracing" ``` -## Pulling container images from private registries +For more information, check [Dapr annotations]({{< ref arguments-annotations-overview.md >}}). -Dapr works seamlessly with any user application container image, regardless of its origin. Simply init Dapr and add the [Dapr annotations]({{< ref arguments-annotations-overview.md >}}) to your Kubernetes definition to add the Dapr sidecar. +## Pulling container images from private registries -The Dapr control-plane and sidecar images come from the [daprio Docker Hub](https://hub.docker.com/u/daprio) container registry, which is a public registry. +Dapr works seamlessly with any user application container image, regardless of its origin. Simply [initialize Dapr]({{< ref install-dapr-selfhost.md >}}) and add the [Dapr annotations]({{< ref arguments-annotations-overview.md >}}) to your Kubernetes definition to add the Dapr sidecar. 
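+As a reference for the private-registry guidance linked below, the standard Kubernetes pattern is a pull secret referenced from the pod spec (a sketch; the registry URL, credentials, and secret name are hypothetical):
+
+```bash
+# Create a registry credential secret in the application's namespace
+kubectl create secret docker-registry regcred \
+  --docker-server=registry.example.com \
+  --docker-username=my-user \
+  --docker-password=my-password
+```
+
+```yaml
+# Reference the secret from the pod spec of your Dapr-annotated deployment
+spec:
+  imagePullSecrets:
+  - name: regcred
+```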
-For information about pulling your application images from a private registry, reference the [Kubernetes documentation](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/). If you are using Azure Container Registry with Azure Kubernetes Service, reference the [AKS documentation](https://docs.microsoft.com/azure/aks/cluster-container-registry-integration). +The Dapr control plane and sidecar images come from the [daprio Docker Hub](https://hub.docker.com/u/daprio) container registry, which is a public registry. -## Quickstart +For information about: +- Pulling your application images from a private registry, reference the [official Kubernetes documentation](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/). +- Using Azure Container Registry with Azure Kubernetes Service, reference the [AKS documentation](https://docs.microsoft.com/azure/aks/cluster-container-registry-integration). -You can see some examples [here](https://github.com/dapr/quickstarts/tree/master/tutorials/hello-kubernetes) in the Kubernetes getting started quickstart. +## Tutorials -## Supported versions -Dapr support for Kubernetes is aligned with [Kubernetes Version Skew Policy](https://kubernetes.io/releases/version-skew-policy). +[Work through the Hello Kubernetes tutorial](https://github.com/dapr/quickstarts/tree/master/tutorials/hello-kubernetes) to learn more about getting started with Dapr on your Kubernetes cluster. ## Related links diff --git a/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-production.md b/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-production.md index e958ea8c34c..365187bc991 100644 --- a/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-production.md +++ b/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-production.md @@ -3,16 +3,14 @@ type: docs title: "Production guidelines on Kubernetes" linkTitle: "Production guidelines" weight: 40000 -description: "Recommendations and practices for deploying Dapr to a Kubernetes cluster in a production-ready configuration" +description: "Best practices for deploying Dapr to a Kubernetes cluster in a production-ready configuration" --- ## Cluster and capacity requirements Dapr support for Kubernetes is aligned with [Kubernetes Version Skew Policy](https://kubernetes.io/releases/version-skew-policy/). -For a production-ready Kubernetes cluster deployment, we recommended you run a cluster of at least 3 worker nodes to support a highly-available control plane installation. - -Use the following resource settings as a starting point. Requirements will vary depending on cluster size, number of pods, and other factors, so you should perform individual testing to find the right values for your environment: +Use the following resource settings as a starting point. Requirements vary depending on cluster size, number of pods, and other factors. Perform individual testing to find the right values for your environment. | Deployment | CPU | Memory |-------------|-----|------- @@ -23,7 +21,7 @@ Use the following resource settings as a starting point. Requirements will vary | **Dashboard** | Limit: 200m, Request: 50m | Limit: 200Mi, Request: 20Mi {{% alert title="Note" color="primary" %}} -For more info, read the [concept article on CPU and Memory resource units and their meaning](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-units-in-kubernetes). 
+For more information, refer to the Kubernetes documentation on [CPU and Memory resource units and their meaning](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#resource-units-in-kubernetes). {{% /alert %}} ### Helm @@ -32,29 +30,26 @@ When installing Dapr using Helm, no default limit/request values are set. Each c The [Helm chart readme](https://github.com/dapr/dapr/blob/master/charts/dapr/README.md) has detailed information and examples. -For local/dev installations, you might simply want to skip configuring the `resources` options. +For local/dev installations, you might want to skip configuring the `resources` options. ### Optional components The following Dapr control plane deployments are optional: -- **Placement**: needed to use Dapr Actors -- **Sentry**: needed for mTLS for service to service invocation -- **Dashboard**: needed to get an operational view of the cluster +- **Placement**: For using Dapr Actors +- **Sentry**: For mTLS for service-to-service invocation +- **Dashboard**: For an operational view of the cluster ## Sidecar resource settings -To set the resource assignments for the Dapr sidecar, see the annotations [here]({{< ref "arguments-annotations-overview.md" >}}). -The specific annotations related to resource constraints are: +[Set the resource assignments for the Dapr sidecar using the supported annotations]({{< ref "arguments-annotations-overview.md" >}}). The specific annotations related to **resource constraints** are: - `dapr.io/sidecar-cpu-limit` - `dapr.io/sidecar-memory-limit` - `dapr.io/sidecar-cpu-request` - `dapr.io/sidecar-memory-request` -If not set, the Dapr sidecar will run without resource settings, which may lead to issues. For a production-ready setup it is strongly recommended to configure these settings. - -For more details on configuring resource in Kubernetes see [Assign Memory Resources to Containers and Pods](https://kubernetes.io/docs/tasks/configure-pod-container/assign-memory-resource/) and [Assign CPU Resources to Containers and Pods](https://kubernetes.io/docs/tasks/configure-pod-container/assign-cpu-resource/). +If not set, the Dapr sidecar runs without resource settings, which may lead to issues. For a production-ready setup, it's strongly recommended to configure these settings. Example settings for the Dapr sidecar in a production-ready setup: @@ -62,31 +57,56 @@ Example settings for the Dapr sidecar in a production-ready setup: |-----|--------| | Limit: 300m, Request: 100m | Limit: 1000Mi, Request: 250Mi +The CPU and memory limits above account for Dapr supporting a high number of I/O bound operations. Use a [monitoring tool]({{< ref observability >}}) to get a baseline for the sidecar (and app) containers and tune these settings based on those baselines. + +For more details on configuring resource in Kubernetes, see the following Kubernetes guides: +- [Assign Memory Resources to Containers and Pods](https://kubernetes.io/docs/tasks/configure-pod-container/assign-memory-resource/) +- [Assign CPU Resources to Containers and Pods](https://kubernetes.io/docs/tasks/configure-pod-container/assign-cpu-resource/) + {{% alert title="Note" color="primary" %}} -Since Dapr is intended to do much of the I/O heavy lifting for your app, it's expected that the resources given to Dapr enable you to drastically reduce the resource allocations for the application. 
+Since Dapr is intended to do much of the I/O heavy lifting for your app, the resources given to Dapr allow you to drastically reduce the resource allocations for the application.
 {{% /alert %}}
 
-The CPU and memory limits above account for the fact that Dapr is intended to support a high number of I/O bound operations. It is strongly recommended that you use a monitoring tool to get a baseline for the sidecar (and app) containers and tune these settings based on those baselines.
+### Setting soft memory limits on the Dapr sidecar
+
+Set soft memory limits on the Dapr sidecar when you've set up memory limits. With soft memory limits, the sidecar garbage collector frees up memory once it exceeds the limit, instead of waiting until the heap grows to double the live memory present after the previous collection. Waiting is the default behavior of the [garbage collector](https://tip.golang.org/doc/gc-guide#Memory_limit) used in Go, and can lead to OOM Kill events.
+
+For example, for an app with app-id `nodeapp` with memory limit set to 1000Mi, you can use the following in your pod annotations:
+
+```yaml
+  annotations:
+    dapr.io/enabled: "true"
+    dapr.io/app-id: "nodeapp"
+    # our daprd memory settings
+    dapr.io/sidecar-memory-limit: "1000Mi"   # your memory limit
+    dapr.io/env: "GOMEMLIMIT=900MiB"         # 90% of your memory limit. Also notice the suffix "MiB" instead of "Mi"
+```
+
+In this example, the soft limit has been set to 90% of the memory limit, leaving 5-10% headroom for other services, [as recommended](https://tip.golang.org/doc/gc-guide#Memory_limit).
 
-## Highly-available mode
+The `GOMEMLIMIT` environment variable [allows certain suffixes for the memory size: `B`, `KiB`, `MiB`, `GiB`, and `TiB`.](https://pkg.go.dev/runtime)
 
-When deploying Dapr in a production-ready configuration, it is recommend to deploy with a highly available (HA) configuration of the control plane, which creates 3 replicas of each control plane pod in the dapr-system namespace. This configuration allows the Dapr control plane to retain 3 running instances and survive individual node failures and other outages.
+## High availability mode
 
-For a new Dapr deployment, the HA mode can be set with both the [Dapr CLI]({{< ref "kubernetes-deploy.md#install-in-highly-available-mode" >}}) and with [Helm charts]({{< ref "kubernetes-deploy.md#add-and-install-dapr-helm-chart" >}}).
+When deploying Dapr in a production-ready configuration, it's best to deploy with a high availability (HA) configuration of the control plane. This creates three replicas of each control plane pod in the `dapr-system` namespace, allowing the Dapr control plane to retain three running instances and survive individual node failures and other outages.
 
-For an existing Dapr deployment, enabling the HA mode requires additional steps. Please refer to [this paragraph]({{< ref "#enabling-high-availability-in-an-existing-dapr-deployment" >}}) for more details.
+For a new Dapr deployment, HA mode can be set with both:
+- The [Dapr CLI]({{< ref "kubernetes-deploy.md#install-in-highly-available-mode" >}}), and
+- [Helm charts]({{< ref "kubernetes-deploy.md#add-and-install-dapr-helm-chart" >}})
 
-## Deploying Dapr with Helm
+For an existing Dapr deployment, [you can enable HA mode in a few extra steps]({{< ref "#enabling-high-availability-in-an-existing-dapr-deployment" >}}).
+
+## Deploy Dapr with Helm
 
 [Visit the full guide on deploying Dapr with Helm]({{< ref "kubernetes-deploy.md#install-with-helm-advanced" >}}).
### Parameters file -Instead of specifying parameters on the command line, it's recommended to create a values file. This file should be checked into source control so that you can track its changes. +It's recommended to create a values file, instead of specifying parameters on the command line. Check the values file into source control so that you can track its changes. A sample values file is shown after the install commands below. -For a full list of all available options you can set in the values file (or by using the `--set` command-line option), see https://github.com/dapr/dapr/blob/master/charts/dapr/README.md. +[See a full list of available parameters and settings](https://github.com/dapr/dapr/blob/master/charts/dapr/README.md). -Instead of using either `helm install` or `helm upgrade` as shown below, you can also run `helm upgrade --install` - this will dynamically determine whether to install or upgrade. +The following command runs three replicas of each control plane service in the `dapr-system` namespace. ```bash # Add/update an official Dapr Helm repo. @@ -119,84 +139,85 @@ helm install dapr dapr/dapr \ kubectl get pods --namespace dapr-system ```
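As a sketch of the recommended values-file approach, a minimal file for the HA install above might look like the following (the file name `values.yml` and the exact settings shown are illustrative assumptions; the chart readme linked above lists the supported options):

```yaml
# values.yml -- illustrative Helm values for a production-ready install
global:
  ha:
    enabled: true   # run three replicas of each control plane service
  logAsJson: true   # JSON-formatted logs, useful for log collectors
```

You would then pass the file with `helm upgrade --install dapr dapr/dapr --namespace dapr-system --values values.yml`.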
-To see a list of all your Dapr enabled deployments, you can either use the [Dapr Dashboard](https://github.com/dapr/dashboard) or run the following command using the Dapr CLI: -```bash -dapr list -k + ```bash + kubectl rollout restart deploy/ + ``` -APP ID APP PORT AGE CREATED -nodeapp 3000 16h 2020-07-29 17:16.22 -``` +1. View a list of all your Dapr-enabled deployments via either: + - The [Dapr Dashboard](https://github.com/dapr/dashboard) + - Running the following command using the Dapr CLI: -### Enabling high-availability in an existing Dapr deployment + ```bash + dapr list -k + + APP ID APP PORT AGE CREATED + nodeapp 3000 16h 2020-07-29 17:16.22 + ``` + +### Enable high availability in an existing Dapr deployment Enabling HA mode for an existing Dapr deployment requires two steps: -1. Delete the existing placement stateful set: +1. Delete the existing placement stateful set. ```bash kubectl delete statefulset.apps/dapr-placement-server -n dapr-system ``` -1. Issue the upgrade command: + You delete the placement stateful set because, in HA mode, the placement service adds [Raft](https://raft.github.io/) for leader election. However, Kubernetes only allows for limited fields in stateful sets to be patched, subsequently failing upgrade of the placement service. + + Deletion of the existing placement stateful set is safe. The agents reconnect and re-register with the newly created placement service, which persists its table in Raft. + +1. Issue the upgrade command. ```bash helm upgrade dapr ./charts/dapr -n dapr-system --set global.ha.enabled=true ``` -You delete the placement stateful set because, in the HA mode, the placement service adds [Raft](https://raft.github.io/) for leader election. However, Kubernetes only allows for limited fields in stateful sets to be patched, subsequently failing upgrade of the placement service. - -Deletion of the existing placement stateful set is safe. The agents will reconnect and re-register with the newly created placement service, which will persist its table in Raft. - ## Recommended security configuration -When properly configured, Dapr ensures secure communication. It can also make your application more secure with a number of built-in features. +When properly configured, Dapr ensures secure communication and can make your application more secure with a number of built-in features. -It is recommended that a production-ready deployment includes the following settings: +Verify your production-ready deployment includes the following settings: -1. **Mutual Authentication (mTLS)** should be enabled. Note that Dapr has mTLS on by default. For details on how to bring your own certificates, see [here]({{< ref "mtls.md#bringing-your-own-certificates" >}}) +1. **Mutual Authentication (mTLS)** is enabled. Dapr has mTLS on by default. [Learn more about how to bring your own certificates]({{< ref "mtls.md#bringing-your-own-certificates" >}}). -2. **App to Dapr API authentication** is enabled. This is the communication between your application and the Dapr sidecar.
To secure the Dapr API from unauthorized application access, [enable Dapr's token-based authentication]({{< ref "api-token.md" >}}). -3. **Dapr to App API authentication** is enabled. This is the communication between Dapr and your application. This ensures that Dapr knows that it is communicating with an authorized application. See [Authenticate requests from Dapr using token authentication]({{< ref "app-api-token.md" >}}) for details +1. **Dapr to App API authentication** is enabled. This is the communication between Dapr and your application. [Let Dapr know that it is communicating with an authorized application using token authentication]({{< ref "app-api-token.md" >}}). -4. All component YAMLs should have **secret data configured in a secret store** and not hard-coded in the YAML file. See [here]({{< ref "component-secrets.md" >}}) on how to use secrets with Dapr components +1. **Component secret data is configured in a secret store** and not hard-coded in the component YAML file. [Learn how to use secrets with Dapr components]({{< ref "component-secrets.md" >}}). -5. The Dapr **control plane is installed on a dedicated namespace** such as `dapr-system`. +1. The Dapr **control plane is installed on a dedicated namespace**, such as `dapr-system`. -6. Dapr also supports **scoping components for certain applications**. This is not a required practice, and can be enabled according to your security needs. See [here]({{< ref "component-scopes.md" >}}) for more info. +1. **Components are scoped to certain applications**, according to your security needs. This is not a required practice. [Learn more about component scopes]({{< ref "component-scopes.md" >}}). ## Service account tokens @@ -204,47 +225,55 @@ By default, Kubernetes mounts a volume containing a [Service Account token](http When creating a new Pod (or a Deployment, StatefulSet, Job, etc), you can disable auto-mounting the Service Account token by setting `automountServiceAccountToken: false` in your pod's spec. -It is recommended that you consider deploying your apps with `automountServiceAccountToken: false` to improve the security posture of your pods, unless your apps depend on having a Service Account token. For example, you may need a Service Account token if: +It's recommended that you consider deploying your apps with `automountServiceAccountToken: false` to improve the security posture of your pods, unless your apps depend on having a Service Account token. For example, you may need a Service Account token if: -- You are using Dapr components that interact with the Kubernetes APIs, for example the [Kubernetes secret store]({{< ref "kubernetes-secret-store.md" >}}) or the [Kubernetes Events binding]{{< ref "kubernetes-binding.md" >}}). - Note that initializing Dapr components using [component secrets]({{< ref "component-secrets.md" >}}) stored as Kubernetes secrets does **not** require a Service Account token, so you can still set `automountServiceAccountToken: false` in this case. Only calling the Kubernetes secret store at runtime, using the [Secrets management]({{< ref "secrets-overview.md" >}}) building block, is impacted. -- Your own application needs to interact with the Kubernetes APIs. +- Your application needs to interact with the Kubernetes APIs. +- You are using Dapr components that interact with the Kubernetes APIs; for example, the [Kubernetes secret store]({{< ref "kubernetes-secret-store.md" >}}) or the [Kubernetes Events binding]({{< ref "kubernetes-binding.md" >}}).
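If neither case applies, a minimal sketch of disabling the token mount might look like the following (the Deployment name and image are illustrative placeholders):

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp   # illustrative name
spec:
  ...
  template:
    ...
    spec:
      automountServiceAccountToken: false   # no Service Account token volume is mounted
      containers:
      - name: myapp
        image: myapp:latest   # illustrative image
```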
-Because of the reasons above, Dapr does not set `automountServiceAccountToken: false` automatically for you. However, in all situations where the Service Account is not required by your solution, it is recommended that you set this option in the pods spec. +Thus, Dapr does not set `automountServiceAccountToken: false` automatically for you. However, in all situations where the Service Account is not required by your solution, it's recommended that you set this option in the pod's spec. + +{{% alert title="Note" color="primary" %}} +Initializing Dapr components using [component secrets]({{< ref "component-secrets.md" >}}) stored as Kubernetes secrets does **not** require a Service Account token, so you can still set `automountServiceAccountToken: false` in this case. Only calling the Kubernetes secret store at runtime, using the [Secrets management]({{< ref "secrets-overview.md" >}}) building block, is impacted. +{{% /alert %}} ## Tracing and metrics configuration -Dapr has tracing and metrics enabled by default. It is *recommended* that you set up distributed tracing and metrics for your applications and the Dapr control plane in production. +Tracing and metrics are enabled in Dapr by default. It's recommended that you set up distributed tracing and metrics for your applications and the Dapr control plane in production. -If you already have your own observability set-up, you can disable tracing and metrics for Dapr. +If you already have your own observability setup, you can disable tracing and metrics for Dapr. ### Tracing -To configure a tracing backend for Dapr visit [this]({{< ref "setup-tracing.md" >}}) link. +[Configure a tracing backend for Dapr]({{< ref "setup-tracing.md" >}}). ### Metrics -For metrics, Dapr exposes a Prometheus endpoint listening on port 9090 which can be scraped by Prometheus. +For metrics, Dapr exposes a Prometheus endpoint listening on port 9090, which can be scraped by Prometheus. -To setup Prometheus, Grafana and other monitoring tools with Dapr, visit [this]({{< ref "monitoring" >}}) link. +[Set up Prometheus, Grafana, and other monitoring tools with Dapr]({{< ref "observability" >}}). ## Injector watchdog -The Dapr Operator service includes an _injector watchdog_ which can be used to detect and remediate situations where your application's pods may be deployed without the Dapr sidecar (the `daprd` container) when they should have been. For example, it can assist with recovering the applications after a total cluster failure. +The Dapr Operator service includes an **injector watchdog**, which can be used to detect and remediate situations where your application's pods may be deployed without the Dapr sidecar (the `daprd` container). For example, it can assist with recovering the applications after a total cluster failure. -The injector watchdog is disabled by default when running Dapr in Kubernetes mode and it is recommended that you consider enabling it with values that are appropriate for your specific situation. +The injector watchdog is disabled by default when running Dapr in Kubernetes mode. However, you should consider enabling it with the appropriate values for your specific situation, as sketched below. -Refer to the documentation for the [Dapr operator]({{< ref operator >}}) service for more details on the injector watchdog and how to enable it. +Refer to the [Dapr operator service documentation]({{< ref operator >}}) for more details on the injector watchdog and how to enable it.
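As an illustrative sketch only, enabling the watchdog at upgrade time might look like the following, assuming the Helm chart exposes a `dapr_operator.watchInterval` value (verify the actual setting name and accepted values in the operator documentation linked above):

```bash
# Illustrative: re-run the injector watchdog on a 5 minute interval
# (value name assumed; confirm against the Dapr operator docs)
helm upgrade dapr dapr/dapr -n dapr-system --reuse-values --set dapr_operator.watchInterval=5m
```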
-## Configuring seccompProfile for sidecar containers +## Configure `seccompProfile` for sidecar containers -By default, the Dapr sidecar Injector injects a sidecar without any `seccompProfile`. However, to have Dapr sidecar container run successfully in a namespace with [Restricted](https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted) profile, the sidecar container needs to have `securityContext.seccompProfile.Type` to not be `nil`. +By default, the Dapr sidecar injector injects a sidecar without any `seccompProfile`. However, for the Dapr sidecar container to run successfully in a namespace with the [Restricted](https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted) profile, the sidecar container needs `securityContext.seccompProfile.Type` to not be `nil`. -Refer to [this]({{< ref "arguments-annotations-overview.md" >}}) documentation to set appropriate `seccompProfile` on sidecar container according to which profile it is running with. +Refer to [the Arguments and Annotations overview]({{< ref "arguments-annotations-overview.md" >}}) to set the appropriate `seccompProfile` on the sidecar container. ## Best Practices -Watch this video for a deep dive into the best practices for running Dapr in production with Kubernetes +Watch this video for a deep dive into the best practices for running Dapr in production with Kubernetes.
+ +## Related links + +- [Deploy Dapr on Kubernetes]({{< ref kubernetes-deploy.md >}}) +- [Upgrade Dapr on Kubernetes]({{< ref kubernetes-upgrade.md >}}) \ No newline at end of file diff --git a/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-upgrade.md b/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-upgrade.md index f62624bc628..aa26e270496 100644 --- a/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-upgrade.md +++ b/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-upgrade.md @@ -6,34 +6,37 @@ weight: 30000 description: "Follow these steps to upgrade Dapr on Kubernetes and ensure a smooth upgrade." --- -## Prerequisites - -- [Dapr CLI]({{< ref install-dapr-cli.md >}}) -- [Helm 3](https://github.com/helm/helm/releases) (if using Helm) - -## Upgrade existing cluster to {{% dapr-latest-version long="true" %}} -There are two ways to upgrade the Dapr control plane on a Kubernetes cluster using either the Dapr CLI or Helm. +You can upgrade the Dapr control plane on a Kubernetes cluster using either the Dapr CLI or Helm. {{% alert title="Note" color="primary" %}} -Refer to the [Dapr version policy]({{< ref "support-release-policy.md#upgrade-paths" >}}) for guidance on which versions of Dapr can be upgraded to which versions. +Refer to the [Dapr version policy]({{< ref "support-release-policy.md#upgrade-paths" >}}) for guidance on Dapr's upgrade path. {{% /alert %}} -### Dapr CLI +{{< tabs "Dapr CLI" "Helm" >}} + +{{% codetab %}} +## Upgrade using the Dapr CLI -The example below shows how to upgrade to version {{% dapr-latest-version long="true" %}}: +You can upgrade Dapr using the [Dapr CLI]({{< ref install-dapr-cli.md >}}). - ```bash - dapr upgrade -k --runtime-version={{% dapr-latest-version long="true" %}} - ``` +### Prerequisites -You can provide all the available Helm chart configurations using the Dapr CLI. -See [here](https://github.com/dapr/cli#supplying-helm-values) for more info. +- [Install the Dapr CLI]({{< ref install-dapr-cli.md >}}) +- An existing [Kubernetes cluster running with Dapr]({{< ref cluster >}}) -#### Troubleshooting upgrade using the CLI +### Upgrade existing cluster to {{% dapr-latest-version long="true" %}} + +```bash +dapr upgrade -k --runtime-version={{% dapr-latest-version long="true" %}} +``` + +[You can provide all the available Helm chart configurations using the Dapr CLI.](https://github.com/dapr/cli#supplying-helm-values) + +### Troubleshoot upgrading via the CLI There is a known issue running upgrades on clusters that may have previously had a version prior to 1.0.0-rc.2 installed on a cluster. -Most users should not encounter this issue, but there are a few upgrade path edge cases that may leave an incompatible CustomResourceDefinition installed on your cluster. The error message for this case looks like this: +While this issue is uncommon, a few upgrade path edge cases may leave an incompatible `CustomResourceDefinition` installed on your cluster. 
If this is your scenario, you may see an error message like the following: ``` ❌ Failed to upgrade Dapr: Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply @@ -41,31 +44,45 @@ The CustomResourceDefinition "configurations.dapr.io" is invalid: spec.preserveU ``` -To resolve this issue please run the follow command to upgrade the CustomResourceDefinition to a compatible version: +#### Solution -``` -kubectl replace -f https://raw.githubusercontent.com/dapr/dapr/5a15b3e0f093d2d0938b12f144c7047474a290fe/charts/dapr/crds/configuration.yaml -``` +1. Run the following command to upgrade the `CustomResourceDefinition` to a compatible version: + + ```sh + kubectl replace -f https://raw.githubusercontent.com/dapr/dapr/5a15b3e0f093d2d0938b12f144c7047474a290fe/charts/dapr/crds/configuration.yaml + ``` + +1. Proceed with the `dapr upgrade --runtime-version {{% dapr-latest-version long="true" %}} -k` command. + +{{% /codetab %}} -Then proceed with the `dapr upgrade --runtime-version {{% dapr-latest-version long="true" %}} -k` command as above. + +{{% codetab %}} +## Upgrade using Helm -### Helm +You can upgrade Dapr using a Helm v3 chart. -From version 1.0.0 onwards, upgrading Dapr using Helm is no longer a disruptive action since existing certificate values will automatically be re-used. +❗**Important:** The latest Dapr Helm chart no longer supports Helm v2. [Migrate from Helm v2 to Helm v3](https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/). -1. Upgrade Dapr from 1.0.0 (or newer) to any [NEW VERSION] > 1.0.0: +### Prerequisites - *Helm does not handle upgrading CRDs, so you need to perform that manually. CRDs are backward-compatible and should only be installed forward.* +- [Install Helm v3](https://github.com/helm/helm/releases) +- An existing [Kubernetes cluster running with Dapr]({{< ref cluster >}}) - >Note: The Dapr version is included in the commands below. +### Upgrade existing cluster to {{% dapr-latest-version long="true" %}} - For version {{% dapr-latest-version long="true" %}}: +From version 1.0.0 onwards, existing certificate values are automatically reused when upgrading Dapr using Helm. + +> **Note** Helm does not handle upgrading CRDs, so you need to perform that manually. CRDs are backward-compatible and should only be installed forward. + +1. 
Upgrade Dapr to version {{% dapr-latest-version long="true" %}}: ```bash kubectl replace -f https://raw.githubusercontent.com/dapr/dapr/v{{% dapr-latest-version long="true" %}}/charts/dapr/crds/components.yaml kubectl replace -f https://raw.githubusercontent.com/dapr/dapr/v{{% dapr-latest-version long="true" %}}/charts/dapr/crds/configuration.yaml kubectl replace -f https://raw.githubusercontent.com/dapr/dapr/v{{% dapr-latest-version long="true" %}}/charts/dapr/crds/subscription.yaml kubectl apply -f https://raw.githubusercontent.com/dapr/dapr/v{{% dapr-latest-version long="true" %}}/charts/dapr/crds/resiliency.yaml + kubectl apply -f https://raw.githubusercontent.com/dapr/dapr/v{{% dapr-latest-version long="true" %}}/charts/dapr/crds/httpendpoints.yaml ``` ```bash @@ -75,9 +92,9 @@ From version 1.0.0 onwards, upgrading Dapr using Helm is no longer a disruptive ```bash helm upgrade dapr dapr/dapr --version {{% dapr-latest-version long="true" %}} --namespace dapr-system --wait ``` - *If you're using a values file, remember to add the `--values` option when running the upgrade command.* + > If you're using a values file, remember to add the `--values` option when running the upgrade command. -2. Ensure all pods are running: +1. Ensure all pods are running: ```bash kubectl get pods -n dapr-system -w @@ -90,20 +107,23 @@ From version 1.0.0 onwards, upgrading Dapr using Helm is no longer a disruptive dapr-sidecar-injector-68f868668f-6xnbt 1/1 Running 0 41s ``` -3. Restart your application deployments to update the Dapr runtime: +1. Restart your application deployments to update the Dapr runtime: ```bash kubectl rollout restart deploy/ ``` -4. All done! +{{% /codetab %}} + +{{< /tabs >}} -#### Upgrading existing Dapr to enable high availability mode -Enabling HA mode in an existing Dapr deployment requires additional steps. Please refer to [this paragraph]({{< ref "kubernetes-production.md#enabling-high-availability-in-an-existing-dapr-deployment" >}}) for more details. +## Upgrade existing Dapr deployment to enable high availability mode +[Enable high availability mode in an existing Dapr deployment with a few additional steps.]({{< ref "kubernetes-production.md#enabling-high-availability-in-an-existing-dapr-deployment" >}}) -## Next steps +## Related links - [Dapr on Kubernetes]({{< ref kubernetes-overview.md >}}) -- [Dapr production guidelines]({{< ref kubernetes-production.md >}}) +- [More on upgrading Dapr with Helm]({{< ref "kubernetes-production.md#upgrade-dapr-with-helm" >}}) +- [Dapr production guidelines]({{< ref kubernetes-production.md >}}) \ No newline at end of file diff --git a/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-volume-mounts.md b/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-volume-mounts.md index bb41bfda205..00ee915dda2 100644 --- a/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-volume-mounts.md +++ b/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-volume-mounts.md @@ -6,8 +6,6 @@ weight: 80000 description: "Configure the Dapr sidecar to mount Pod Volumes" --- -## Introduction - The Dapr sidecar can be configured to mount any Kubernetes Volume attached to the application Pod. These Volumes can be accessed by the `daprd` (sidecar) container in _read-only_ or _read-write_ modes. If a Volume is configured to be mounted but it does not exist in the Pod, Dapr logs a warning and ignores it.
For more information on different types of Volumes, check the [Kubernetes documentation](https://kubernetes.io/docs/concepts/storage/volumes/). @@ -16,21 +14,30 @@ For more information on different types of Volumes, check the [Kubernetes docume ## Configuration You can set the following annotations in your deployment YAML: -1. **dapr.io/volume-mounts**: for read-only volume mounts -1. **dapr.io/volume-mounts-rw**: for read-write volume mounts +| Annotation | Description | +| ---------- | ----------- | +| `dapr.io/volume-mounts` | For read-only volume mounts | +| `dapr.io/volume-mounts-rw` | For read-write volume mounts | -These annotations are comma separated pairs of `volume-name:path/in/container`. Make sure that the corresponding Volumes exist in the Pod spec. +These annotations are comma-separated pairs of `volume-name:path/in/container`. Verify the corresponding Volumes exist in the Pod spec. Within the official container images, Dapr runs as a process with user ID (UID) `65532`. Make sure that folders and files inside the mounted Volume are writable or readable by user `65532` as appropriate. -Although you can mount a Volume in any folder within the Dapr sidecar container, prevent conflicts and ensure smooth operations going forward by placing all mountpoints within one of these two locations, or in a subfolder within them: +Although you can mount a Volume in any folder within the Dapr sidecar container, prevent conflicts and ensure smooth operations going forward by placing all mountpoints within one of the following locations, or in a subfolder within them: + +| Location | Description | +| -------- | ----------- | +| `/mnt` | Recommended for Volumes containing persistent data that the Dapr sidecar process can read and/or write. | +| `/tmp` | Recommended for Volumes containing temporary data, such as scratch disks. | -- `/mnt` is recommended for Volumes containing persistent data that the Dapr sidecar process can read and/or write. -- `/tmp` is recommended for Volumes containing temporary data, such as scratch disks. +## Examples -### Example +### Basic deployment resource example -In the example Deployment resource below, `my-volume1` and `my-volume2` are available inside the sidecar container at `/mnt/sample1` and `/mnt/sample2` respectively, in read-only mode. `my-volume3` is available inside the sidecar container at `/tmp/sample3` in read-write mode. +In the example Deployment resource below: +- `my-volume1` is available inside the sidecar container at `/mnt/sample1` in read-only mode +- `my-volume2` is available inside the sidecar container at `/mnt/sample2` in read-only mode +- `my-volume3` is available inside the sidecar container at `/tmp/sample3` in read-write mode ```yaml apiVersion: apps/v1 @@ -68,59 +75,57 @@ spec: ... ```
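For instance, the deployment example above would carry pod annotations like the following sketch, using the comma-separated `volume-name:path/in/container` format described earlier (the app-id is an illustrative placeholder; the volume names match the example):

```yaml
annotations:
  dapr.io/enabled: "true"
  dapr.io/app-id: "myapp"   # illustrative app-id
  # read-only mounts: my-volume1 and my-volume2
  dapr.io/volume-mounts: "my-volume1:/mnt/sample1,my-volume2:/mnt/sample2"
  # read-write mount: my-volume3
  dapr.io/volume-mounts-rw: "my-volume3:/tmp/sample3"
```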
- annotations: - dapr.io/enabled: "true" - dapr.io/app-id: "myapp" - dapr.io/app-port: "8000" - dapr.io/volume-mounts: "nfs-secrets-vol:/mnt/secrets" - spec: - volumes: - - name: nfs-secrets-vol - nfs: - server: 10.201.202.203 - path: /secrets/stage - ... - ``` - -2. Point the local file secret store component to the attached file. - - ```yaml - apiVersion: dapr.io/v1alpha1 - kind: Component - metadata: - name: local-secret-store - spec: - type: secretstores.local.file - version: v1 - metadata: - - name: secretsFile - value: /mnt/secrets/secrets.json - ``` + ```yaml + apiVersion: apps/v1 + kind: Deployment + metadata: + name: myapp + ... + spec: + ... + template: + ... + annotations: + dapr.io/enabled: "true" + dapr.io/app-id: "myapp" + dapr.io/app-port: "8000" + dapr.io/volume-mounts: "nfs-secrets-vol:/mnt/secrets" + spec: + volumes: + - name: nfs-secrets-vol + nfs: + server: 10.201.202.203 + path: /secrets/stage + ... + ``` + +1. Point the local file secret store component to the attached file. + + ```yaml + apiVersion: dapr.io/v1alpha1 + kind: Component + metadata: + name: local-secret-store + spec: + type: secretstores.local.file + version: v1 + metadata: + - name: secretsFile + value: /mnt/secrets/secrets.json + ``` -3. Use the secrets. +1. Use the secrets. - ``` - GET http://localhost:/v1.0/secrets/local-secret-store/my-secret - ``` + ``` + GET http://localhost:/v1.0/secrets/local-secret-store/my-secret + ``` ## Related links -- [Dapr Kubernetes pod annotations spec]({{< ref arguments-annotations-overview.md >}}) +[Dapr Kubernetes pod annotations spec]({{< ref arguments-annotations-overview.md >}}) diff --git a/daprdocs/content/en/operations/hosting/self-hosted/self-hosted-with-docker.md b/daprdocs/content/en/operations/hosting/self-hosted/self-hosted-with-docker.md index 3211ac08c13..3e7c090cbfd 100644 --- a/daprdocs/content/en/operations/hosting/self-hosted/self-hosted-with-docker.md +++ b/daprdocs/content/en/operations/hosting/self-hosted/self-hosted-with-docker.md @@ -16,7 +16,7 @@ This article provides guidance on running Dapr with Docker on a Windows/Linux/ma ## Initialize Dapr environment -To initialize the Dapr control-plane containers and create a default configuration file, run: +To initialize the Dapr control plane containers and create a default configuration file, run: ```bash dapr init diff --git a/daprdocs/content/en/operations/hosting/self-hosted/self-hosted-with-podman.md b/daprdocs/content/en/operations/hosting/self-hosted/self-hosted-with-podman.md index fda567276bf..bbb1184e878 100644 --- a/daprdocs/content/en/operations/hosting/self-hosted/self-hosted-with-podman.md +++ b/daprdocs/content/en/operations/hosting/self-hosted/self-hosted-with-podman.md @@ -15,7 +15,7 @@ This article provides guidance on running Dapr with Podman on a Windows/Linux/ma ## Initialize Dapr environment -To initialize the Dapr control-plane containers and create a default configuration file, run: +To initialize the Dapr control plane containers and create a default configuration file, run: ```bash dapr init --container-runtime podman diff --git a/daprdocs/content/en/operations/monitoring/_index.md b/daprdocs/content/en/operations/monitoring/_index.md deleted file mode 100644 index decdfcabcc2..00000000000 --- a/daprdocs/content/en/operations/monitoring/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -type: docs -title: "Observe your application with Dapr" -linkTitle: "Observability" -weight: 80 -description: "How to observe and gain insights into your application" ---- diff --git 
a/daprdocs/content/en/operations/monitoring/metrics/azure-monitor.md b/daprdocs/content/en/operations/monitoring/metrics/azure-monitor.md deleted file mode 100644 index d63b820ea56..00000000000 --- a/daprdocs/content/en/operations/monitoring/metrics/azure-monitor.md +++ /dev/null @@ -1,132 +0,0 @@ ---- -type: docs -title: "How-To: Set up Azure Monitor to search logs and collect metrics" -linkTitle: "Azure Monitor" -weight: 7000 -description: "Enable Dapr metrics and logs with Azure Monitor for Azure Kubernetes Service (AKS)" ---- - -## Prerequisites - -- [Azure Kubernetes Service](https://docs.microsoft.com/azure/aks/) -- [Enable Azure Monitor For containers in AKS](https://docs.microsoft.com/azure/azure-monitor/insights/container-insights-overview) -- [kubectl](https://kubernetes.io/docs/tasks/tools/) -- [Helm 3](https://helm.sh/) - -## Enable Prometheus metric scrape using config map - -1. Make sure that omsagents are running - -```bash -$ kubectl get pods -n kube-system -NAME READY STATUS RESTARTS AGE -... -omsagent-75qjs 1/1 Running 1 44h -omsagent-c7c4t 1/1 Running 0 44h -omsagent-rs-74f488997c-dshpx 1/1 Running 1 44h -omsagent-smtk7 1/1 Running 1 44h -... -``` - -2. Apply config map to enable Prometheus metrics endpoint scrape. - -You can use [azm-config-map.yaml](/docs/azm-config-map.yaml) to enable prometheus metrics endpoint scrape. - -If you installed Dapr to the different namespace, you need to change the `monitor_kubernetes_pod_namespaces` array values. For example: - -```yaml -... - prometheus-data-collection-settings: |- - [prometheus_data_collection_settings.cluster] - interval = "1m" - monitor_kubernetes_pods = true - monitor_kubernetes_pods_namespaces = ["dapr-system", "default"] - [prometheus_data_collection_settings.node] - interval = "1m" -... -``` - -Apply config map: - -```bash -kubectl apply -f ./azm-config.map.yaml -``` - -## Install Dapr with JSON formatted logs - -1. Install Dapr with enabling JSON-formatted logs - -```bash -helm install dapr dapr/dapr --namespace dapr-system --set global.logAsJson=true -``` - -2. Enable JSON formatted log in Dapr sidecar and add Prometheus annotations. - -> Note: OMS Agent scrapes the metrics only if replicaset has Prometheus annotations. - -Add `dapr.io/log-as-json: "true"` annotation to your deployment yaml. - -Example: -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: pythonapp - namespace: default - labels: - app: python -spec: - replicas: 1 - selector: - matchLabels: - app: python - template: - metadata: - labels: - app: python - annotations: - dapr.io/enabled: "true" - dapr.io/app-id: "pythonapp" - dapr.io/log-as-json: "true" - prometheus.io/scrape: "true" - prometheus.io/port: "9090" - prometheus.io/path: "/" - -... -``` - -## Search metrics and logs with Azure Monitor - -1. Go to Azure Monitor - -2. Search Dapr logs - -Here is an example query, to parse JSON formatted logs and query logs from dapr system processes. - -``` -ContainerLog -| extend parsed=parse_json(LogEntry) -| project Time=todatetime(parsed['time']), app_id=parsed['app_id'], scope=parsed['scope'],level=parsed['level'], msg=parsed['msg'], type=parsed['type'], ver=parsed['ver'], instance=parsed['instance'] -| where level != "" -| sort by Time -``` - -3. 
Search metrics - -This query, queries process_resident_memory_bytes Prometheus metrics for Dapr system processes and renders timecharts - -``` -InsightsMetrics -| where Namespace == "prometheus" and Name == "process_resident_memory_bytes" -| extend tags=parse_json(Tags) -| project TimeGenerated, Name, Val, app=tostring(tags['app']) -| summarize memInBytes=percentile(Val, 99) by bin(TimeGenerated, 1m), app -| where app startswith "dapr-" -| render timechart -``` - -# References - -* [Configure scraping of Prometheus metrics with Azure Monitor for containers](https://docs.microsoft.com/azure/azure-monitor/insights/container-insights-prometheus-integration) -* [Configure agent data collection for Azure Monitor for containers](https://docs.microsoft.com/azure/azure-monitor/insights/container-insights-agent-config) -* [Azure Monitor Query](https://docs.microsoft.com/azure/azure-monitor/log-query/query-language) diff --git a/daprdocs/content/en/operations/monitoring/tracing/_index.md b/daprdocs/content/en/operations/monitoring/tracing/_index.md deleted file mode 100644 index d9948ff4bfb..00000000000 --- a/daprdocs/content/en/operations/monitoring/tracing/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -type: docs -title: "Tracing" -linkTitle: "Tracing" -weight: 100 -description: "How to setup your observability tools to receive application traces" ---- diff --git a/daprdocs/content/en/operations/monitoring/tracing/otel-collector/open-telemetry-collector-appinsights.md b/daprdocs/content/en/operations/monitoring/tracing/otel-collector/open-telemetry-collector-appinsights.md deleted file mode 100644 index 4fe2c95a720..00000000000 --- a/daprdocs/content/en/operations/monitoring/tracing/otel-collector/open-telemetry-collector-appinsights.md +++ /dev/null @@ -1,72 +0,0 @@ ---- -type: docs -title: "Using OpenTelemetry Collector to collect traces to send to AppInsights" -linkTitle: "Using the OpenTelemetry for Azure AppInsights" -weight: 1000 -description: "How to push trace events to Azure Application Insights, using the OpenTelemetry Collector." ---- - -Dapr integrates with [OpenTelemetry Collector](https://github.com/open-telemetry/opentelemetry-collector) using the Zipkin API. This guide walks through an example using Dapr to push trace events to Azure Application Insights, using the OpenTelemetry Collector. - -## Requirements - -A installation of Dapr on Kubernetes. - -## How to configure distributed tracing with Application Insights - -### Setup Application Insights - -1. First, you'll need an Azure account. See instructions [here](https://azure.microsoft.com/free/) to apply for a **free** Azure account. -2. Follow instructions [here](https://docs.microsoft.com/azure/azure-monitor/app/create-new-resource) to create a new Application Insights resource. -3. Get the Application Insights Intrumentation key from your Application Insights page. - -### Run OpenTelemetry Collector to push to your Application Insights instance - -Install the OpenTelemetry Collector to your Kubernetes cluster to push events to your Application Insights instance - -1. Check out the file [open-telemetry-collector-appinsights.yaml](/docs/open-telemetry-collector/open-telemetry-collector-appinsights.yaml) and replace the `` placeholder with your Application Insights Instrumentation Key. - -2. Apply the configuration with `kubectl apply -f open-telemetry-collector-appinsights.yaml`. - -Next, set up both a Dapr configuration file to turn on tracing and deploy a tracing exporter component that uses the OpenTelemetry Collector. - -1. 
Create a collector-config.yaml file with this [content](/docs/open-telemetry-collector/collector-config.yaml) - -2. Apply the configuration with `kubectl apply -f collector-config.yaml`. - -### Deploy your app with tracing - -When running in Kubernetes mode, apply the `appconfig` configuration by adding a `dapr.io/config` annotation to the container that you want to participate in the distributed tracing, as shown in the following example: - -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - ... -spec: - ... - template: - metadata: - ... - annotations: - dapr.io/enabled: "true" - dapr.io/app-id: "MyApp" - dapr.io/app-port: "8080" - dapr.io/config: "appconfig" -``` - -Some of the quickstarts such as [distributed calculator](https://github.com/dapr/quickstarts/tree/master/tutorials/distributed-calculator) already configure these settings, so if you are using those no additional settings are needed. - -That's it! There's no need include any SDKs or instrument your application code. Dapr automatically handles the distributed tracing for you. - -> **NOTE**: You can register multiple tracing exporters at the same time, and the tracing logs are forwarded to all registered exporters. - -Deploy and run some applications. After a few minutes, you should see tracing logs appearing in your Application Insights resource. You can also use the **Application Map** to examine the topology of your services, as shown below: - -![Application map](/images/open-telemetry-app-insights.png) - -> **NOTE**: Only operations going through Dapr API exposed by Dapr sidecar (e.g. service invocation or event publishing) are displayed in Application Map topology. - -## Related links -* Try out the [observability quickstart](https://github.com/dapr/quickstarts/tree/master/tutorials/observability/README.md) -* How to set [tracing configuration options]({{< ref "configuration-overview.md#tracing" >}}) diff --git a/daprdocs/content/en/operations/monitoring/tracing/otel-collector/open-telemetry-collector.md b/daprdocs/content/en/operations/monitoring/tracing/otel-collector/open-telemetry-collector.md deleted file mode 100644 index 4526683a52e..00000000000 --- a/daprdocs/content/en/operations/monitoring/tracing/otel-collector/open-telemetry-collector.md +++ /dev/null @@ -1,74 +0,0 @@ ---- -type: docs -title: "Using OpenTelemetry Collector to collect traces" -linkTitle: "Using the OpenTelemetry Collector" -weight: 900 -description: "How to use Dapr to push trace events through the OpenTelemetry Collector." ---- - -{{% alert title="Note" color="primary" %}} -Dapr directly writes traces using the OpenTelemetry (OTEL) protocol as the recommended method. For observability tools that support OTEL protocol, you do not need to use the OpenTelemetry Collector. - -Dapr can also write traces using the Zipkin protocol. Previous to supporting the OTEL protocol, combining the Zipkin protocol with the [OpenTelemetry Collector](https://github.com/open-telemetry/opentelemetry-collector) enabled you to send traces to observability tools such as AWS X-Ray, Google Cloud Operations Suite, and Azure AppInsights. This approach remains for reference purposes only. -{{% /alert %}} - -![Using OpenTelemetry Collect to integrate with many backend](/images/open-telemetry-collector.png) - -## Requirements - -1. A installation of Dapr on Kubernetes. - -2. You are already setting up your trace backends to receive traces. - -3. 
Check OpenTelemetry Collector exporters [here](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter) and [here](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter) to see if your trace backend is supported by the OpenTelemetry Collector. On those linked pages, find the exporter you want to use and read its doc to find out the parameters required. - -## Setting OpenTelemetry Collector - -### Run OpenTelemetry Collector to push to your trace backend - -1. Check out the file [open-telemetry-collector-generic.yaml](/docs/open-telemetry-collector/open-telemetry-collector-generic.yaml) and replace the section marked with `` with the correct settings for your trace exporter. Again, refer to the OpenTelemetry Collector links in the Prerequisites section to determine the correct settings. - -2. Apply the configuration with `kubectl apply -f open-telemetry-collector-generic.yaml`. - -## Set up Dapr to send trace to OpenTelemetry Collector - -### Turn on tracing in Dapr -Next, set up both a Dapr configuration file to turn on tracing and deploy a tracing exporter component that uses the OpenTelemetry Collector. - -1. Create a collector-config.yaml file with this [content](/docs/open-telemetry-collector/collector-config.yaml) - -2. Apply the configuration with `kubectl apply -f collector-config.yaml`. - -### Deploy your app with tracing - -When running in Kubernetes mode, apply the `appconfig` configuration by adding a `dapr.io/config` annotation to the container that you want to participate in the distributed tracing, as shown in the following example: - -```yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - ... -spec: - ... - template: - metadata: - ... - annotations: - dapr.io/enabled: "true" - dapr.io/app-id: "MyApp" - dapr.io/app-port: "8080" - dapr.io/config: "appconfig" -``` - -Some of the quickstarts such as [distributed calculator](https://github.com/dapr/quickstarts/tree/master/tutorials/distributed-calculator) already configure these settings, so if you are using those no additional settings are needed. - -That's it! There's no need include any SDKs or instrument your application code. Dapr automatically handles the distributed tracing for you. - -> **NOTE**: You can register multiple tracing exporters at the same time, and the tracing logs are forwarded to all registered exporters. - -Deploy and run some applications. Wait for the trace to propagate to your tracing backend and view them there. - -## Related links -* Try out the [observability quickstart](https://github.com/dapr/quickstarts/tree/master/tutorials/observability/README.md) -* How to set [tracing configuration options]({{< ref "configuration-overview.md#tracing" >}}) - diff --git a/daprdocs/content/en/operations/observability/_index.md b/daprdocs/content/en/operations/observability/_index.md new file mode 100644 index 00000000000..fbbd1abbeb0 --- /dev/null +++ b/daprdocs/content/en/operations/observability/_index.md @@ -0,0 +1,18 @@ +--- +type: docs +title: "Observability" +linkTitle: "Observability" +weight: 60 +description: See and measure the message calls to components and between networked services +--- + +[The following overview video and demo](https://www.youtube.com/live/0y7ne6teHT4?si=3bmNSSyIEIVSF-Ej&t=9931) demonstrates how observability in Dapr works. 
+ + + +{{% alert title="More about Dapr Observability" color="primary" %}} + Learn more about how to use Dapr Observability: + - Explore observability via any of the supporting [Dapr SDKs]({{< ref sdks >}}). + - Review the [Observability API reference documentation]({{< ref health_api.md >}}). + - Read the [general overview of the observability concept]({{< ref observability-concept >}}) in Dapr. +{{% /alert %}} diff --git a/daprdocs/content/en/operations/monitoring/logging/_index.md b/daprdocs/content/en/operations/observability/logging/_index.md similarity index 100% rename from daprdocs/content/en/operations/monitoring/logging/_index.md rename to daprdocs/content/en/operations/observability/logging/_index.md diff --git a/daprdocs/content/en/operations/monitoring/logging/fluentd.md b/daprdocs/content/en/operations/observability/logging/fluentd.md similarity index 100% rename from daprdocs/content/en/operations/monitoring/logging/fluentd.md rename to daprdocs/content/en/operations/observability/logging/fluentd.md diff --git a/daprdocs/content/en/operations/monitoring/logging/logs.md b/daprdocs/content/en/operations/observability/logging/logs.md similarity index 99% rename from daprdocs/content/en/operations/monitoring/logging/logs.md rename to daprdocs/content/en/operations/observability/logging/logs.md index 397588b6969..5d0d9492a5f 100644 --- a/daprdocs/content/en/operations/monitoring/logging/logs.md +++ b/daprdocs/content/en/operations/observability/logging/logs.md @@ -1,7 +1,7 @@ --- type: docs title: "Logs" -linkTitle: "Logs" +linkTitle: "Overview" weight: 1000 description: "Understand Dapr logging" --- diff --git a/daprdocs/content/en/operations/monitoring/logging/newrelic.md b/daprdocs/content/en/operations/observability/logging/newrelic.md similarity index 100% rename from daprdocs/content/en/operations/monitoring/logging/newrelic.md rename to daprdocs/content/en/operations/observability/logging/newrelic.md diff --git a/daprdocs/content/en/operations/monitoring/metrics/_index.md b/daprdocs/content/en/operations/observability/metrics/_index.md similarity index 77% rename from daprdocs/content/en/operations/monitoring/metrics/_index.md rename to daprdocs/content/en/operations/observability/metrics/_index.md index 14e6885c851..dd7496e7276 100644 --- a/daprdocs/content/en/operations/monitoring/metrics/_index.md +++ b/daprdocs/content/en/operations/observability/metrics/_index.md @@ -1,7 +1,7 @@ --- type: docs title: "Metrics" -linkTitle: "View metrics" +linkTitle: "Metrics" weight: 300 description: "How to view Dapr metrics" --- diff --git a/daprdocs/content/en/operations/observability/metrics/azure-monitor.md b/daprdocs/content/en/operations/observability/metrics/azure-monitor.md new file mode 100644 index 00000000000..3011ef399b4 --- /dev/null +++ b/daprdocs/content/en/operations/observability/metrics/azure-monitor.md @@ -0,0 +1,134 @@ +--- +type: docs +title: "How-To: Set up Azure Monitor to search logs and collect metrics" +linkTitle: "Azure Monitor" +weight: 7000 +description: "Enable Dapr metrics and logs with Azure Monitor for Azure Kubernetes Service (AKS)" +--- + +## Prerequisites + +- [Azure Kubernetes Service](https://docs.microsoft.com/azure/aks/) +- [Enable Azure Monitor For containers in AKS](https://docs.microsoft.com/azure/azure-monitor/insights/container-insights-overview) +- [kubectl](https://kubernetes.io/docs/tasks/tools/) +- [Helm 3](https://helm.sh/) + +## Enable Prometheus metric scrape using config map + +1. 
Make sure that Azure Monitor Agents (AMA) are running. + + ```bash + $ kubectl get pods -n kube-system + NAME READY STATUS RESTARTS AGE + ... + ama-logs-48kpv 2/2 Running 0 2d13h + ama-logs-mx24c 2/2 Running 0 2d13h + ama-logs-rs-f9bbb9898-vbt6k 1/1 Running 0 30h + ama-logs-sm2mz 2/2 Running 0 2d13h + ama-logs-z7p4c 2/2 Running 0 2d13h + ... + ``` + +1. Apply config map to enable Prometheus metrics endpoint scrape. + + You can use [azm-config-map.yaml](/docs/azm-config-map.yaml) to enable Prometheus metrics endpoint scrape. + + If you installed Dapr to a different namespace, you need to change the `monitor_kubernetes_pod_namespaces` array values. For example: + + ```yaml + ... + prometheus-data-collection-settings: |- + [prometheus_data_collection_settings.cluster] + interval = "1m" + monitor_kubernetes_pods = true + monitor_kubernetes_pods_namespaces = ["dapr-system", "default"] + [prometheus_data_collection_settings.node] + interval = "1m" + ... + ``` + + Apply config map: + + ```bash + kubectl apply -f ./azm-config.map.yaml + ``` + +## Install Dapr with JSON-formatted logs + +1. Install Dapr with JSON-formatted logs enabled. + + ```bash + helm install dapr dapr/dapr --namespace dapr-system --set global.logAsJson=true + ``` + +1. Enable JSON-formatted logs in the Dapr sidecar and add Prometheus annotations. + + > Note: The Azure Monitor Agents (AMA) only send the metrics if the Prometheus annotations are set. + + Add `dapr.io/log-as-json: "true"` annotation to your deployment yaml. + + Example: + + ```yaml + apiVersion: apps/v1 + kind: Deployment + metadata: + name: pythonapp + namespace: default + labels: + app: python + spec: + replicas: 1 + selector: + matchLabels: + app: python + template: + metadata: + labels: + app: python + annotations: + dapr.io/enabled: "true" + dapr.io/app-id: "pythonapp" + dapr.io/log-as-json: "true" + prometheus.io/scrape: "true" + prometheus.io/port: "9090" + prometheus.io/path: "/" + + ... + ``` + +## Search metrics and logs with Azure Monitor + +1. Go to Azure Monitor in the Azure portal. + +1. Search Dapr **Logs**. + + Here is an example query to parse JSON-formatted logs and query logs from Dapr system processes. + + ``` + ContainerLog + | extend parsed=parse_json(LogEntry) + | project Time=todatetime(parsed['time']), app_id=parsed['app_id'], scope=parsed['scope'],level=parsed['level'], msg=parsed['msg'], type=parsed['type'], ver=parsed['ver'], instance=parsed['instance'] + | where level != "" + | sort by Time + ``` + +1. Search **Metrics**. + + This query retrieves the `process_resident_memory_bytes` Prometheus metric for Dapr system processes and renders a timechart.
+ + ``` + InsightsMetrics + | where Namespace == "prometheus" and Name == "process_resident_memory_bytes" + | extend tags=parse_json(Tags) + | project TimeGenerated, Name, Val, app=tostring(tags['app']) + | summarize memInBytes=percentile(Val, 99) by bin(TimeGenerated, 1m), app + | where app startswith "dapr-" + | render timechart + ``` + +## References + +- [Configure scraping of Prometheus metrics with Azure Monitor for containers](https://docs.microsoft.com/azure/azure-monitor/insights/container-insights-prometheus-integration) +- [Configure agent data collection for Azure Monitor for containers](https://docs.microsoft.com/azure/azure-monitor/insights/container-insights-agent-config) +- [Azure Monitor Query](https://docs.microsoft.com/azure/azure-monitor/log-query/query-language) diff --git a/daprdocs/content/en/operations/monitoring/metrics/grafana.md b/daprdocs/content/en/operations/observability/metrics/grafana.md similarity index 100% rename from daprdocs/content/en/operations/monitoring/metrics/grafana.md rename to daprdocs/content/en/operations/observability/metrics/grafana.md diff --git a/daprdocs/content/en/operations/monitoring/metrics/metrics-overview.md b/daprdocs/content/en/operations/observability/metrics/metrics-overview.md similarity index 99% rename from daprdocs/content/en/operations/monitoring/metrics/metrics-overview.md rename to daprdocs/content/en/operations/observability/metrics/metrics-overview.md index 0d30185dbb8..2b6e86a3f52 100644 --- a/daprdocs/content/en/operations/monitoring/metrics/metrics-overview.md +++ b/daprdocs/content/en/operations/observability/metrics/metrics-overview.md @@ -1,7 +1,7 @@ --- type: docs title: "Configure metrics" -linkTitle: "Configure metrics" +linkTitle: "Overview" weight: 4000 description: "Enable or disable Dapr metrics " --- diff --git a/daprdocs/content/en/operations/monitoring/metrics/newrelic.md b/daprdocs/content/en/operations/observability/metrics/newrelic.md similarity index 100% rename from daprdocs/content/en/operations/monitoring/metrics/newrelic.md rename to daprdocs/content/en/operations/observability/metrics/newrelic.md diff --git a/daprdocs/content/en/operations/monitoring/metrics/prometheus.md b/daprdocs/content/en/operations/observability/metrics/prometheus.md similarity index 100% rename from daprdocs/content/en/operations/monitoring/metrics/prometheus.md rename to daprdocs/content/en/operations/observability/metrics/prometheus.md diff --git a/daprdocs/content/en/operations/observability/tracing/_index.md b/daprdocs/content/en/operations/observability/tracing/_index.md new file mode 100644 index 00000000000..f29b7ba7e10 --- /dev/null +++ b/daprdocs/content/en/operations/observability/tracing/_index.md @@ -0,0 +1,7 @@ +--- +type: docs +title: "Tracing" +linkTitle: "Tracing" +weight: 200 +description: Learn about tracing scenarios and how to use tracing for visibility in your application +--- \ No newline at end of file diff --git a/daprdocs/content/en/operations/monitoring/tracing/datadog.md b/daprdocs/content/en/operations/observability/tracing/datadog.md similarity index 100% rename from daprdocs/content/en/operations/monitoring/tracing/datadog.md rename to daprdocs/content/en/operations/observability/tracing/datadog.md diff --git a/daprdocs/content/en/operations/monitoring/tracing/jaeger.md b/daprdocs/content/en/operations/observability/tracing/jaeger.md similarity index 100% rename from daprdocs/content/en/operations/monitoring/tracing/jaeger.md rename to 
daprdocs/content/en/operations/observability/tracing/jaeger.md diff --git a/daprdocs/content/en/operations/monitoring/tracing/newrelic.md b/daprdocs/content/en/operations/observability/tracing/newrelic.md similarity index 100% rename from daprdocs/content/en/operations/monitoring/tracing/newrelic.md rename to daprdocs/content/en/operations/observability/tracing/newrelic.md diff --git a/daprdocs/content/en/operations/monitoring/tracing/otel-collector/_index.md b/daprdocs/content/en/operations/observability/tracing/otel-collector/_index.md similarity index 100% rename from daprdocs/content/en/operations/monitoring/tracing/otel-collector/_index.md rename to daprdocs/content/en/operations/observability/tracing/otel-collector/_index.md diff --git a/daprdocs/content/en/operations/observability/tracing/otel-collector/open-telemetry-collector-appinsights.md b/daprdocs/content/en/operations/observability/tracing/otel-collector/open-telemetry-collector-appinsights.md new file mode 100644 index 00000000000..c851ec8a495 --- /dev/null +++ b/daprdocs/content/en/operations/observability/tracing/otel-collector/open-telemetry-collector-appinsights.md @@ -0,0 +1,83 @@ +--- +type: docs +title: "Using OpenTelemetry Collector to collect traces to send to App Insights" +linkTitle: "Using the OpenTelemetry for Azure App Insights" +weight: 1000 +description: "How to push trace events to Azure Application Insights, using the OpenTelemetry Collector." +--- + +Dapr integrates with [OpenTelemetry (OTEL) Collector](https://github.com/open-telemetry/opentelemetry-collector) using the Zipkin API. This guide walks through an example using Dapr to push trace events to Azure Application Insights, using the OpenTelemetry Collector. + +## Prerequisites + +- [Install Dapr on Kubernetes]({{< ref kubernetes >}}) +- [Set up an App Insights resource](https://docs.microsoft.com/azure/azure-monitor/app/create-new-resource) and make note of your App Insights instrumentation key. + +## Set up OTEL Collector to push to your App Insights instance + +To push events to your App Insights instance, install the OTEL Collector to your Kubernetes cluster. + +1. Check out the [`open-telemetry-collector-appinsights.yaml`](/docs/open-telemetry-collector/open-telemetry-collector-appinsights.yaml) file. + +1. Replace the `` placeholder with your App Insights instrumentation key. + +1. Apply the configuration with: + + ```sh + kubectl apply -f open-telemetry-collector-appinsights.yaml + ``` + +## Set up Dapr to send trace to OTEL Collector + +Set up a Dapr configuration file to turn on tracing and deploy a tracing exporter component that uses the OpenTelemetry Collector. + +1. Use this [`collector-config.yaml`](/docs/open-telemetry-collector/collector-config.yaml) file to create your own configuration. + +1. Apply the configuration with: + + ```sh + kubectl apply -f collector-config.yaml + ``` + +## Deploy your app with tracing + +Apply the `appconfig` configuration by adding a `dapr.io/config` annotation to the container that you want to participate in the distributed tracing, as shown in the following example: + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + ... +spec: + ... + template: + metadata: + ... 
+      annotations:
+        dapr.io/enabled: "true"
+        dapr.io/app-id: "MyApp"
+        dapr.io/app-port: "8080"
+        dapr.io/config: "appconfig"
+```
+
+{{% alert title="Note" color="primary" %}}
+If you are using one of the Dapr tutorials, such as [distributed calculator](https://github.com/dapr/quickstarts/tree/master/tutorials/distributed-calculator), the `appconfig` configuration is already configured, so no additional settings are needed.
+{{% /alert %}}
+
+You can register multiple tracing exporters at the same time, and the tracing logs are forwarded to all registered exporters.
+
+That's it! There's no need to include any SDKs or instrument your application code. Dapr automatically handles the distributed tracing for you.
+
+## View traces
+
+Deploy and run some applications. After a few minutes, you should see tracing logs appearing in your App Insights resource. You can also use the **Application Map** to examine the topology of your services, as shown below:
+
+![Application map](/images/open-telemetry-app-insights.png)
+
+{{% alert title="Note" color="primary" %}}
+Only operations going through the Dapr API exposed by the Dapr sidecar (for example, service invocation or event publishing) are displayed in the Application Map topology.
+{{% /alert %}}
+
+## Related links
+- Try out the [observability quickstart](https://github.com/dapr/quickstarts/tree/master/tutorials/observability/README.md)
+- Learn how to set [tracing configuration options]({{< ref "configuration-overview.md#tracing" >}})
diff --git a/daprdocs/content/en/operations/observability/tracing/otel-collector/open-telemetry-collector.md b/daprdocs/content/en/operations/observability/tracing/otel-collector/open-telemetry-collector.md
new file mode 100644
index 00000000000..aeff1a2c966
--- /dev/null
+++ b/daprdocs/content/en/operations/observability/tracing/otel-collector/open-telemetry-collector.md
@@ -0,0 +1,83 @@
+---
+type: docs
+title: "Using OpenTelemetry Collector to collect traces"
+linkTitle: "Using the OpenTelemetry Collector"
+weight: 900
+description: "How to use Dapr to push trace events through the OpenTelemetry Collector."
+---
+
+Dapr writes traces directly using the OpenTelemetry (OTEL) protocol, which is the **recommended** method. For observability tools that support the OTEL protocol, it is recommended to use the OpenTelemetry Collector, as it allows your application to quickly offload data and includes features such as retries, batching, and encryption. For more information, read the OpenTelemetry [documentation](https://opentelemetry.io/docs/collector/#when-to-use-a-collector).
+
+Dapr can also write traces using the Zipkin protocol. Before the OTEL protocol was supported, the Zipkin protocol was used with the [OpenTelemetry Collector](https://github.com/open-telemetry/opentelemetry-collector) to send traces to observability tools such as AWS X-Ray, Google Cloud Operations Suite, and Azure Monitor. Both protocol approaches are valid, but OTEL is the recommended choice.
+
+![Using the OpenTelemetry Collector to integrate with many backends](/images/open-telemetry-collector.png)
+
+## Prerequisites
+
+- [Install Dapr on Kubernetes]({{< ref kubernetes >}})
+- Verify your trace backends are already set up to receive traces
+- Review your OTEL Collector exporter's required parameters:
+  - [`opentelemetry-collector-contrib/exporter`](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/exporter)
+  - [`opentelemetry-collector/exporter`](https://github.com/open-telemetry/opentelemetry-collector/tree/main/exporter)
+
+## Set up OTEL Collector to push to your trace backend
+
+1. Check out the [`open-telemetry-collector-generic.yaml`](/docs/open-telemetry-collector/open-telemetry-collector-generic.yaml).
+
+1. Replace the `<your-exporter-here>` section with the correct settings for your trace exporter.
+   - Refer to the OTEL Collector links in the [prerequisites section]({{< ref "#prerequisites" >}}) to determine the correct settings.
+
+1. Apply the configuration with:
+
+   ```sh
+   kubectl apply -f open-telemetry-collector-generic.yaml
+   ```
+
+## Set up Dapr to send traces to OTEL Collector
+
+Set up a Dapr configuration file to turn on tracing and deploy a tracing exporter component that uses the OpenTelemetry Collector.
+
+1. Use this [`collector-config.yaml`](/docs/open-telemetry-collector/collector-config.yaml) file to create your own configuration.
+
+1. Apply the configuration with:
+
+   ```sh
+   kubectl apply -f collector-config.yaml
+   ```
+
+## Deploy your app with tracing
+
+Apply the `appconfig` configuration by adding a `dapr.io/config` annotation to the container that you want to participate in the distributed tracing, as shown in the following example:
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  ...
+spec:
+  ...
+  template:
+    metadata:
+      ...
+      annotations:
+        dapr.io/enabled: "true"
+        dapr.io/app-id: "MyApp"
+        dapr.io/app-port: "8080"
+        dapr.io/config: "appconfig"
+```
+
+{{% alert title="Note" color="primary" %}}
+If you are using one of the Dapr tutorials, such as [distributed calculator](https://github.com/dapr/quickstarts/tree/master/tutorials/distributed-calculator), the `appconfig` configuration is already configured, so no additional settings are needed.
+{{% /alert %}}
+
+You can register multiple tracing exporters at the same time, and the tracing logs are forwarded to all registered exporters.
+
+That's it! There's no need to include any SDKs or instrument your application code. Dapr automatically handles the distributed tracing for you.
+
+## View traces
+
+Deploy and run some applications. Wait for the traces to propagate to your tracing backend and view them there.
+ +## Related links +- Try out the [observability quickstart](https://github.com/dapr/quickstarts/tree/master/tutorials/observability/README.md) +- Learn how to set [tracing configuration options]({{< ref "configuration-overview.md#tracing" >}}) \ No newline at end of file diff --git a/daprdocs/content/en/operations/monitoring/tracing/setup-tracing.md b/daprdocs/content/en/operations/observability/tracing/setup-tracing.md similarity index 80% rename from daprdocs/content/en/operations/monitoring/tracing/setup-tracing.md rename to daprdocs/content/en/operations/observability/tracing/setup-tracing.md index 52147498cf8..3ae01b27603 100644 --- a/daprdocs/content/en/operations/monitoring/tracing/setup-tracing.md +++ b/daprdocs/content/en/operations/observability/tracing/setup-tracing.md @@ -2,14 +2,14 @@ type: docs title: "Configure Dapr to send distributed tracing data" linkTitle: "Configure tracing" -weight: 100 -description: "Configure Dapr to send distributed tracing data" +weight: 30 +description: "Set up Dapr to send distributed tracing data" --- -It is recommended to run Dapr with tracing enabled for any production -scenario. You can configure Dapr to send tracing and telemetry data -to many observability tools based on your environment, whether it is running in -the cloud or on-premises. +{{% alert title="Note" color="primary" %}} +It is recommended to run Dapr with tracing enabled for any production scenario. You can configure Dapr to send tracing and telemetry data to many observability tools based on your environment, whether it is running in the cloud or on-premises. +{{% /alert %}} + ## Configuration @@ -68,4 +68,13 @@ turns on tracing for the sidecar. |----------------------|-------------| | `OTEL_EXPORTER_OTLP_ENDPOINT` | Sets the Open Telemetry (OTEL) server address, turns on tracing | | `OTEL_EXPORTER_OTLP_INSECURE` | Sets the connection to the endpoint as unencrypted (true/false) | -| `OTEL_EXPORTER_OTLP_PROTOCOL` | Transport protocol (`grpc`, `http/protobuf`, `http/json`) | \ No newline at end of file +| `OTEL_EXPORTER_OTLP_PROTOCOL` | Transport protocol (`grpc`, `http/protobuf`, `http/json`) | + +## Next steps + +Learn how to set up tracing with one of the following tools: +- [OTEL Collector]({{< ref otel-collector >}}) +- [New Relic]({{< ref newrelic.md >}}) +- [Jaeger]({{< ref jaeger.md >}}) +- [Zipkin]({{< ref zipkin.md >}}) +- [Datadog]({{< ref datadog.md >}}) \ No newline at end of file diff --git a/daprdocs/content/en/operations/observability/tracing/tracing-overview.md b/daprdocs/content/en/operations/observability/tracing/tracing-overview.md new file mode 100644 index 00000000000..603e5d12173 --- /dev/null +++ b/daprdocs/content/en/operations/observability/tracing/tracing-overview.md @@ -0,0 +1,118 @@ +--- +type: docs +title: "Distributed tracing overview" +linkTitle: "Overview" +weight: 10 +description: "Overview on using tracing to get visibility into your application" +--- + +Dapr uses the Open Telemetry (OTEL) and Zipkin protocols for distributed traces. OTEL is the industry standard and is the recommended trace protocol to use. 
+
+Most observability tools support OTEL, including:
+- [Google Cloud Operations](https://cloud.google.com/products/operations)
+- [AWS X-ray](https://aws.amazon.com/xray/)
+- [New Relic](https://newrelic.com)
+- [Azure Monitor](https://azure.microsoft.com/services/monitor/)
+- [Datadog](https://www.datadoghq.com)
+- [Zipkin](https://zipkin.io/)
+- [Jaeger](https://www.jaegertracing.io/)
+- [SignalFX](https://www.signalfx.com/)
+
+The following diagram demonstrates how Dapr (using OTEL and Zipkin protocols) integrates with multiple observability tools.
+
+Distributed tracing with Dapr
+
+## Scenarios
+
+Tracing is used with the service invocation and pub/sub APIs. You can flow trace context between services that use these APIs. There are two scenarios for how tracing is used:
+
+ 1. Dapr generates the trace context and you propagate the trace context to another service.
+ 1. You generate the trace context and Dapr propagates the trace context to a service.
+
+### Scenario 1: Dapr generates trace context headers
+
+#### Propagating sequential service calls
+
+Dapr takes care of creating the trace headers. However, when there are more than two services, you're responsible for propagating the trace headers between them. Let's go through the scenarios with examples:
+
+##### Single service invocation call
+
+For example, `service A -> service B`.
+
+Dapr generates the trace headers in `service A`, which are then propagated from `service A` to `service B`. No further propagation is needed.
+
+##### Multiple sequential service invocation calls
+
+For example, `service A -> service B -> propagate trace headers to -> service C` and so on to further Dapr-enabled services.
+
+Dapr generates the trace headers at the beginning of the request in `service A`, which are then propagated to `service B`. You are now responsible for taking the headers and propagating them to `service C`, since this is specific to your application.
+
+In other words, if the app is calling Dapr and wants to trace with an existing trace header (span), it must always propagate to Dapr (from `service B` to `service C`, in this example). Dapr always propagates trace spans to an application.
+
+{{% alert title="Note" color="primary" %}}
+No helper methods are exposed in Dapr SDKs to propagate and retrieve trace context. You need to use HTTP/gRPC clients to propagate and retrieve trace headers through HTTP headers and gRPC metadata.
+{{% /alert %}}
+
+##### Request is from external endpoint
+
+For example, `from a gateway service to a Dapr-enabled service A`.
+
+An external gateway ingress calls Dapr, which generates the trace headers and calls `service A`. `Service A` then calls `service B` and further Dapr-enabled services.
+
+You must propagate the headers from `service A` to `service B`. For example: `Ingress -> service A -> propagate trace headers -> service B`. This is similar to [case 2]({{< ref "tracing-overview.md#multiple-sequential-service-invocation-calls" >}}).
+
+##### Pub/sub messages
+
+Dapr generates the trace headers in the published message topic. These trace headers are propagated to any services listening on that topic.
+
+#### Propagating multiple different service calls
+
+In the following scenarios, Dapr does some of the work for you, with you then creating or propagating trace headers.
+
+##### Multiple service calls to different services from single service
+
+When you are calling multiple services from a single service, you need to propagate the trace headers.
For example:
+
+```
+service A -> service B
+[ .. some code logic ..]
+service A -> service C
+[ .. some code logic ..]
+service A -> service D
+[ .. some code logic ..]
+```
+
+In this case:
+1. When `service A` first calls `service B`, Dapr generates the trace headers in `service A`.
+1. The trace headers in `service A` are propagated to `service B`.
+1. These trace headers are returned in the response from `service B` as part of response headers.
+1. You then need to propagate the returned trace context to the next services, like `service C` and `service D`, as Dapr does not know you want to reuse the same header.
+
+### Scenario 2: You generate your own trace context headers from non-Daprized applications
+
+Generating your own trace context headers is more unusual and typically not required when calling Dapr.
+
+However, there are scenarios where you could specifically choose to add W3C trace headers into a service call. For example, you have an existing application that does not use Dapr. In this case, Dapr still propagates the trace context headers for you.
+
+If you decide to generate trace headers yourself, there are three ways this can be done:
+
+1. Standard OpenTelemetry SDK
+
+   You can use the industry standard [OpenTelemetry SDKs](https://opentelemetry.io/docs/instrumentation/) to generate trace headers and pass these trace headers to a Dapr-enabled service. _This is the preferred method_.
+
+1. Vendor SDK
+
+   You can use a vendor SDK that provides a way to generate W3C trace headers and pass them to a Dapr-enabled service.
+
+1. W3C trace context
+
+   You can handcraft a trace context following [W3C trace context specifications](https://www.w3.org/TR/trace-context/) and pass it to a Dapr-enabled service.
+
+   Read [the trace context overview]({{< ref w3c-tracing-overview >}}) for more background and examples on W3C trace context and headers.
+
+## Related Links
+
+- [Observability concepts]({{< ref observability-concept.md >}})
+- [W3C Trace Context for distributed tracing]({{< ref w3c-tracing-overview >}})
+- [W3C Trace Context specification](https://www.w3.org/TR/trace-context/)
+- [Observability quickstart](https://github.com/dapr/quickstarts/tree/master/tutorials/observability)
diff --git a/daprdocs/content/en/operations/observability/tracing/w3c-tracing-overview.md b/daprdocs/content/en/operations/observability/tracing/w3c-tracing-overview.md
new file mode 100644
index 00000000000..52eccbef4d5
--- /dev/null
+++ b/daprdocs/content/en/operations/observability/tracing/w3c-tracing-overview.md
@@ -0,0 +1,90 @@
+---
+type: docs
+title: "W3C trace context overview"
+linkTitle: "W3C trace context"
+weight: 20
+description: Background and scenarios for using W3C tracing context and headers with Dapr
+---
+
+Dapr uses the [Open Telemetry protocol](https://opentelemetry.io/), which in turn uses the [W3C trace context](https://www.w3.org/TR/trace-context/) for distributed tracing for both service invocation and pub/sub messaging. Dapr generates and propagates the trace context information, which can be sent to observability tools for visualization and querying.
+
+## Background
+
+Distributed tracing is a methodology implemented by tracing tools to follow, analyze, and debug a transaction across multiple software components.
+
+Typically, a distributed trace traverses more than one service, which requires it to be uniquely identifiable. **Trace context propagation** passes along this unique identification.
+
+In the past, trace context propagation was implemented individually by each different tracing vendor. In multi-vendor environments, this causes interoperability problems, such as:
+
+- Traces collected by different tracing vendors can't be correlated, as there is no shared unique identifier.
+- Traces crossing boundaries between different tracing vendors can't be propagated, as there is no forwarded, uniformly agreed-upon set of identification.
+- Vendor-specific metadata might be dropped by intermediaries.
+- Cloud platform vendors, intermediaries, and service providers cannot guarantee to support trace context propagation, as there is no standard to follow.
+
+Previously, most applications were monitored by a single tracing vendor and stayed within the boundaries of a single platform provider, so these problems didn't have a significant impact.
+
+Today, an increasing number of applications are distributed and leverage multiple middleware services and cloud platforms. This transformation of modern applications requires a distributed tracing context propagation standard.
+
+The [W3C trace context specification](https://www.w3.org/TR/trace-context/) defines a universally agreed-upon format for the exchange of trace context propagation data (referred to as trace context). Trace context solves the above problems by providing:
+
+- A unique identifier for individual traces and requests, allowing trace data of multiple providers to be linked together.
+- An agreed-upon mechanism to forward vendor-specific trace data and avoid broken traces when multiple tracing tools participate in a single transaction.
+- An industry standard that intermediaries, platforms, and hardware providers can support.
+
+This unified approach for propagating trace data improves visibility into the behavior of distributed applications, facilitating problem and performance analysis.
+
+## W3C trace context and headers format
+
+### W3C trace context
+
+Dapr uses the standard W3C trace context headers.
+
+- For HTTP requests, Dapr uses the `traceparent` header.
+- For gRPC requests, Dapr uses the `grpc-trace-bin` header.
+
+When a request arrives without a trace ID, Dapr creates a new one. Otherwise, it passes the trace ID along the call chain.
+
+### W3C trace headers
+These are the specific trace context headers that are generated and propagated by Dapr for HTTP and gRPC.
+
+{{< tabs "HTTP" "gRPC" >}}
+
+{{% codetab %}}
+
+Copy these headers when propagating a trace context header from an HTTP response to an HTTP request:
+
+**Traceparent header**
+
+The traceparent header represents the incoming request in a tracing system in a common format, understood by all vendors:
+
+```
+traceparent: 00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01
+```
+
+[Learn more about the traceparent field details](https://www.w3.org/TR/trace-context/#traceparent-header).
+
+**Tracestate header**
+
+The tracestate header includes the parent in a potentially vendor-specific format:
+
+```
+tracestate: congo=t61rcWkgMzE
+```
+
+[Learn more about the tracestate field details](https://www.w3.org/TR/trace-context/#tracestate-header).
+
+{{% /codetab %}}
+
+
+
+{{% codetab %}}
+
+In gRPC API calls, the trace context is passed through the `grpc-trace-bin` header.
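+
+For illustration, here is a minimal sketch of copying that binary header from an incoming gRPC call onto an outgoing one so Dapr can continue the trace. This is not a specific Dapr SDK API; it assumes the `grpcio` Python package, and the handler shape and stub call are illustrative:
+
+```python
+import grpc
+
+def trace_metadata(context: grpc.ServicerContext) -> list:
+    # Collect the binary W3C trace header set on the incoming call
+    return [
+        (key, value)
+        for key, value in context.invocation_metadata()
+        if key == "grpc-trace-bin"
+    ]
+
+# Usage inside a servicer method, when calling the next service
+# (SomeMethod and stub are placeholders for your own client):
+#   stub.SomeMethod(request, metadata=trace_metadata(context))
+```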
+
+{{% /codetab %}}
+
+{{< /tabs >}}
+
+## Related Links
+- [Learn more about distributed tracing in Dapr]({{< ref tracing-overview.md >}})
+- [W3C Trace Context specification](https://www.w3.org/TR/trace-context/)
diff --git a/daprdocs/content/en/operations/monitoring/tracing/zipkin.md b/daprdocs/content/en/operations/observability/tracing/zipkin.md
similarity index 100%
rename from daprdocs/content/en/operations/monitoring/tracing/zipkin.md
rename to daprdocs/content/en/operations/observability/tracing/zipkin.md
diff --git a/daprdocs/content/en/operations/resiliency/health-checks/_index.md b/daprdocs/content/en/operations/resiliency/health-checks/_index.md
new file mode 100644
index 00000000000..66730af2fa3
--- /dev/null
+++ b/daprdocs/content/en/operations/resiliency/health-checks/_index.md
@@ -0,0 +1,7 @@
+---
+type: docs
+title: "Health checks"
+linkTitle: "Health checks"
+weight: 400
+description: "How to set up health checks for the Dapr sidecar and your application"
+---
\ No newline at end of file
diff --git a/daprdocs/content/en/developing-applications/building-blocks/observability/app-health.md b/daprdocs/content/en/operations/resiliency/health-checks/app-health.md
similarity index 65%
rename from daprdocs/content/en/developing-applications/building-blocks/observability/app-health.md
rename to daprdocs/content/en/operations/resiliency/health-checks/app-health.md
index 4834bb5432b..193233dd17c 100644
--- a/daprdocs/content/en/developing-applications/building-blocks/observability/app-health.md
+++ b/daprdocs/content/en/operations/resiliency/health-checks/app-health.md
@@ -2,17 +2,22 @@
 type: docs
 title: "App health checks"
 linkTitle: "App health checks"
-weight: 300
+weight: 100
 description: Reacting to apps' health status changes
 ---

-App health checks is a feature that allows probing for the health of your application and reacting to status changes.
+The app health checks feature allows probing for the health of your application and reacting to status changes.

-Applications can become unresponsive for a variety of reasons: for example, they could be too busy to accept new work, could have crashed, or be in a deadlock state. Sometimes the condition can be transitory, for example if the app is just busy (and will eventually be able to resume accepting new work), or if the application is being restarted for whatever reason and is in its initialization phase.
+Applications can become unresponsive for a variety of reasons. For example, your application:
+- Could be too busy to accept new work;
+- Could have crashed; or
+- Could be in a deadlock state.

-When app health checks are enabled, the Dapr *runtime* (sidecar) periodically polls your application via HTTP or gRPC calls.
+Sometimes the condition can be transitory, for example:
+- If the app is just busy and will resume accepting new work eventually
+- If the application is being restarted for whatever reason and is in its initialization phase

-When it detects a failure in the app's health, Dapr stops accepting new work on behalf of the application by:
+App health checks are disabled by default. Once you enable app health checks, the Dapr runtime (sidecar) periodically polls your application via HTTP or gRPC calls.
When it detects a failure in the app's health, Dapr stops accepting new work on behalf of the application by: - Unsubscribing from all pub/sub subscriptions - Stopping all input bindings @@ -20,15 +25,14 @@ When it detects a failure in the app's health, Dapr stops accepting new work on These changes are meant to be temporary, and Dapr resumes normal operations once it detects that the application is responsive again. -App health checks are disabled by default. - Diagram showing the app health feature. Running Dapr with app health enabled causes Dapr to periodically probe the app for its health. -### App health checks vs platform-level health checks +## App health checks vs platform-level health checks App health checks in Dapr are meant to be complementary to, and not replace, any platform-level health checks, like [liveness probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) when running on Kubernetes. Platform-level health checks (or liveness probes) generally ensure that the application is running, and cause the platform to restart the application in case of failures. + Unlike platform-level health checks, Dapr's app health checks focus on pausing work to an application that is currently unable to accept it, but is expected to be able to resume accepting work *eventually*. Goals include: - Not bringing more load to an application that is already overloaded. @@ -36,7 +40,9 @@ Unlike platform-level health checks, Dapr's app health checks focus on pausing w In this regard, Dapr's app health checks are "softer", waiting for an application to be able to process work, rather than terminating the running process in a "hard" way. -> Note that for Kubernetes, a failing App Health check won't remove a pod from service discovery: this remains the responsibility of the Kubernetes liveness probe, _not_ Dapr. +{{% alert title="Note" color="primary" %}} +For Kubernetes, a failing app health check won't remove a pod from service discovery: this remains the responsibility of the Kubernetes liveness probe, _not_ Dapr. 
+{{% /alert %}}

 ## Configuring app health checks

@@ -52,34 +58,50 @@ The full list of options are listed in this table:

 | CLI flags | Kubernetes deployment annotation | Description | Default value |
 | ----------------------------- | ----------------------------------- | ----------- | ------------- |
 | `--enable-app-health-check` | `dapr.io/enable-app-health-check` | Boolean that enables the health checks | Disabled |
-| `--app-health-check-path` | `dapr.io/app-health-check-path` | Path that Dapr invokes for health probes when the app channel is HTTP (this value is ignored if the app channel is using gRPC) | `/healthz` |
-| `--app-health-probe-interval` | `dapr.io/app-health-probe-interval` | Number of *seconds* between each health probe | `5` |
-| `--app-health-probe-timeout` | `dapr.io/app-health-probe-timeout` | Timeout in *milliseconds* for health probe requests | `500` |
-| `--app-health-threshold` | `dapr.io/app-health-threshold` | Max number of consecutive failures before the app is considered unhealthy | `3` |
+| [`--app-health-check-path`]({{< ref "app-health.md#health-check-paths" >}}) | `dapr.io/app-health-check-path` | Path that Dapr invokes for health probes when the app channel is HTTP (this value is ignored if the app channel is using gRPC) | `/healthz` |
+| [`--app-health-probe-interval`]({{< ref "app-health.md#intervals-timeouts-and-thresholds" >}}) | `dapr.io/app-health-probe-interval` | Number of *seconds* between each health probe | `5` |
+| [`--app-health-probe-timeout`]({{< ref "app-health.md#intervals-timeouts-and-thresholds" >}}) | `dapr.io/app-health-probe-timeout` | Timeout in *milliseconds* for health probe requests | `500` |
+| [`--app-health-threshold`]({{< ref "app-health.md#intervals-timeouts-and-thresholds" >}}) | `dapr.io/app-health-threshold` | Max number of consecutive failures before the app is considered unhealthy | `3` |
+
+> See the [full Dapr arguments and annotations reference]({{< ref arguments-annotations-overview >}}) for all options and how to enable them.
+
+Additionally, app health checks are impacted by the protocol used for the app channel, which is configured with the following flag or annotation:

-> See the [full Dapr arguments and annotations reference]({{< ref arguments-annotations-overview >}}) for all options and how to enable them.
+| CLI flag | Kubernetes deployment annotation | Description | Default value |
+| ----------------------------- | ----------------------------------- | ----------- | ------------- |
+| [`--app-protocol`]({{< ref "app-health.md#health-check-paths" >}}) | `dapr.io/app-protocol` | Protocol used for the app channel. Supported values are `http`, `grpc`, `https`, `grpcs`, and `h2c` (HTTP/2 Cleartext). | `http` |

-Additionally, app health checks are impacted by the protocol used for the app channel, which is configured with the `--app-protocol` flag (self-hosted) or the `dapr.io/app-protocol` annotation (Kubernetes); supported values are `http` (default), `grpc`, `https`, `grpcs`, and `h2c` (HTTP/2 Cleartext).
+{{% alert title="Note" color="primary" %}}
+A low app health probe timeout value can classify an application as unhealthy if it experiences a sudden high load, causing the response time to degrade. If this happens, increase the `dapr.io/app-health-probe-timeout` value.
+{{% /alert %}}

 ### Health check paths

+#### HTTP
 When using HTTP (including `http`, `https`, and `h2c`) for `app-protocol`, Dapr performs health probes by making an HTTP call to the path specified in `app-health-check-path`, which is `/healthz` by default.
+ For your app to be considered healthy, the response must have an HTTP status code in the 200-299 range. Any other status code is considered a failure. Dapr is only concerned with the status code of the response, and ignores any response header or body. +#### gRPC When using gRPC for the app channel (`app-protocol` set to `grpc` or `grpcs`), Dapr invokes the method `/dapr.proto.runtime.v1.AppCallbackHealthCheck/HealthCheck` in your application. Most likely, you will use a Dapr SDK to implement the handler for this method. While responding to a health probe request, your app *may* decide to perform additional internal health checks to determine if it's ready to process work from the Dapr runtime. However, this is not required; it's a choice that depends on your application's needs. ### Intervals, timeouts, and thresholds -When app health checks are enabled, by default Dapr probes your application every 5 seconds. You can configure the interval, in seconds, with `app-health-probe-interval`. These probes happen regularly, regardless of whether your application is healthy or not. +#### Intervals +By default, when app health checks are enabled, Dapr probes your application every 5 seconds. You can configure the interval, in seconds, with `app-health-probe-interval`. These probes happen regularly, regardless of whether your application is healthy or not. +#### Timeouts When the Dapr runtime (sidecar) is initially started, Dapr waits for a successful health probe before considering the app healthy. This means that pub/sub subscriptions, input bindings, and service invocation requests won't be enabled for your application until this first health check is complete and successful. -Health probe requests are considered successful if the application sends a successful response (as explained above) within the timeout configured in `app-health-probe-timeout`. The default value is 500, corresponding to 500 milliseconds (i.e. half a second). +Health probe requests are considered successful if the application sends a successful response (as explained above) within the timeout configured in `app-health-probe-timeout`. The default value is 500, corresponding to 500 milliseconds (half a second). +#### Thresholds Before Dapr considers an app to have entered an unhealthy state, it will wait for `app-health-threshold` consecutive failures, whose default value is 3. This default value means that your application must fail health probes 3 times *in a row* to be considered unhealthy. + If you set the threshold to 1, any failure causes Dapr to assume your app is unhealthy and will stop delivering work to it. + A threshold greater than 1 can help exclude transient failures due to external circumstances. The right value for your application depends on your requirements. Thresholds only apply to failures. A single successful response is enough for Dapr to consider your app to be healthy and resume normal operations. 
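+
+For illustration, here is a minimal sketch of an app-side HTTP health endpoint that satisfies the path and status-code rules above. It assumes Flask; the port and handler name are illustrative:
+
+```python
+from flask import Flask
+
+app = Flask(__name__)
+
+# The path must match --app-health-check-path / dapr.io/app-health-check-path
+@app.route("/healthz")
+def healthz():
+    # Any status code in the 200-299 range marks the app healthy;
+    # Dapr ignores the response headers and body
+    return "", 200
+
+if __name__ == "__main__":
+    app.run(port=8080)  # must match --app-port / dapr.io/app-port
+```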
diff --git a/daprdocs/content/en/developing-applications/building-blocks/observability/sidecar-health.md b/daprdocs/content/en/operations/resiliency/health-checks/sidecar-health.md
similarity index 84%
rename from daprdocs/content/en/developing-applications/building-blocks/observability/sidecar-health.md
rename to daprdocs/content/en/operations/resiliency/health-checks/sidecar-health.md
index 9385473dd9f..b81efeef9b9 100644
--- a/daprdocs/content/en/developing-applications/building-blocks/observability/sidecar-health.md
+++ b/daprdocs/content/en/operations/resiliency/health-checks/sidecar-health.md
@@ -11,7 +11,7 @@ Dapr provides a way to determine its health using an [HTTP `/healthz` endpoint](
 - Probed for its health
 - Determined for readiness and liveness

-The Dapr `/healthz` endpoint can be used by health probes from the application hosting platform (for example Kubernetes). This topic describes how Dapr integrates with probes from different hosting platforms.
+In this guide, you learn how the Dapr `/healthz` endpoint integrates with health probes from the application hosting platform (for example, Kubernetes).

 When deploying Dapr to a hosting platform like Kubernetes, the Dapr health endpoint is automatically configured for you.

@@ -23,20 +23,10 @@ Dapr actors also have a health API endpoint where Dapr probes the application fo

 Kubernetes uses *readiness* and *liveness* probes to determine the health of the container.

-The kubelet uses liveness probes to know when to restart a container.
-For example, liveness probes could catch a deadlock, where an application is running but is unable to make progress. Restarting a container in such a state can help to make the application more available despite having bugs.
+### Liveness
+The kubelet uses liveness probes to know when to restart a container. For example, liveness probes could catch a deadlock (a running application that is unable to make progress). Restarting a container in such a state can help to make the application more available despite having bugs.

-The kubelet uses readiness probes to know when a container is ready to start accepting traffic. A pod is considered ready when all of its containers are ready. One use of this readiness signal is to control which pods are used as backends for Kubernetes services. When a pod is not ready, it is removed from Kubernetes service load balancers.
-
-{{% alert title="Note" color="primary" %}}
-The Dapr sidecar will be in ready state once the application is accessible on its configured port. The application cannot access the Dapr components during application start up/initialization.
-{{% /alert %}}
-
-When integrating with Kubernetes, the Dapr sidecar is injected with a Kubernetes probe configuration telling it to use the Dapr healthz endpoint. This is done by the "Sidecar Injector" system service. The integration with the kubelet is shown in the diagram below.
-
-Diagram of Dapr services interacting
-
-### How to configure a liveness probe in Kubernetes
+#### How to configure a liveness probe in Kubernetes

 In the pod configuration file, the liveness probe is added in the containers spec section as shown below:

@@ -53,7 +43,14 @@ In the above example, the `periodSeconds` field specifies that the kubelet shoul

 Any HTTP status code between 200 and 399 indicates success; any other status code indicates failure.

-### How to configure a readiness probe in Kubernetes
+### Readiness
+The kubelet uses readiness probes to know when a container is ready to start accepting traffic.
A pod is considered ready when all of its containers are ready. One use of this readiness signal is to control which pods are used as backends for Kubernetes services. When a pod is not ready, it is removed from Kubernetes service load balancers. + +{{% alert title="Note" color="primary" %}} +The Dapr sidecar will be in ready state once the application is accessible on its configured port. The application cannot access the Dapr components during application start up/initialization. +{{% /alert %}} + +#### How to configure a readiness probe in Kubernetes Readiness probes are configured similarly to liveness probes. The only difference is that you use the `readinessProbe` field instead of the `livenessProbe` field: @@ -66,7 +63,13 @@ Readiness probes are configured similarly to liveness probes. The only differenc periodSeconds: 3 ``` -### How the Dapr sidecar health endpoint is configured with Kubernetes +### Sidecar Injector + +When integrating with Kubernetes, the Dapr sidecar is injected with a Kubernetes probe configuration telling it to use the Dapr `healthz` endpoint. This is done by the "Sidecar Injector" system service. The integration with the kubelet is shown in the diagram below. + +Diagram of Dapr services interacting + +#### How the Dapr sidecar health endpoint is configured with Kubernetes As mentioned above, this configuration is done automatically by the Sidecar Injector service. This section describes the specific values that are set on the liveness and readiness probes. @@ -91,7 +94,7 @@ Dapr has its HTTP health endpoint `/v1.0/healthz` on port 3500. This can be used failureThreshold: 3 ``` -For more information refer to: +## Related links - [Endpoint health API]({{< ref health_api.md >}}) - [Actor health API]({{< ref "actors_api.md#health-check" >}}) diff --git a/daprdocs/content/en/operations/resiliency/policies.md b/daprdocs/content/en/operations/resiliency/policies.md index 56ab3cb9134..db72dd78c5c 100644 --- a/daprdocs/content/en/operations/resiliency/policies.md +++ b/daprdocs/content/en/operations/resiliency/policies.md @@ -1,14 +1,14 @@ --- type: docs -title: "Policies" +title: "Resiliency policies" linkTitle: "Policies" -weight: 4500 +weight: 200 description: "Configure resiliency policies for timeouts, retries, and circuit breakers" --- Define timeouts, retries, and circuit breaker policies under `policies`. Each policy is given a name so you can refer to them from the `targets` section in the resiliency spec. -> Note: Dapr offers default retries for specific APIs. [See here]({{< ref "#override-default-retries" >}}) to learn how you can overwrite default retry logic with user defined retry policies. +> Note: Dapr offers default retries for specific APIs. [See here]({{< ref "#overriding-default-retries" >}}) to learn how you can overwrite default retry logic with user defined retry policies. 
## Timeouts @@ -299,4 +299,4 @@ The table below is a break down of which policies are applied when attempting to Try out one of the Resiliency quickstarts: - [Resiliency: Service-to-service]({{< ref resiliency-serviceinvo-quickstart.md >}}) -- [Resiliency: State Management]({{< ref resiliency-state-quickstart.md >}}) \ No newline at end of file +- [Resiliency: State Management]({{< ref resiliency-state-quickstart.md >}}) diff --git a/daprdocs/content/en/operations/resiliency/resiliency-overview.md b/daprdocs/content/en/operations/resiliency/resiliency-overview.md index bb6cdb502c8..e7564757a88 100644 --- a/daprdocs/content/en/operations/resiliency/resiliency-overview.md +++ b/daprdocs/content/en/operations/resiliency/resiliency-overview.md @@ -2,7 +2,7 @@ type: docs title: "Overview" linkTitle: "Overview" -weight: 4500 +weight: 100 description: "Configure Dapr retries, timeouts, and circuit breakers" --- @@ -20,6 +20,14 @@ Policies can then be applied to [targets]({{< ref "targets.md" >}}), which inclu Additionally, resiliency policies can be [scoped to specific apps]({{< ref "component-scopes.md#application-access-to-components-with-scopes" >}}). +## Demo video + +Learn more about [how to write resilient microservices with Dapr](https://youtu.be/uC-4Q5KFq98?si=JSUlCtcUNZLBM9rW). + + + +## Resiliency policy structure + Below is the general structure of a resiliency policy: ```yaml @@ -51,7 +59,7 @@ spec: # components and their applied policies here ``` -### Complete example policy +## Complete example policy ```yaml apiVersion: dapr.io/v1alpha1 diff --git a/daprdocs/content/en/operations/resiliency/targets.md b/daprdocs/content/en/operations/resiliency/targets.md index ffb5ead96fb..15090b2e0ba 100644 --- a/daprdocs/content/en/operations/resiliency/targets.md +++ b/daprdocs/content/en/operations/resiliency/targets.md @@ -2,8 +2,8 @@ type: docs title: "Targets" linkTitle: "Targets" -weight: 4500 -description: "Apply resiliency policies to apps, components and actors" +weight: 300 +description: "Apply resiliency policies to targets including apps, components and actors" --- ### Targets diff --git a/daprdocs/content/en/operations/support/breaking-changes-and-deprecations.md b/daprdocs/content/en/operations/support/breaking-changes-and-deprecations.md index 76c71eed315..50dc764c849 100644 --- a/daprdocs/content/en/operations/support/breaking-changes-and-deprecations.md +++ b/daprdocs/content/en/operations/support/breaking-changes-and-deprecations.md @@ -15,18 +15,18 @@ Breaking changes are defined as a change to any of the following that cause comp - Default configuration value - Command line argument - Published metric -- Kubernetes CRD template +- Kubernetes resource template - Publicly accessible API - Publicly visible SDK interface, method, class, or attribute Breaking changes can be applied right away to the following cases: -- Projects versioned at 0.x.y +- Projects that have not reached version 1.0.0 yet - Preview feature - Alpha API - Preview or Alpha interface, class, method or attribute in SDK - Dapr Component in Alpha or Beta -- Components-Contrib interface +- Interfaces for `github.com/dapr/components-contrib` - URLs in Docs and Blog - An **exceptional** case where it is **required** to fix a critical bug or security vulnerability. @@ -39,7 +39,9 @@ There is a process for applying breaking changes: - For example, feature X is announced to be deprecated in the 1.0.0 release notes and will then be removed in 1.2.0. 
## Deprecations
-Deprecations can apply to
+
+Deprecations can apply to:
+
 1. APIs, including alpha APIs
 1. Preview features
 1. Components
@@ -58,11 +60,14 @@ After announcing a future breaking change, the change will happen in 2 releases

 | Feature | Deprecation announcement | Removal |
 |-----------------------|-----------------------|------------------------- |
-| GET /v1.0/shutdown API (Users should use [POST API]({{< ref kubernetes-job.md >}}) instead) | 1.2.0 | 1.4.0 |
+| GET /v1.0/shutdown API (Users should use [POST API]({{< ref kubernetes-job.md >}}) instead) | 1.2.0 | 1.4.0 |
 | Java domain builder classes deprecated (Users should use [setters](https://github.com/dapr/java-sdk/issues/587) instead) | Java SDK 1.3.0 | Java SDK 1.5.0 |
-| Service invocation will no longer provide a default content type header of `application/json` when no content-type is specified. You must explicitly [set a content-type header]({{< ref "service_invocation_api.md#request-contents" >}}) for service invocation if your invoked apps rely on this header. | 1.7.0 | 1.9.0 |
+| Service invocation will no longer provide a default content type header of `application/json` when no content-type is specified. You must explicitly [set a content-type header]({{< ref "service_invocation_api.md#request-contents" >}}) for service invocation if your invoked apps rely on this header. | 1.7.0 | 1.9.0 |
 | gRPC service invocation using `invoke` method is deprecated. Use proxy mode service invocation instead. See [How-To: Invoke services using gRPC ]({{< ref howto-invoke-services-grpc.md >}}) to use the proxy mode.| 1.9.0 | 1.10.0 |
 | The CLI flag `--app-ssl` (in both the Dapr CLI and daprd) has been deprecated in favor of using `--app-protocol` with values `https` or `grpcs`. [daprd:6158](https://github.com/dapr/dapr/issues/6158) [cli:1267](https://github.com/dapr/cli/issues/1267)| 1.11.0 | 1.13.0 |
+| Hazelcast PubSub Component | 1.9.0 | 1.11.0 |
+| Twitter Binding Component | 1.10.0 | 1.11.0 |
+| NATS Streaming PubSub Component | 1.11.0 | 1.13.0 |

 ## Related links

diff --git a/daprdocs/content/en/operations/support/support-release-policy.md b/daprdocs/content/en/operations/support/support-release-policy.md
index e3b4899bfe6..915042374eb 100644
--- a/daprdocs/content/en/operations/support/support-release-policy.md
+++ b/daprdocs/content/en/operations/support/support-release-policy.md
@@ -32,59 +32,65 @@ Patch support is for supported versions (current and previous).

 The Dapr's sidecar image is published to both [GitHub Container Registry](https://github.com/dapr/dapr/pkgs/container/daprd) and [Docker Registry](https://hub.docker.com/r/daprio/daprd/tags). The default image contains all components. From version 1.11, Dapr also offers a variation of the sidecar image, containing only stable components.

-* Default sidecar images: `daprio/daprd:<version>` or `ghcr.io/dapr/daprd:<version>` (for example `ghcr.io/dapr/daprd:1.11.0`)
-* Sidecar images for stable components: `daprio/daprd:<version>-stablecomponents` or `ghcr.io/dapr/daprd:<version>-stablecomponents` (for example `ghcr.io/dapr/daprd:1.11.0-stablecomponents`)
+* Default sidecar images: `daprio/daprd:<version>` or `ghcr.io/dapr/daprd:<version>` (for example `ghcr.io/dapr/daprd:1.11.1`)
+* Sidecar images for stable components: `daprio/daprd:<version>-stablecomponents` or `ghcr.io/dapr/daprd:<version>-stablecomponents` (for example `ghcr.io/dapr/daprd:1.11.1-stablecomponents`)

-On Kubernetes, the sidecar image can be overwritten for the application Deployment resource with the `dapr.io/sidecar-image` annotation.
See more about [Dapr's arguments and annotations]({{< ref arguments-annotations-overview.md >}}). The default 'daprio/daprd:latest' image is used if not specified.
+On Kubernetes, the sidecar image can be overwritten for the application Deployment resource with the `dapr.io/sidecar-image` annotation. See more about [Dapr's arguments and annotations]({{< ref "arguments-annotations-overview.md" >}}). The default 'daprio/daprd:latest' image is used if not specified.

-Learn more about [Dapr components' certification lifecycle]({{< ref certification-lifecycle.md >}}).
+Learn more about [Dapr components' certification lifecycle]({{< ref "certification-lifecycle.md" >}}).

 ## Supported versions

 The table below shows the versions of Dapr releases that have been tested together and form a "packaged" release. Any other combinations of releases are not supported.

-| Release date | Runtime | CLI | SDKs | Dashboard | Status |
-|--------------------|:--------:|:--------|---------|---------|---------|
-| May 15th 2023 | 1.10.7</br> | 1.10.0 | Java 1.8.0 </br>Go 1.6.0 </br>PHP 1.1.0 </br>Python 1.9.0 </br>.NET 1.10.0 </br>JS 2.5.0 | 0.11.0 | Supported (current) |
-| May 12th 2023 | 1.10.6</br> | 1.10.0 | Java 1.8.0 </br>Go 1.6.0 </br>PHP 1.1.0 </br>Python 1.9.0 </br>.NET 1.10.0 </br>JS 2.5.0 | 0.11.0 | Supported (current) |
-| April 13 2023 |1.10.5</br> | 1.10.0 | Java 1.8.0 </br>Go 1.6.0 </br>PHP 1.1.0 </br>Python 1.9.0 </br>.NET 1.10.0 </br>JS 2.5.0 | 0.11.0 | Supported (current) |
-| March 16 2023 | 1.10.4</br> | 1.10.0 | Java 1.8.0 </br>Go 1.6.0 </br>PHP 1.1.0 </br>Python 1.9.0 </br>.NET 1.10.0 </br>JS 2.5.0 | 0.11.0 | Supported |
-| March 14 2023 | 1.10.3</br> | 1.10.0 | Java 1.8.0 </br>Go 1.6.0 </br>PHP 1.1.0 </br>Python 1.9.0 </br>.NET 1.10.0 </br>JS 2.5.0 | 0.11.0 | Supported |
-| February 24 2023 | 1.10.2</br> | 1.10.0 | Java 1.8.0 </br>Go 1.6.0 </br>PHP 1.1.0 </br>Python 1.9.0 </br>.NET 1.10.0 </br>JS 2.5.0 | 0.11.0 | Supported |
-| February 20 2023 | 1.10.1</br> | 1.10.0 | Java 1.8.0 </br>Go 1.6.0 </br>PHP 1.1.0 </br>Python 1.9.0 </br>.NET 1.10.0 </br>JS 2.5.0 | 0.11.0 | Supported |
-| February 14 2023 | 1.10.0</br> | 1.10.0 | Java 1.8.0 </br>Go 1.6.0 </br>PHP 1.1.0 </br>Python 1.9.0 </br>.NET 1.10.0 </br>JS 2.5.0 | 0.11.0 | Supported|
-| December 2nd 2022 | 1.9.5</br> | 1.9.1 | Java 1.7.0 </br>Go 1.6.0 </br>PHP 1.1.0 </br>Python 1.8.3 </br>.NET 1.9.0 </br>JS 2.4.2 | 0.11.0 | Supported |
-| November 17th 2022 | 1.9.4</br> | 1.9.1 | Java 1.7.0 </br>Go 1.6.0 </br>PHP 1.1.0 </br>Python 1.8.3 </br>.NET 1.9.0 </br>JS 2.4.2 | 0.11.0 | Supported |
-| November 4th 2022 | 1.9.3</br> | 1.9.1 | Java 1.7.0 </br>Go 1.6.0 </br>PHP 1.1.0 </br>Python 1.8.3 </br>.NET 1.9.0 </br>JS 2.4.2 | 0.11.0 | Supported |
-| November 1st 2022 | 1.9.2</br> | 1.9.1 | Java 1.7.0 </br>Go 1.6.0 </br>PHP 1.1.0 </br>Python 1.8.1 </br>.NET 1.9.0 </br>JS 2.4.2 | 0.11.0 | Supported |
-| October 26th 2022 | 1.9.1</br> | 1.9.1 | Java 1.7.0 </br>Go 1.6.0 </br>PHP 1.1.0 </br>Python 1.8.1 </br>.NET 1.9.0 </br>JS 2.4.2 | 0.11.0 | Supported |
-| October 13th 2022 | 1.9.0</br> | 1.9.1 | Java 1.7.0 </br>Go 1.6.0 </br>PHP 1.1.0 </br>Python 1.8.3 </br>.NET 1.9.0 </br>JS 2.4.2 | 0.11.0 | Supported |
-| October 26th 2022 | 1.8.6</br> | 1.8.1 | Java 1.6.0 </br>Go 1.5.0 </br>PHP 1.1.0 </br>Python 1.7.0 </br>.NET 1.8.0 </br>JS 2.3.0 | 0.11.0 | Supported |
-| October 13th 2022 | 1.8.5</br> | 1.8.1 | Java 1.6.0 </br>Go 1.5.0 </br>PHP 1.1.0 </br>Python 1.7.0 </br>.NET 1.8.0 </br>JS 2.3.0 | 0.11.0 | Supported |
-| August 10th 2022 | 1.8.4</br> | 1.8.1 | Java 1.6.0 </br>Go 1.5.0 </br>PHP 1.1.0 </br>Python 1.7.0 </br>.NET 1.8.0 </br>JS 2.3.0 | 0.11.0 | Supported |
-| July 29th 2022 | 1.8.3</br> | 1.8.0 | Java 1.6.0 </br>Go 1.5.0 </br>PHP 1.1.0 </br>Python 1.7.0 </br>.NET 1.8.0 </br>JS 2.3.0 | 0.11.0 | Supported |
-| July 21st 2022 | 1.8.2</br> | 1.8.0 | Java 1.6.0 </br>Go 1.5.0 </br>PHP 1.1.0 </br>Python 1.7.0 </br>.NET 1.8.0 </br>JS 2.3.0 | 0.11.0 | Supported |
-| July 20th 2022 | 1.8.1</br> | 1.8.0 | Java 1.6.0 </br>Go 1.5.0 </br>PHP 1.1.0 </br>Python 1.7.0 </br>.NET 1.8.0 </br>JS 2.3.0 | 0.11.0 | Supported |
-| July 7th 2022 | 1.8.0</br> | 1.8.0 | Java 1.6.0 </br>Go 1.5.0 </br>PHP 1.1.0 </br>Python 1.7.0 </br>.NET 1.8.0 </br>JS 2.3.0 | 0.11.0 | Supported |
-| October 26th 2022 | 1.7.5</br> | 1.7.0 | Java 1.5.0 </br>Go 1.4.0 </br>PHP 1.1.0 </br>Python 1.6.0 </br>.NET 1.7.0 </br>JS 2.2.1 | 0.10.0 | Supported |
-| May 31st 2022 | 1.7.4</br> | 1.7.0 | Java 1.5.0 </br>Go 1.4.0 </br>PHP 1.1.0 </br>Python 1.6.0 </br>.NET 1.7.0 </br>JS 2.2.1 | 0.10.0 | Supported |
-| May 17th 2022 | 1.7.3</br> | 1.7.0 | Java 1.5.0 </br>Go 1.4.0 </br>PHP 1.1.0 </br>Python 1.6.0 </br>.NET 1.7.0 </br>JS 2.2.1 | 0.10.0 | Supported |
-| Apr 22th 2022 | 1.7.2</br> | 1.7.0 | Java 1.5.0 </br>Go 1.4.0 </br>PHP 1.1.0 </br>Python 1.6.0 </br>.NET 1.7.0 </br>JS 2.1.0 | 0.10.0 | Supported |
-| Apr 20th 2022 | 1.7.1</br> | 1.7.0 | Java 1.5.0 </br>Go 1.4.0 </br>PHP 1.1.0 </br>Python 1.6.0 </br>.NET 1.7.0 </br>JS 2.1.0 | 0.10.0 | Supported |
-| Apr 7th 2022 | 1.7.0</br> | 1.7.0 | Java 1.5.0 </br>Go 1.4.0 </br>PHP 1.1.0 </br>Python 1.6.0 </br>.NET 1.7.0 </br>JS 2.1.0 | 0.10.0 | Supported |
-| Apr 20th 2022 | 1.6.2</br> | 1.6.0 | Java 1.4.0 </br>Go 1.3.1 </br>PHP 1.1.0 </br>Python 1.5.0 </br>.NET 1.6.0 </br>JS 2.0.0 | 0.9.0 | Unsupported |
-| Mar 25th 2022 | 1.6.1</br> | 1.6.0 | Java 1.4.0 </br>Go 1.3.1 </br>PHP 1.1.0 </br>Python 1.5.0 </br>.NET 1.6.0 </br>JS 2.0.0 | 0.9.0 | Unsupported |
-| Jan 25th 2022 | 1.6.0</br> | 1.6.0 | Java 1.4.0 </br>Go 1.3.1 </br>PHP 1.1.0 </br>Python 1.5.0 </br>.NET 1.6.0 </br>JS 2.0.0 | 0.9.0 | Unsupported |
-| Mar 25th 2022 | 1.5.2</br> | 1.6.0 | Java 1.3.0 </br>Go 1.3.0 </br>PHP 1.1.0 </br>Python 1.4.0 </br>.NET 1.5.0 </br>JS 1.0.2 | 0.9.0 | Unsupported |
-| Dec 6th 2021 | 1.5.1</br> | 1.5.1 | Java 1.3.0 </br>Go 1.3.0 </br>PHP 1.1.0 </br>Python 1.4.0 </br>.NET 1.5.0 </br>JS 1.0.2 | 0.9.0 | Unsupported |
-| Nov 11th 2021 | 1.5.0</br> | 1.5.0 | Java 1.3.0 </br>Go 1.3.0 </br>PHP 1.1.0 </br>Python 1.4.0 </br>.NET 1.5.0 </br>JS 1.0.2 | 0.9.0 | Unsupported |
-| Dev 6th 2021 | 1.4.4</br> | 1.4.0 | Java 1.3.0 </br>Go 1.2.0 </br>PHP 1.1.0 </br>Python 1.3.0 </br>.NET 1.4.0 | 0.8.0 | Unsupported |
-| Oct 7th 2021 | 1.4.3</br> | 1.4.0 | Java 1.3.0 </br>Go 1.2.0 </br>PHP 1.1.0 </br>Python 1.3.0 </br>.NET 1.4.0 | 0.8.0 | Unsupported |
-| Sep 24th 2021 | 1.4.2</br> | 1.4.0 | Java 1.3.0 </br>Go 1.2.0 </br>PHP 1.1.0 </br>Python 1.3.0 </br>.NET 1.4.0 | 0.8.0 | Unsupported |
-| Sep 22nd 2021 | 1.4.1</br> | 1.4.0 | Java 1.3.0 </br>Go 1.2.0 </br>PHP 1.1.0 </br>Python 1.3.0 </br>.NET 1.4.0 | 0.8.0 | Unsupported |
-| Sep 15th 2021 | 1.4</br> | 1.4.0 | Java 1.3.0 </br>Go 1.2.0 </br>PHP 1.1.0 </br>Python 1.3.0 </br>.NET 1.4.0 | 0.8.0 | Unsupported |
-| Sep 14th 2021 | 1.3.1</br> | 1.3.0 | Java 1.2.0 </br>Go 1.2.0 </br>PHP 1.1.0 </br>Python 1.2.0 </br>.NET 1.3.0 | 0.7.0 | Unsupported |
-| Jul 26th 2021 | 1.3</br> | 1.3.0 | Java 1.2.0 </br>Go 1.2.0 </br>PHP 1.1.0 </br>Python 1.2.0 </br>.NET 1.3.0 | 0.7.0 | Unsupported |
+| Release date | Runtime | CLI | SDKs | Dashboard | Status | Release notes |
+|--------------------|:--------:|:--------|---------|---------|---------|------------|
+| August 31st 2023 | 1.11.3</br> | 1.11.0 | Java 1.9.0 </br>Go 1.8.0 </br>PHP 1.1.0 </br>Python 1.10.0 </br>.NET 1.11.0 </br>JS 3.1.0 | 0.13.0 | Supported (current) | [v1.11.3 release notes](https://github.com/dapr/dapr/releases/tag/v1.11.3) |
+| July 20th 2023 | 1.11.2</br> | 1.11.0 | Java 1.9.0 </br>Go 1.8.0 </br>PHP 1.1.0 </br>Python 1.10.0 </br>.NET 1.11.0 </br>JS 3.1.0 | 0.13.0 | Supported (current) | [v1.11.2 release notes](https://github.com/dapr/dapr/releases/tag/v1.11.2) |
+| June 22nd 2023 | 1.11.1</br> | 1.11.0 | Java 1.9.0 </br>Go 1.8.0 </br>PHP 1.1.0 </br>Python 1.10.0 </br>.NET 1.11.0 </br>JS 3.1.0 | 0.13.0 | Supported (current) | [v1.11.1 release notes](https://github.com/dapr/dapr/releases/tag/v1.11.1) |
+| June 12th 2023 | 1.11.0</br> | 1.11.0 | Java 1.9.0 </br>Go 1.8.0 </br>PHP 1.1.0 </br>Python 1.10.0 </br>.NET 1.11.0 </br>JS 3.1.0 | 0.13.0 | Supported (current) | [v1.11.0 release notes](https://github.com/dapr/dapr/releases/tag/v1.11.0) |
+| July 20th 2023 | 1.10.9</br> | 1.10.0 | Java 1.8.0 </br>Go 1.7.0 </br>PHP 1.1.0 </br>Python 1.9.0 </br>.NET 1.10.0 </br>JS 3.0.0 | 0.11.0 | Supported | [v1.10.9 release notes](https://github.com/dapr/dapr/releases/tag/v1.10.9) |
+| June 22nd 2023 | 1.10.8</br> | 1.10.0 | Java 1.8.0 </br>Go 1.7.0 </br>PHP 1.1.0 </br>Python 1.9.0 </br>.NET 1.10.0 </br>JS 3.0.0 | 0.11.0 | Supported | [v1.10.8 release notes](https://github.com/dapr/dapr/releases/tag/v1.10.8) |
+| May 15th 2023 | 1.10.7</br> | 1.10.0 | Java 1.8.0 </br>Go 1.7.0 </br>PHP 1.1.0 </br>Python 1.9.0 </br>.NET 1.10.0 </br>JS 3.0.0 | 0.11.0 | Supported | |
+| May 12th 2023 | 1.10.6</br> | 1.10.0 | Java 1.8.0 </br>Go 1.7.0 </br>PHP 1.1.0 </br>Python 1.9.0 </br>.NET 1.10.0 </br>JS 3.0.0 | 0.11.0 | Supported | |
+| April 13 2023 |1.10.5</br> | 1.10.0 | Java 1.8.0 </br>Go 1.6.0 </br>PHP 1.1.0 </br>Python 1.9.0 </br>.NET 1.10.0 </br>JS 3.0.0 | 0.11.0 | Supported | |
+| March 16 2023 | 1.10.4</br> | 1.10.0 | Java 1.8.0 </br>Go 1.6.0 </br>PHP 1.1.0 </br>Python 1.9.0 </br>.NET 1.10.0 </br>JS 2.5.0 | 0.11.0 | Supported | |
+| March 14 2023 | 1.10.3</br> | 1.10.0 | Java 1.8.0 </br>Go 1.6.0 </br>PHP 1.1.0 </br>Python 1.9.0 </br>.NET 1.10.0 </br>JS 2.5.0 | 0.11.0 | Supported | |
+| February 24 2023 | 1.10.2</br> | 1.10.0 | Java 1.8.0 </br>Go 1.6.0 </br>PHP 1.1.0 </br>Python 1.9.0 </br>.NET 1.10.0 </br>JS 2.5.0 | 0.11.0 | Supported | |
+| February 20 2023 | 1.10.1</br> | 1.10.0 | Java 1.8.0 </br>Go 1.6.0 </br>PHP 1.1.0 </br>Python 1.9.0 </br>.NET 1.10.0 </br>JS 2.5.0 | 0.11.0 | Supported | |
+| February 14 2023 | 1.10.0</br> | 1.10.0 | Java 1.8.0 </br>Go 1.6.0 </br>PHP 1.1.0 </br>Python 1.9.0 </br>.NET 1.10.0 </br>JS 2.5.0 | 0.11.0 | Supported| |
+| December 2nd 2022 | 1.9.5</br> | 1.9.1 | Java 1.7.0 </br>Go 1.6.0 </br>PHP 1.1.0 </br>Python 1.8.3 </br>.NET 1.9.0 </br>JS 2.4.2 | 0.11.0 | Supported | |
+| November 17th 2022 | 1.9.4</br> | 1.9.1 | Java 1.7.0 </br>Go 1.6.0 </br>PHP 1.1.0 </br>Python 1.8.3 </br>.NET 1.9.0 </br>JS 2.4.2 | 0.11.0 | Supported | |
+| November 4th 2022 | 1.9.3</br> | 1.9.1 | Java 1.7.0 </br>Go 1.6.0 </br>PHP 1.1.0 </br>Python 1.8.3 </br>.NET 1.9.0 </br>JS 2.4.2 | 0.11.0 | Supported | |
+| November 1st 2022 | 1.9.2</br> | 1.9.1 | Java 1.7.0 </br>Go 1.6.0 </br>PHP 1.1.0 </br>Python 1.8.1 </br>.NET 1.9.0 </br>JS 2.4.2 | 0.11.0 | Supported | |
+| October 26th 2022 | 1.9.1</br> | 1.9.1 | Java 1.7.0 </br>Go 1.6.0 </br>PHP 1.1.0 </br>Python 1.8.1 </br>.NET 1.9.0 </br>JS 2.4.2 | 0.11.0 | Supported | |
+| October 13th 2022 | 1.9.0</br> | 1.9.1 | Java 1.7.0 </br>Go 1.6.0 </br>PHP 1.1.0 </br>Python 1.8.3 </br>.NET 1.9.0 </br>JS 2.4.2 | 0.11.0 | Supported | |
+| October 26th 2022 | 1.8.6</br> | 1.8.1 | Java 1.6.0 </br>Go 1.5.0 </br>PHP 1.1.0 </br>Python 1.7.0 </br>.NET 1.8.0 </br>JS 2.3.0 | 0.11.0 | Unsupported | |
+| October 13th 2022 | 1.8.5</br> | 1.8.1 | Java 1.6.0 </br>Go 1.5.0 </br>PHP 1.1.0 </br>Python 1.7.0 </br>.NET 1.8.0 </br>JS 2.3.0 | 0.11.0 | Unsupported | |
+| August 10th 2022 | 1.8.4</br> | 1.8.1 | Java 1.6.0 </br>Go 1.5.0 </br>PHP 1.1.0 </br>Python 1.7.0 </br>.NET 1.8.0 </br>JS 2.3.0 | 0.11.0 | Unsupported | |
+| July 29th 2022 | 1.8.3</br> | 1.8.0 | Java 1.6.0 </br>Go 1.5.0 </br>PHP 1.1.0 </br>Python 1.7.0 </br>.NET 1.8.0 </br>JS 2.3.0 | 0.11.0 | Unsupported | |
+| July 21st 2022 | 1.8.2</br> | 1.8.0 | Java 1.6.0 </br>Go 1.5.0 </br>PHP 1.1.0 </br>Python 1.7.0 </br>.NET 1.8.0 </br>JS 2.3.0 | 0.11.0 | Unsupported | |
+| July 20th 2022 | 1.8.1</br> | 1.8.0 | Java 1.6.0 </br>Go 1.5.0 </br>PHP 1.1.0 </br>Python 1.7.0 </br>.NET 1.8.0 </br>JS 2.3.0 | 0.11.0 | Unsupported | |
+| July 7th 2022 | 1.8.0</br> | 1.8.0 | Java 1.6.0 </br>Go 1.5.0 </br>PHP 1.1.0 </br>Python 1.7.0 </br>.NET 1.8.0 </br>JS 2.3.0 | 0.11.0 | Unsupported | |
+| October 26th 2022 | 1.7.5</br> | 1.7.0 | Java 1.5.0 </br>Go 1.4.0 </br>PHP 1.1.0 </br>Python 1.6.0 </br>.NET 1.7.0 </br>JS 2.2.1 | 0.10.0 | Unsupported | |
+| May 31st 2022 | 1.7.4</br> | 1.7.0 | Java 1.5.0 </br>Go 1.4.0 </br>PHP 1.1.0 </br>Python 1.6.0 </br>.NET 1.7.0 </br>JS 2.2.1 | 0.10.0 | Unsupported | |
+| May 17th 2022 | 1.7.3</br> | 1.7.0 | Java 1.5.0 </br>Go 1.4.0 </br>PHP 1.1.0 </br>Python 1.6.0 </br>.NET 1.7.0 </br>JS 2.2.1 | 0.10.0 | Unsupported | |
+| Apr 22nd 2022 | 1.7.2</br> | 1.7.0 | Java 1.5.0 </br>Go 1.4.0 </br>PHP 1.1.0 </br>Python 1.6.0 </br>.NET 1.7.0 </br>JS 2.1.0 | 0.10.0 | Unsupported | |
+| Apr 20th 2022 | 1.7.1</br> | 1.7.0 | Java 1.5.0 </br>Go 1.4.0 </br>PHP 1.1.0 </br>Python 1.6.0 </br>.NET 1.7.0 </br>JS 2.1.0 | 0.10.0 | Unsupported | |
+| Apr 7th 2022 | 1.7.0</br> | 1.7.0 | Java 1.5.0 </br>Go 1.4.0 </br>PHP 1.1.0 </br>Python 1.6.0 </br>.NET 1.7.0 </br>JS 2.1.0 | 0.10.0 | Unsupported | |
+| Apr 20th 2022 | 1.6.2</br> | 1.6.0 | Java 1.4.0 </br>Go 1.3.1 </br>PHP 1.1.0 </br>Python 1.5.0 </br>.NET 1.6.0 </br>JS 2.0.0 | 0.9.0 | Unsupported | |
+| Mar 25th 2022 | 1.6.1</br> | 1.6.0 | Java 1.4.0 </br>Go 1.3.1 </br>PHP 1.1.0 </br>Python 1.5.0 </br>.NET 1.6.0 </br>JS 2.0.0 | 0.9.0 | Unsupported | |
+| Jan 25th 2022 | 1.6.0</br> | 1.6.0 | Java 1.4.0 </br>Go 1.3.1 </br>PHP 1.1.0 </br>Python 1.5.0 </br>.NET 1.6.0 </br>JS 2.0.0 | 0.9.0 | Unsupported | |
+| Mar 25th 2022 | 1.5.2</br> | 1.6.0 | Java 1.3.0 </br>Go 1.3.0 </br>PHP 1.1.0 </br>Python 1.4.0 </br>.NET 1.5.0 </br>JS 1.0.2 | 0.9.0 | Unsupported | |
+| Dec 6th 2021 | 1.5.1</br> | 1.5.1 | Java 1.3.0 </br>Go 1.3.0 </br>PHP 1.1.0 </br>Python 1.4.0 </br>.NET 1.5.0 </br>JS 1.0.2 | 0.9.0 | Unsupported | |
+| Nov 11th 2021 | 1.5.0</br> | 1.5.0 | Java 1.3.0 </br>Go 1.3.0 </br>PHP 1.1.0 </br>Python 1.4.0 </br>.NET 1.5.0 </br>JS 1.0.2 | 0.9.0 | Unsupported | |
+| Dec 6th 2021 | 1.4.4</br> | 1.4.0 | Java 1.3.0 </br>Go 1.2.0 </br>PHP 1.1.0 </br>Python 1.3.0 </br>.NET 1.4.0 | 0.8.0 | Unsupported | |
+| Oct 7th 2021 | 1.4.3</br> | 1.4.0 | Java 1.3.0 </br>Go 1.2.0 </br>PHP 1.1.0 </br>Python 1.3.0 </br>.NET 1.4.0 | 0.8.0 | Unsupported | |
+| Sep 24th 2021 | 1.4.2</br> | 1.4.0 | Java 1.3.0 </br>Go 1.2.0 </br>PHP 1.1.0 </br>Python 1.3.0 </br>.NET 1.4.0 | 0.8.0 | Unsupported | |
+| Sep 22nd 2021 | 1.4.1</br> | 1.4.0 | Java 1.3.0 </br>Go 1.2.0 </br>PHP 1.1.0 </br>Python 1.3.0 </br>.NET 1.4.0 | 0.8.0 | Unsupported | |
+| Sep 15th 2021 | 1.4</br> | 1.4.0 | Java 1.3.0 </br>Go 1.2.0 </br>PHP 1.1.0 </br>Python 1.3.0 </br>.NET 1.4.0 | 0.8.0 | Unsupported | |
+| Sep 14th 2021 | 1.3.1</br> | 1.3.0 | Java 1.2.0 </br>Go 1.2.0 </br>PHP 1.1.0 </br>Python 1.2.0 </br>.NET 1.3.0 | 0.7.0 | Unsupported | |
+| Jul 26th 2021 | 1.3</br> | 1.3.0 | Java 1.2.0 </br>Go 1.2.0 </br>PHP 1.1.0 </br>Python 1.2.0 </br>.NET 1.3.0 | 0.7.0 | Unsupported | |

 ## Upgrade paths

@@ -115,7 +121,9 @@ General guidance on upgrading can be found for [self hosted mode]({{< ref self-h
 | | 1.9.6 | 1.10.7 |
 | 1.8.0 to 1.8.6 | N/A | 1.9.6 |
 | 1.9.0 | N/A | 1.9.6 |
-| 1.10.0 | N/A | 1.10.7 |
+| 1.10.0 | N/A | 1.10.8 |
+| 1.11.0 | N/A | 1.11.3 |
+

 ## Upgrade on Hosting platforms

diff --git a/daprdocs/content/en/operations/troubleshooting/common_issues.md b/daprdocs/content/en/operations/troubleshooting/common_issues.md
index 07a0bf170d5..346b680c68f 100644
--- a/daprdocs/content/en/operations/troubleshooting/common_issues.md
+++ b/daprdocs/content/en/operations/troubleshooting/common_issues.md
@@ -94,7 +94,7 @@ There are some known cases where this might not properly work:

 - Make sure the kube api server can reach the following webhooks services:
   - [Sidecar Mutating Webhook Injector Service](https://github.com/dapr/dapr/blob/44235fe8e8799589bb393a3124d2564db2dd6885/charts/dapr/charts/dapr_sidecar_injector/templates/dapr_sidecar_injector_deployment.yaml#L157) at port __4000__ that is served from the sidecar injector.
-  - [CRD Conversion Webhook Service](https://github.com/dapr/dapr/blob/44235fe8e8799589bb393a3124d2564db2dd6885/charts/dapr/charts/dapr_operator/templates/dapr_operator_service.yaml#L28) at port __19443__ that is served from the operator.
+  - [Resource Conversion Webhook Service](https://github.com/dapr/dapr/blob/44235fe8e8799589bb393a3124d2564db2dd6885/charts/dapr/charts/dapr_operator/templates/dapr_operator_service.yaml#L28) at port __19443__ that is served from the operator.

 Check with your cluster administrators to setup allow ingress rules to the above ports, __4000__ and __19443__, in the cluster from the kube api servers.

diff --git a/daprdocs/content/en/reference/api/bindings_api.md b/daprdocs/content/en/reference/api/bindings_api.md
index d0981de2bbb..1dbe37b7ee0 100644
--- a/daprdocs/content/en/reference/api/bindings_api.md
+++ b/daprdocs/content/en/reference/api/bindings_api.md
@@ -37,6 +37,50 @@ If running on kubernetes apply the component to your cluster.

 > **Note:** In production never place passwords or secrets within Dapr component files. For information on securely storing and retrieving secrets using secret stores refer to [Setup Secret Store]({{< ref setup-secret-store >}})

+### Binding direction (optional)
+
+In some scenarios, it would be useful to provide additional information to Dapr to indicate the direction supported by the binding component.
+
+Providing the binding `direction` helps the Dapr sidecar avoid the `"wait for the app to become ready"` state, where it waits indefinitely for the application to become available. This decouples the lifecycle dependency between the Dapr sidecar and the application.
+
+You can specify the `direction` field as part of the component's metadata. The valid values for this field are:
+- `"input"`
+- `"output"`
+- `"input, output"`
+
+{{% alert title="Note" color="primary" %}}
+It is highly recommended that all bindings include the `direction` property.
+{{% /alert %}}
+
+Here are a few scenarios when the `"direction"` metadata field could help:
+
+- When an application (detached from the sidecar) runs as a serverless workload and is scaled to zero, the `"wait for the app to become ready"` check done by the Dapr sidecar becomes pointless.
+ +- If the detached Dapr sidecar is scaled to zero and the application reaches the sidecar (before even starting an HTTP server), the `"wait for the app to become ready"` deadlocks the app and the sidecar into waiting for each other. + +### Example + +```yaml +apiVersion: dapr.io/v1alpha1 +kind: Component +metadata: + name: kafkaevent +spec: + type: bindings.kafka + version: v1 + metadata: + - name: brokers + value: "http://localhost:5050" + - name: topics + value: "someTopic" + - name: publishTopic + value: "someTopic2" + - name: consumerGroup + value: "group1" + - name: "direction" + value: "input, output" +``` + ## Invoking Service Code Through Input Bindings A developer who wants to trigger their app using an input binding can listen on a `POST` http endpoint with the route name being the same as `metadata.name`. diff --git a/daprdocs/content/en/reference/api/configuration_api.md b/daprdocs/content/en/reference/api/configuration_api.md index a1d8784724e..3fe4ed283ea 100644 --- a/daprdocs/content/en/reference/api/configuration_api.md +++ b/daprdocs/content/en/reference/api/configuration_api.md @@ -21,7 +21,7 @@ GET http://localhost:/v1.0/configuration/ Parameter | Description --------- | ----------- `daprPort` | The Dapr port -`storename` | The `metadata.name` field component file. Refer to the [component schema]({{< ref component-schema.md>}}) +`storename` | The `metadata.name` field component file. Refer to the [component spec]({{< ref component-schema.md>}}) #### Query Parameters @@ -83,7 +83,7 @@ GET http://localhost:/v1.0/configuration//subscribe Parameter | Description --------- | ----------- `daprPort` | The Dapr port -`storename` | The `metadata.name` field component file. Refer to the [component schema]({{< ref component-schema.md>}}) +`storename` | The `metadata.name` field component file. Refer to the [component spec]({{< ref component-schema.md>}}) #### Query Parameters @@ -149,7 +149,7 @@ GET http://localhost:/v1.0/configuration// Parameter | Description --------- | ----------- `daprPort` | The Dapr port -`storename` | The `metadata.name` field component file. Refer to the [component schema]({{< ref component-schema.md>}}) +`storename` | The `metadata.name` field component file. Refer to the [component spec]({{< ref component-schema.md>}}) `subscription-id` | The value from the `id` field returned from the response of the subscribe endpoint #### Query Parameters @@ -172,7 +172,7 @@ Code | Description #### Response Body -``` +```json { "ok" : true } @@ -181,7 +181,25 @@ Code | Description ### Example ```shell -curl -X GET 'http://localhost:3500/v1.0/configuration/mystore/bf3aa454-312d-403c-af95-6dec65058fa2/unsubscribe' +curl -X GET 'http://localhost:3500/v1.0-alpha1/configuration/mystore/bf3aa454-312d-403c-af95-6dec65058fa2/unsubscribe' +``` + +> The above command returns the following JSON: + +In case of successful operation: + +```json +{ + "ok": true +} +``` +In case of unsuccessful operation: + +```json +{ + "ok": false, + "message": "" +} ``` ## Optional application (user code) routes @@ -201,7 +219,7 @@ POST http://localhost:/configuration// Parameter | Description --------- | ----------- `appPort` | The application port -`storename` | The `metadata.name` field component file. Refer to the [component schema]({{< ref component-schema.md>}}) +`storename` | The `metadata.name` field component file. 
Refer to the [component spec]({{< ref component-schema.md>}}) `key` | The key subscribed to #### Request Body diff --git a/daprdocs/content/en/reference/api/pubsub_api.md b/daprdocs/content/en/reference/api/pubsub_api.md index 68619a7531c..03068cc3b9c 100644 --- a/daprdocs/content/en/reference/api/pubsub_api.md +++ b/daprdocs/content/en/reference/api/pubsub_api.md @@ -179,7 +179,7 @@ Example: "topic": "newOrder", "route": "/orders", "metadata": { - "rawPayload": "true", + "rawPayload": "true" } } ] diff --git a/daprdocs/content/en/reference/api/secrets_api.md b/daprdocs/content/en/reference/api/secrets_api.md index eae6f390108..89e8a405a5d 100644 --- a/daprdocs/content/en/reference/api/secrets_api.md +++ b/daprdocs/content/en/reference/api/secrets_api.md @@ -28,26 +28,20 @@ name | the name of the secret to get #### Query Parameters -Some secret stores have **optional** metadata properties. metadata is populated using query parameters: +Some secret stores support **optional**, per-request metadata properties. Use query parameters to provide those properties. For example: ``` GET http://localhost:<daprPort>/v1.0/secrets/<secret-store-name>/<name>?metadata.version_id=15 ``` -##### GCP Secret Manager -The following optional meta can be provided to the GCP Secret Manager component +Observe that not all secret stores support the same set of parameters. For example: +- Hashicorp Vault, GCP Secret Manager and AWS Secret Manager support the `version_id` parameter +- Only AWS Secret Manager supports the `version_stage` parameter +- Only Kubernetes Secrets supports the `namespace` parameter +Check each [secret store's documentation]({{< ref supported-secret-stores.md >}}) for the list of supported parameters. -Query Parameter | Description ---------- | ----------- -metadata.version_id | version for the given secret key -##### AWS Secret Manager -The following optional meta can be provided to the AWS Secret Manager component -Query Parameter | Description ---------- | ----------- -metadata.version_id | version for the given secret key -metadata.version_stage | version stage for the given secret key ### HTTP Response @@ -101,17 +95,11 @@ Code | Description ### Examples ```shell -curl http://localhost:3500/v1.0/secrets/vault/db-secret +curl http://localhost:3500/v1.0/secrets/mySecretStore/db-secret ``` ```shell -curl http://localhost:3500/v1.0/secrets/vault/db-secret?metadata.version_id=15&metadata.version_stage=AAA -``` - -> Note, in case of deploying into namespace other than default, the above query will also have to include the namespace metadata (e.g. `production` below) - -```shell -curl http://localhost:3500/v1.0/secrets/vault/db-secret?metadata.version_id=15&?metadata.namespace=production +curl 'http://localhost:3500/v1.0/secrets/myAwsSecretStore/db-secret?metadata.version_id=15&metadata.version_stage=production' ``` ## Get Bulk Secret diff --git a/daprdocs/content/en/reference/api/state_api.md b/daprdocs/content/en/reference/api/state_api.md index ffd073b98ec..7dc1600d954 100644 --- a/daprdocs/content/en/reference/api/state_api.md +++ b/daprdocs/content/en/reference/api/state_api.md @@ -68,6 +68,9 @@ POST http://localhost:3500/v1.0/state/myStore?metadata.contentType=application/j ``` > All URL parameters are case-sensitive. +> Since `||` is a reserved string it cannot be used in the `<state key>` +> field. + #### Request Body A JSON array of state objects.
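For illustration, a request body for this endpoint might look like the following sketch; the keys, values, and `etag` shown are hypothetical placeholders (and, per the note above, a key containing `||` would be rejected):

```json
[
  {
    "key": "order-1",
    "value": { "status": "shipped" }
  },
  {
    "key": "order-2",
    "value": { "status": "pending" },
    "etag": "4",
    "options": {
      "concurrency": "last-write",
      "consistency": "strong"
    }
  }
]
```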
Each state object is comprised of the following fields: diff --git a/daprdocs/content/en/reference/arguments-annotations-overview.md b/daprdocs/content/en/reference/arguments-annotations-overview.md index 1ddf575826d..a1c044a68d8 100644 --- a/daprdocs/content/en/reference/arguments-annotations-overview.md +++ b/daprdocs/content/en/reference/arguments-annotations-overview.md @@ -17,37 +17,38 @@ This table is meant to help users understand the equivalent options for running | `--app-port` | `--app-port` | `-p` | `dapr.io/app-port` | This parameter tells Dapr which port your application is listening on | | `--components-path` | `--components-path` | `-d` | not supported | **Deprecated** in favor of `--resources-path` | | `--resources-path` | `--resources-path` | `-d` | not supported | Path for components directory. If empty, components will not be loaded. | -| `--config` | `--config` | `-c` | `dapr.io/config` | Tells Dapr which Configuration CRD to use | +| `--config` | `--config` | `-c` | `dapr.io/config` | Tells Dapr which Configuration resource to use | | `--control-plane-address` | not supported | | not supported | Address for a Dapr control plane | | `--dapr-grpc-port` | `--dapr-grpc-port` | | not supported | gRPC port for the Dapr API to listen on (default "50001") | | `--dapr-http-port` | `--dapr-http-port` | | not supported | The HTTP port for the Dapr API | | `--dapr-http-max-request-size` | --dapr-http-max-request-size | | `dapr.io/http-max-request-size` | Increasing max size of request body http and grpc servers parameter in MB to handle uploading of big files. Default is `4` MB | | `--dapr-http-read-buffer-size` | --dapr-http-read-buffer-size | | `dapr.io/http-read-buffer-size` | Increasing max size of http header read buffer in KB to handle when sending multi-KB headers. The default 4 KB. When sending bigger than default 4KB http headers, you should set this to a larger value, for example 16 (for 16KB) | -| not supported | `--image` | | `dapr.io/sidecar-image` | Dapr sidecar image. Default is daprio/daprd:latest. The Dapr sidecar uses this image instead of the latest default image. Use this when building your own custom image of Dapr and or [using an alternative stable Dapr image]({{}}) | +| not supported | `--image` | | `dapr.io/sidecar-image` | Dapr sidecar image. Default is daprio/daprd:latest. The Dapr sidecar uses this image instead of the latest default image. Use this when building your own custom image of Dapr and/or [using an alternative stable Dapr image]({{< ref "support-release-policy.md#build-variations" >}}) | | `--internal-grpc-port` | not supported | | not supported | gRPC port for the Dapr Internal API to listen on | -| `--enable-metrics` | not supported | | configuration spec | Enable prometheus metric (default true) | +| `--enable-metrics` | not supported | | configuration spec | Enable [prometheus metric]({{< ref prometheus >}}) (default true) | | `--enable-mtls` | not supported | | configuration spec | Enables automatic mTLS for daprd to daprd communication channels | -| `--enable-profiling` | `--enable-profiling` | | `dapr.io/enable-profiling` | Enable profiling | +| `--enable-profiling` | `--enable-profiling` | | `dapr.io/enable-profiling` | [Enable profiling]({{< ref profiling-debugging >}}) | | `--unix-domain-socket` | `--unix-domain-socket` | `-u` | `dapr.io/unix-domain-socket-path` | The parent directory of socket file.
On Linux, when communicating with the Dapr sidecar, use unix domain sockets for lower latency and greater throughput compared to TCP ports. Not available on Windows OS. | -| `--log-as-json` | not supported | | `dapr.io/log-as-json` | Setting this parameter to `true` outputs logs in JSON format. Default is `false` | -| `--log-level` | `--log-level` | | `dapr.io/log-level` | Sets the log level for the Dapr sidecar. Allowed values are `debug`, `info`, `warn`, `error`. Default is `info` | -| `--enable-api-logging` | `--enable-api-logging` | | `dapr.io/enable-api-logging` | Enables API logging for the Dapr sidecar | -| `--app-max-concurrency` | `--app-max-concurrency` | | `dapr.io/app-max-concurrency` | Limit the concurrency of your application. A valid value is any number larger than `0`| +| `--log-as-json` | not supported | | `dapr.io/log-as-json` | Setting this parameter to `true` outputs [logs in JSON format]({{< ref logs >}}). Default is `false` | +| `--log-level` | `--log-level` | | `dapr.io/log-level` | Sets the [log level]({{< ref logs-troubleshooting >}}) for the Dapr sidecar. Allowed values are `debug`, `info`, `warn`, `error`. Default is `info` | +| `--enable-api-logging` | `--enable-api-logging` | | `dapr.io/enable-api-logging` | [Enables API logging]({{< ref "api-logs-troubleshooting.md#configuring-api-logging-in-kubernetes" >}}) for the Dapr sidecar | +| `--app-max-concurrency` | `--app-max-concurrency` | | `dapr.io/app-max-concurrency` | Limit the [concurrency of your application]({{< ref "control-concurrency.md#setting-app-max-concurrency" >}}). A valid value is any number larger than `0`| | `--metrics-port` | `--metrics-port` | | `dapr.io/metrics-port` | Sets the port for the sidecar metrics server. Default is `9090` | -| `--mode` | not supported | | not supported | Runtime mode for Dapr (default "standalone") | +| `--mode` | not supported | | not supported | Runtime hosting option mode for Dapr, either `"standalone"` or `"kubernetes"` (default `"standalone"`). [Learn more.]({{< ref hosting >}}) | | `--placement-host-address` | `--placement-host-address` | | `dapr.io/placement-host-address` | Comma separated list of addresses for Dapr Actor Placement servers. When no annotation is set, the default value is set by the Sidecar Injector. When the annotation is set and the value is empty, the sidecar does not connect to Placement server. This can be used when there are no actors running in the sidecar. When the annotation is set and the value is not empty, the sidecar connects to the configured address. For example: `127.0.0.1:50057,127.0.0.1:50058` | | `--profiling-port` | `--profiling-port` | | not supported | The port for the profile server (default `7777`) | | `--app-protocol` | `--app-protocol` | `-P` | `dapr.io/app-protocol` | Configures the protocol Dapr uses to communicate with your app. Valid options are `http`, `grpc`, `https` (HTTP with TLS), `grpcs` (gRPC with TLS), `h2c` (HTTP/2 Cleartext). Note that Dapr does not validate TLS certificates presented by the app. Default is `http` | -| `--enable-app-health-check` | `--enable-app-health-check` | | `dapr.io/enable-app-health-check` | Boolean that enables the health checks. Default is `false`. | -| `--app-health-check-path` | `--app-health-check-path` | | `dapr.io/app-health-check-path` | Path that Dapr invokes for health probes when the app channel is HTTP (this value is ignored if the app channel is using gRPC). Requires app health checks to be enabled. 
Default is `/health` | -| `--app-health-probe-interval` | `--app-health-probe-interval` | | `dapr.io/app-health-probe-interval` | Number of *seconds* between each health probe. Requires app health checks to be enabled. Default is `5` | -| `--app-health-probe-timeout` | `--app-health-probe-timeout` | | `dapr.io/app-health-probe-timeout` | Timeout in *milliseconds* for health probe requests. Requires app health checks to be enabled. Default is `500` | -| `--app-health-threshold` | `--app-health-threshold` | | `dapr.io/app-health-threshold"` | Max number of consecutive failures before the app is considered unhealthy. Requires app health checks to be enabled. Default is `3` | -| `--sentry-address` | `--sentry-address` | | not supported | Address for the Sentry CA service | +| `--enable-app-health-check` | `--enable-app-health-check` | | `dapr.io/enable-app-health-check` | Boolean that enables the [health checks]({{< ref "app-health.md#configuring-app-health-checks" >}}). Default is `false`. | +| `--app-health-check-path` | `--app-health-check-path` | | `dapr.io/app-health-check-path` | Path that Dapr invokes for health probes when the app channel is HTTP (this value is ignored if the app channel is using gRPC). Requires [app health checks to be enabled]({{< ref "app-health.md#configuring-app-health-checks" >}}). Default is `/health`. | +| `--app-health-probe-interval` | `--app-health-probe-interval` | | `dapr.io/app-health-probe-interval` | Number of *seconds* between each health probe. Requires [app health checks to be enabled]({{< ref "app-health.md#configuring-app-health-checks" >}}). Default is `5` | +| `--app-health-probe-timeout` | `--app-health-probe-timeout` | | `dapr.io/app-health-probe-timeout` | Timeout in *milliseconds* for health probe requests. Requires [app health checks to be enabled]({{< ref "app-health.md#configuring-app-health-checks" >}}). Default is `500` | +| `--app-health-threshold` | `--app-health-threshold` | | `dapr.io/app-health-threshold` | Max number of consecutive failures before the app is considered unhealthy. Requires [app health checks to be enabled]({{< ref "app-health.md#configuring-app-health-checks" >}}). Default is `3` | +| `--sentry-address` | `--sentry-address` | | not supported | Address for the [Sentry CA service]({{< ref sentry >}}) | | `--version` | `--version` | `-v` | not supported | Prints the runtime version | | `--dapr-graceful-shutdown-seconds` | not supported | | `dapr.io/graceful-shutdown-seconds` | Graceful shutdown duration in seconds for Dapr, the maximum duration before forced shutdown when waiting for all in-progress requests to complete. Defaults to `5`. If you are running in Kubernetes mode, this value should not be larger than the Kubernetes termination grace period, whose default value is `30`.| | not supported | not supported | | `dapr.io/enabled` | Setting this parameter to true injects the Dapr sidecar into the pod | -| not supported | not supported | | `dapr.io/api-token-secret` | Tells Dapr which Kubernetes secret to use for token based API authentication. By default this is not set | +| not supported | not supported | | `dapr.io/api-token-secret` | Tells Dapr which Kubernetes secret to use for [token-based API authentication]({{< ref api-token >}}). By default this is not set | +| not supported | not supported | | `dapr.io/app-token-secret` | Tells Dapr which Kubernetes secret to use for [token-based application authentication]({{< ref app-api-token >}}).
By default, this is not set | | `--dapr-listen-addresses` | not supported | | `dapr.io/sidecar-listen-addresses` | Comma separated list of IP addresses that sidecar will listen to. Defaults to all in standalone mode. Defaults to `[::1],127.0.0.1` in Kubernetes. To listen to all IPv4 addresses, use `0.0.0.0`. To listen to all IPv6 addresses, use `[::]`.| | not supported | not supported | | `dapr.io/sidecar-cpu-limit` | Maximum amount of CPU that the Dapr sidecar can use. See valid values [here](https://kubernetes.io/docs/tasks/administer-cluster/manage-resources/quota-memory-cpu-namespace/). By default this is not set| | not supported | not supported | | `dapr.io/sidecar-memory-limit` | Maximum amount of Memory that the Dapr sidecar can use. See valid values [here](https://kubernetes.io/docs/tasks/administer-cluster/manage-resources/quota-memory-cpu-namespace/). By default this is not set| @@ -62,7 +63,7 @@ This table is meant to help users understand the equivalent options for running | not supported | not supported | | `dapr.io/sidecar-readiness-probe-period-seconds` | How often (in seconds) to perform the sidecar readiness probe. Read more [here](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes). Default is `6`| | not supported | not supported | | `dapr.io/sidecar-readiness-probe-threshold` | When the sidecar readiness probe fails, Kubernetes will try N times before giving up. In this case, the Pod will be marked Unready. Read more about `failureThreshold` [here](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes). Default is `3`| | not supported | not supported | | `dapr.io/env` | List of environment variable to be injected into the sidecar. Strings consisting of key=value pairs separated by a comma.| -| not supported | not supported | | `dapr.io/volume-mounts` | List of pod volumes to be mounted to the sidecar container in read-only mode. Strings consisting of `volume:path` pairs separated by a comma. Example, `"volume-1:/tmp/mount1,volume-2:/home/root/mount2"`. | -| not supported | not supported | | `dapr.io/volume-mounts-rw` | List of pod volumes to be mounted to the sidecar container in read-write mode. Strings consisting of `volume:path` pairs separated by a comma. Example, `"volume-1:/tmp/mount1,volume-2:/home/root/mount2"`. | -| `--disable-builtin-k8s-secret-store` | not supported | | `dapr.io/disable-builtin-k8s-secret-store` | Disables BuiltIn Kubernetes secret store. Default value is false. See [Kubernetes secret store component]({{}}) for details. | +| not supported | not supported | | `dapr.io/volume-mounts` | List of [pod volumes to be mounted to the sidecar container]({{< ref "kubernetes-volume-mounts" >}}) in read-only mode. Strings consisting of `volume:path` pairs separated by a comma. Example, `"volume-1:/tmp/mount1,volume-2:/home/root/mount2"`. | +| not supported | not supported | | `dapr.io/volume-mounts-rw` | List of [pod volumes to be mounted to the sidecar container]({{< ref "kubernetes-volume-mounts" >}}) in read-write mode. Strings consisting of `volume:path` pairs separated by a comma. Example, `"volume-1:/tmp/mount1,volume-2:/home/root/mount2"`. | +| `--disable-builtin-k8s-secret-store` | not supported | | `dapr.io/disable-builtin-k8s-secret-store` | Disables BuiltIn Kubernetes secret store. Default value is false. See [Kubernetes secret store component]({{< ref "kubernetes-secret-store.md" >}}) for details. 
| | not supported | not supported | | `dapr.io/sidecar-seccomp-profile-type` | Set the sidecar container's `securityContext.seccompProfile.type` to `Unconfined`, `RuntimeDefault`, or `Localhost`. By default, this annotation is not set on the Dapr sidecar, hence the field is omitted from sidecar container. | diff --git a/daprdocs/content/en/reference/cli/dapr-run.md b/daprdocs/content/en/reference/cli/dapr-run.md index 5587fc21fa2..9a519f98c72 100644 --- a/daprdocs/content/en/reference/cli/dapr-run.md +++ b/daprdocs/content/en/reference/cli/dapr-run.md @@ -27,7 +27,7 @@ dapr run [flags] [command] | `--app-max-concurrency` | | `unlimited` | The concurrency level of the application; default is unlimited | | `--app-port`, `-p` | `APP_PORT` | | The port your application is listening on | | `--app-protocol`, `-P` | | `http` | The protocol Dapr uses to talk to the application. Valid values are: `http`, `grpc`, `https` (HTTP with TLS), `grpcs` (gRPC with TLS), `h2c` (HTTP/2 Cleartext) | -| `--resources-path`, `-d` | | Linux/Mac: `$HOME/.dapr/components`
Windows: `%USERPROFILE%\.dapr\components` | The path for components directory |
+| `--resources-path`, `-d` | | Linux/Mac: `$HOME/.dapr/components`<br>Windows: `%USERPROFILE%\.dapr\components` | The path for resources directory. If you've organized your resources into multiple folders (for example, components in one folder, resiliency policies in another), you can define multiple resource paths. See [example]({{< ref "#examples" >}}) below. | | `--app-channel-address` | | `127.0.0.1` | The network address the application listens on | | `--runtime-path` | | | Dapr runtime install path | | `--config`, `-c` | | Linux/Mac: `$HOME/.dapr/config.yaml`
Windows: `%USERPROFILE%\.dapr\config.yaml` | Dapr configuration file | @@ -78,4 +78,7 @@ dapr run --app-id myapp --app-port 5000 --app-protocol grpc -- go run main.go # Run a NodeJs application that listens to port 3000 with API logging enabled dapr run --app-id myapp --app-port 3000 --enable-api-logging -- node myapp.js + +# Pass multiple resource paths +dapr run --app-id myapp --resources-path path1 --resources-path path2 ``` diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/_index.md b/daprdocs/content/en/reference/components-reference/supported-bindings/_index.md index 6db86fefac4..4894dedfb82 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/_index.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/_index.md @@ -9,8 +9,10 @@ aliases: no_list: true --- -Every binding has its own unique set of properties. Click the name link to see the component YAML for each binding. +The following table lists input and output bindings supported by the Dapr bindings building block. [Learn how to set up different input and output binding components for Dapr bindings.]({{< ref setup-bindings.md >}}) {{< partial "components/description.html" >}} +Every binding component has its own set of properties. Click the name link to see the component specification for each binding. + {{< partial "components/bindings.html" >}} diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/alicloud-dingtalk.md b/daprdocs/content/en/reference/components-reference/supported-bindings/alicloud-dingtalk.md index 767f6fe904e..2d112e2aa98 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/alicloud-dingtalk.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/alicloud-dingtalk.md @@ -23,16 +23,21 @@ spec: value: "https://oapi.dingtalk.com/robot/send?access_token=******" - name: secret value: "****************" + - name: direction + value: "input, output" ``` + {{% alert title="Warning" color="warning" %}} The above example uses secrets as plain strings. It is recommended to use a secret store for the secrets as described [here]({{< ref component-secrets.md >}}). 
{{% /alert %}} + ## Spec metadata fields | Field | Required | Binding support | Details | Example | |--------------------|:--------:|--------|--------|---------| -| id | Y | Input/Output |unique id| `"test_webhook_id"` -| url | Y | Input/Output |DingTalk's Webhook url | `"https://oapi.dingtalk.com/robot/send?access_token=******"` -| secret | N | Input/Output |the secret of DingTalk's Webhook | `"****************"` +| `id` | Y | Input/Output |Unique id| `"test_webhook_id"` +| `url` | Y | Input/Output |DingTalk's Webhook url | `"https://oapi.dingtalk.com/robot/send?access_token=******"` +| `secret` | N | Input/Output |The secret of DingTalk's Webhook | `"****************"` +| `direction` | N | Input/Output |The direction of the binding | `"input"`, `"output"`, `"input, output"` ## Binding support diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/alicloudoss.md b/daprdocs/content/en/reference/components-reference/supported-bindings/alicloudoss.md index cc7c52b38f0..4036bb03741 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/alicloudoss.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/alicloudoss.md @@ -28,6 +28,8 @@ spec: value: "[access-key]" - name: bucket value: "[bucket]" + - name: direction + value: "output" ``` {{% alert title="Warning" color="warning" %}} @@ -42,6 +44,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr | `accessKeyID` | Y | Output | Access key ID credential. | | `accessKey` | Y | Output | Access key credential. | | `bucket` | Y | Output | Name of the storage bucket. | +| `direction` | N | Output | Direction of the binding. | `"output"` ## Binding support diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/alicloudsls.md b/daprdocs/content/en/reference/components-reference/supported-bindings/alicloudsls.md index 8afc5c92156..b81db6d3cce 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/alicloudsls.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/alicloudsls.md @@ -26,6 +26,8 @@ spec: value: "[accessKey-secret]" - name: Endpoint value: "[endpoint]" + - name: direction + value: "output" ``` ## Spec metadata fields @@ -35,6 +37,7 @@ spec: | `AccessKeyID` | Y | Output | Access key ID credential. | | `AccessKeySecret` | Y | Output | Access key credential secret | | `Endpoint` | Y | Output | Alicloud SLS endpoint. | +| `direction` | N | Output | Direction of the binding. | `"output"` ## Binding support diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/alicloudtablestore.md b/daprdocs/content/en/reference/components-reference/supported-bindings/alicloudtablestore.md index 1a9173ff23c..61daf950eb6 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/alicloudtablestore.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/alicloudtablestore.md @@ -32,6 +32,8 @@ spec: value: "[table]" - name: endpoint value: "[endpoint]" + - name: direction + value: "output" ``` {{% alert title="Warning" color="warning" %}} @@ -47,6 +49,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr | `accessKey` | Y | Output | Access key credential. | | `instanceName` | Y | Output | Name of the instance. | | `tableName` | Y | Output | Name of the table. | +| `direction` | N | Output | Direction of the binding. 
| `"output"` ## Binding support diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/apns.md b/daprdocs/content/en/reference/components-reference/supported-bindings/apns.md index 2fb74483b76..3b534cc55ea 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/apns.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/apns.md @@ -21,24 +21,27 @@ spec: version: v1 metadata: - name: development - value: + value: "" - name: key-id - value: + value: "" - name: team-id - value: + value: "" - name: private-key secretKeyRef: name: - key: + key: "" + - name: direction + value: "output" ``` ## Spec metadata fields | Field | Required | Binding support | Details | Example | |--------------------|:--------:| ----------------|---------|---------| -| development | Y | Output | Tells the binding which APNs service to use. Set to `"true"` to use the development service or `"false"` to use the production service. Default: `"true"` | `"true"` | -| key-id | Y | Output | The identifier for the private key from the Apple Developer Portal | `"private-key-id`" | -| team-id | Y | Output | The identifier for the organization or author from the Apple Developer Portal | `"team-id"` | -| private-key | Y | Output| Is a PKCS #8-formatted private key. It is intended that the private key is stored in the secret store and not exposed directly in the configuration. See [here](#private-key) for more details | `"pem file"` | +| `development` | Y | Output | Tells the binding which APNs service to use. Set to `"true"` to use the development service or `"false"` to use the production service. Default: `"true"` | `"true"` | +| `key-id` | Y | Output | The identifier for the private key from the Apple Developer Portal | `"private-key-id`" | +| `team-id` | Y | Output | The identifier for the organization or author from the Apple Developer Portal | `"team-id"` | +| `private-key` | Y | Output| Is a PKCS #8-formatted private key. It is intended that the private key is stored in the secret store and not exposed directly in the configuration. See [here](#private-key) for more details | `"pem file"` | +| `direction` | N | Output| The direction of the binding. | `"output"` | ### Private key The APNS binding needs a cryptographic private key in order to generate authentication tokens for the APNS service. diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/blobstorage.md b/daprdocs/content/en/reference/components-reference/supported-bindings/blobstorage.md index 3df3e28048b..b5ed204388e 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/blobstorage.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/blobstorage.md @@ -33,6 +33,8 @@ spec: # value: # - name: publicAccessLevel # value: +# - name: direction +# value: "output" ``` {{% alert title="Warning" color="warning" %}} The above example uses secrets as plain strings. It is recommended to use a secret store for the secrets as described [here]({{< ref component-secrets.md >}}). @@ -49,6 +51,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr | `decodeBase64` | N | Output | Configuration to decode base64 file content before saving to Blob Storage. (In case of saving a file with binary content). 
Defaults to `false` | `true`, `false` | | `getBlobRetryCount` | N | Output | Specifies the maximum number of HTTP GET requests that will be made while reading from a RetryReader Defaults to `10` | `1`, `2` | `publicAccessLevel` | N | Output | Specifies whether data in the container may be accessed publicly and the level of access (only used if the container is created by Dapr). Defaults to `none` | `blob`, `container`, `none` +| `direction` | N | Output | The direction of the binding. | `"output"` ### Azure Active Directory (AAD) authentication diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/cloudflare-queues.md b/daprdocs/content/en/reference/components-reference/supported-bindings/cloudflare-queues.md index 2a1420b6df6..b1196b54feb 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/cloudflare-queues.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/cloudflare-queues.md @@ -46,6 +46,9 @@ spec: # URL of the Worker (required if the Worker has been pre-created outside of Dapr) - name: workerUrl value: "" + # Direction of the binding + - name: direction + value: "output" ``` {{% alert title="Warning" color="warning" %}} @@ -61,6 +64,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr | `cfAccountID` | Y/N | Output | Cloudflare account ID. Required to have Dapr manage the worker. | `"456789abcdef8b5588f3d134f74ac"def` | `cfAPIToken` | Y/N | Output | API token for Cloudflare. Required to have Dapr manage the Worker. | `"secret-key"` | `workerUrl` | Y/N | Output | URL of the Worker. Required if the Worker has been pre-provisioned outside of Dapr. | `"https://mydaprqueue.mydomain.workers.dev"` +| `direction` | N | Output | Direction of the binding. | `"output"` > When you configure Dapr to create your Worker for you, you may need to set a longer value for the `initTimeout` property of the component, to allow enough time for the Worker script to be deployed. For example: `initTimeout: "120s"` diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/commercetools.md b/daprdocs/content/en/reference/components-reference/supported-bindings/commercetools.md index 010edf1c897..94fd95d1484 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/commercetools.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/commercetools.md @@ -23,18 +23,19 @@ spec: version: v1 metadata: - name: region # required. - value: region + value: "region" - name: provider # required. - value: provider (gcp/aws) + value: "gcp" - name: projectKey # required. - value: project-key + value: "" - name: clientID # required. - value: ***************** + value: "*****************" - name: clientSecret # required. - value: ***************** + value: "*****************" - name: scopes # required. - value: scopes - + value: "" + - name: direction + value: "output" ``` {{% alert title="Warning" color="warning" %}} The above example uses secrets as plain strings. It is recommended to use a secret store for the secrets as described [here]({{< ref component-secrets.md >}}). @@ -44,12 +45,13 @@ The above example uses secrets as plain strings. 
It is recommended to use a secr | Field | Required | Binding support | Details | Example | |--------------------|:--------:|------------|-----|---------| -| region | Y | Output | The region of the commercetools project | `"europe-west1"` | -| provider | Y | Output | The cloud provider, either gcp or aws | `"gcp"` | -| projectKey | Y | Output | The commercetools project key | `"project-key"` | -| clientID | Y | Output | The commercetools client ID for the project | `"client ID"` | -| clientSecret | Y | Output | The commercetools client secret for the project | `"client secret"` | -| scopes | Y | Output | The commercetools scopes for the project | `"manage_project:project-key"` | +| `region` | Y | Output | The region of the commercetools project | `"europe-west1"` | +| `provider` | Y | Output | The cloud provider, either gcp or aws | `"gcp"`, `"aws"` | +| `projectKey` | Y | Output | The commercetools project key | | +| `clientID` | Y | Output | The commercetools client ID for the project | | +| `clientSecret` | Y | Output | The commercetools client secret for the project | | +| `scopes` | Y | Output | The commercetools scopes for the project | `"manage_project:project-key"` | +| `direction` | N | Output | The direction of the binding | `"output"` | For more information see [commercetools - Creating an API Client](https://docs.commercetools.com/getting-started/create-api-client#create-an-api-client) and [commercetools - Regions](https://docs.commercetools.com/api/general-concepts#regions). diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/cosmosdb.md b/daprdocs/content/en/reference/components-reference/supported-bindings/cosmosdb.md index c9a54526f04..111ecab83c0 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/cosmosdb.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/cosmosdb.md @@ -22,15 +22,17 @@ spec: version: v1 metadata: - name: url - value: https://******.documents.azure.com:443/ + value: "https://******.documents.azure.com:443/" - name: masterKey - value: ***** + value: "*****" - name: database - value: db + value: "OrderDb" - name: collection - value: collection + value: "Orders" - name: partitionKey - value: message + value: "" + - name: direction + value: "output" ``` {{% alert title="Warning" color="warning" %}} @@ -41,11 +43,12 @@ The above example uses secrets as plain strings. It is recommended to use a secr | Field | Required | Binding support | Details | Example | |--------------------|:--------:|--------|---------|---------| -| url | Y | Output | The Cosmos DB url | `"https://******.documents.azure.com:443/"` | -| masterKey | Y | Output | The Cosmos DB account master key | `"master-key"` | -| database | Y | Output | The name of the Cosmos DB database | `"OrderDb"` | -| collection | Y | Output | The name of the container inside the database. | `"Orders"` | -| partitionKey | Y | Output | The name of the key to extract from the payload (document to be created) that is used as the partition key. This name must match the partition key specified upon creation of the Cosmos DB container. | `"OrderId"`, `"message"` | +| `url` | Y | Output | The Cosmos DB url | `"https://******.documents.azure.com:443/"` | +| `masterKey` | Y | Output | The Cosmos DB account master key | `"master-key"` | +| `database` | Y | Output | The name of the Cosmos DB database | `"OrderDb"` | +| `collection` | Y | Output | The name of the container inside the database. 
| `"Orders"` | +| `partitionKey` | Y | Output | The name of the key to extract from the payload (document to be created) that is used as the partition key. This name must match the partition key specified upon creation of the Cosmos DB container. | `"OrderId"`, `"message"` | +| `direction` | N | Output | The direction of the binding. | `"output"` | For more information see [Azure Cosmos DB resource model](https://docs.microsoft.com/azure/cosmos-db/account-databases-containers-items). diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/cosmosdbgremlinapi.md b/daprdocs/content/en/reference/components-reference/supported-bindings/cosmosdbgremlinapi.md index 16641aa79a0..505bc5ca6bc 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/cosmosdbgremlinapi.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/cosmosdbgremlinapi.md @@ -20,12 +20,14 @@ spec: version: v1 metadata: - name: url - value: wss://******.gremlin.cosmos.azure.com:443/ + value: "wss://******.gremlin.cosmos.azure.com:443/" - name: masterKey - value: ***** + value: "*****" - name: username - value: ***** - ``` + value: "*****" + - name: direction + value: "output" +``` {{% alert title="Warning" color="warning" %}} The above example uses secrets as plain strings. It is recommended to use a secret store for the secrets as described [here]({{< ref component-secrets.md >}}). @@ -38,6 +40,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr | `url` | Y | Output | The Cosmos DB url for Gremlin APIs | `"wss://******.gremlin.cosmos.azure.com:443/"` | | `masterKey` | Y | Output | The Cosmos DB account master key | `"masterKey"` | | `username` | Y | Output | The username of the Cosmos DB database | `"/dbs//colls/"` | +| `direction` | N | Output | The direction of the binding | `"output"` | For more information see [Quickstart: Azure Cosmos Graph DB using Gremlin](https://docs.microsoft.com/azure/cosmos-db/graph/create-graph-console). diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/cron.md b/daprdocs/content/en/reference/components-reference/supported-bindings/cron.md index ace35d10495..6a046f781b0 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/cron.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/cron.md @@ -23,13 +23,16 @@ spec: metadata: - name: schedule value: "@every 15m" # valid cron schedule + - name: direction + value: "input" ``` ## Spec metadata fields | Field | Required | Binding support | Details | Example | |--------------------|:--------:|-------|--------|---------| -| schedule | Y | Input| The valid cron schedule to use. See [this](#schedule-format) for more details | `"@every 15m"` +| `schedule` | Y | Input| The valid cron schedule to use. 
See [this](#schedule-format) for more details | `"@every 15m"` +| `direction` | N | Input| The direction of the binding | `"input"` ### Schedule Format diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/dynamodb.md b/daprdocs/content/en/reference/components-reference/supported-bindings/dynamodb.md index 35f81adb968..63654df5c87 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/dynamodb.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/dynamodb.md @@ -23,16 +23,17 @@ spec: version: v1 metadata: - name: table - value: items + value: "items" - name: region - value: us-west-2 + value: "us-west-2" - name: accessKey - value: ***************** + value: "*****************" - name: secretKey - value: ***************** + value: "*****************" - name: sessionToken - value: ***************** - + value: "*****************" + - name: direction + value: "output" ``` {{% alert title="Warning" color="warning" %}} @@ -43,11 +44,12 @@ The above example uses secrets as plain strings. It is recommended to use a secr | Field | Required | Binding support | Details | Example | |--------------------|:--------:|------------|-----|---------| -| table | Y | Output | The DynamoDB table name | `"items"` | -| region | Y | Output | The specific AWS region the AWS DynamoDB instance is deployed in | `"us-east-1"` | -| accessKey | Y | Output | The AWS Access Key to access this resource | `"key"` | -| secretKey | Y | Output | The AWS Secret Access Key to access this resource | `"secretAccessKey"` | -| sessionToken | N | Output | The AWS session token to use | `"sessionToken"` | +| `table` | Y | Output | The DynamoDB table name | `"items"` | +| `region` | Y | Output | The specific AWS region the AWS DynamoDB instance is deployed in | `"us-east-1"` | +| `accessKey` | Y | Output | The AWS Access Key to access this resource | `"key"` | +| `secretKey` | Y | Output | The AWS Secret Access Key to access this resource | `"secretAccessKey"` | +| `sessionToken` | N | Output | The AWS session token to use | `"sessionToken"` | +| `direction` | N | Output | The direction of the binding | `"output"` | {{% alert title="Important" color="warning" %}} When running the Dapr sidecar (daprd) with your application on EKS (AWS Kubernetes), if you're using a node/pod that has already been attached to an IAM policy defining access to AWS resources, you **must not** provide AWS access-key, secret-key, and tokens in the definition of the component spec you're using. diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/eventgrid.md b/daprdocs/content/en/reference/components-reference/supported-bindings/eventgrid.md index f0469b7bbb9..6288baee4b7 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/eventgrid.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/eventgrid.md @@ -46,6 +46,9 @@ spec: # Optional Input Binding Metadata - name: eventSubscriptionName value: "[EventSubscriptionName]" + # Optional metadata + - name: direction + value: "input, output" ``` {{% alert title="Warning" color="warning" %}} @@ -66,6 +69,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr | `handshakePort` | Y | Input | The container port that the input binding listens on when receiving events on the webhook | `"9000"` | | `scope` | Y | Input | The identifier of the resource to which the event subscription needs to be created or updated. 
See the [scope section](#scope) for more details | `"/subscriptions/{subscriptionId}/"` | | `eventSubscriptionName` | N | Input | The name of the event subscription. Event subscription names must be between 3 and 64 characters long and should use alphanumeric letters only | `"name"` | +| `direction` | N | Input/Output | The direction of the binding | `"input"`, `"output"`, `"input, output"` | ### Scope diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/eventhubs.md b/daprdocs/content/en/reference/components-reference/supported-bindings/eventhubs.md index dfe28434ba2..a4dc7701369 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/eventhubs.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/eventhubs.md @@ -55,6 +55,9 @@ spec: # Alternative to passing storageAccountKey - name: storageConnectionString value: "DefaultEndpointsProtocol=https;AccountName=;AccountKey=" + # Optional metadata + - name: direction + value: "input, output" ``` {{% alert title="Warning" color="warning" %}} @@ -78,6 +81,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr | `storageAccountKey` | Y* | Input | Storage account key for the checkpoint store account.
* When using Azure AD, it's possible to omit this if the service principal has access to the storage account too. | `"112233445566778899"` | `storageConnectionString` | Y* | Input | Connection string for the checkpoint store, alternative to specifying `storageAccountKey` | `"DefaultEndpointsProtocol=https;AccountName=myeventhubstorage;AccountKey="` | `storageContainerName` | Y | Input | Storage container name for the storage account name. | `"myeventhubstoragecontainer"` +| `direction` | N | Input/Output | The direction of the binding. | `"input"`, `"output"`, `"input, output"` ### Azure Active Directory (AAD) authentication diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/gcpbucket.md b/daprdocs/content/en/reference/components-reference/supported-bindings/gcpbucket.md index db2f788a5e2..c4097a525c6 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/gcpbucket.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/gcpbucket.md @@ -22,31 +22,33 @@ spec: version: v1 metadata: - name: bucket - value: mybucket + value: "mybucket" - name: type - value: service_account + value: "service_account" - name: project_id - value: project_111 + value: "project_111" - name: private_key_id - value: ************* + value: "*************" - name: client_email - value: name@domain.com + value: "name@domain.com" - name: client_id - value: '1111111111111111' + value: "1111111111111111" - name: auth_uri - value: https://accounts.google.com/o/oauth2/auth + value: "https://accounts.google.com/o/oauth2/auth" - name: token_uri - value: https://oauth2.googleapis.com/token + value: "https://oauth2.googleapis.com/token" - name: auth_provider_x509_cert_url - value: https://www.googleapis.com/oauth2/v1/certs + value: "https://www.googleapis.com/oauth2/v1/certs" - name: client_x509_cert_url - value: https://www.googleapis.com/robot/v1/metadata/x509/.iam.gserviceaccount.com + value: "https://www.googleapis.com/robot/v1/metadata/x509/.iam.gserviceaccount.com" - name: private_key - value: PRIVATE KEY + value: "PRIVATE KEY" - name: decodeBase64 - value: + value: "" - name: encodeBase64 - value: + value: "" + - name: direction + value: "output" ``` {{% alert title="Warning" color="warning" %}} @@ -57,19 +59,20 @@ The above example uses secrets as plain strings. It is recommended to use a secr | Field | Required | Binding support | Details | Example | |--------------------|:--------:|------------|-----|---------| -| bucket | Y | Output | The bucket name | `"mybucket"` | -| type | Y | Output | Tge GCP credentials type | `"service_account"` | -| project_id | Y | Output | GCP project id| `projectId` -| private_key_id | Y | Output | GCP private key id | `"privateKeyId"` -| private_key | Y | Output | GCP credentials private key. 
Replace with x509 cert | `12345-12345` -| client_email | Y | Output | GCP client email | `"client@email.com"` -| client_id | Y | Output | GCP client id | `0123456789-0123456789` -| auth_uri | Y | Output | Google account OAuth endpoint | `https://accounts.google.com/o/oauth2/auth` -| token_uri | Y | Output | Google account token uri | `https://oauth2.googleapis.com/token` -| auth_provider_x509_cert_url | Y | Output | GCP credentials cert url | `https://www.googleapis.com/oauth2/v1/certs` -| client_x509_cert_url | Y | Output | GCP credentials project x509 cert url | `https://www.googleapis.com/robot/v1/metadata/x509/.iam.gserviceaccount.com` -| decodeBase64 | N | Output | Configuration to decode base64 file content before saving to bucket storage. (In case of saving a file with binary content). `true` is the only allowed positive value. Other positive variations like `"True", "1"` are not acceptable. Defaults to `false` | `true`, `false` | -| encodeBase64 | N | Output | Configuration to encode base64 file content before return the content. (In case of opening a file with binary content). `true` is the only allowed positive value. Other positive variations like `"True", "1"` are not acceptable. Defaults to `false` | `true`, `false` | +| `bucket` | Y | Output | The bucket name | `"mybucket"` | +| `type` | Y | Output | The GCP credentials type | `"service_account"` | +| `project_id` | Y | Output | GCP project id | `projectId` +| `private_key_id` | Y | Output | GCP private key id | `"privateKeyId"` +| `private_key` | Y | Output | GCP credentials private key. Replace with x509 cert | `12345-12345` +| `client_email` | Y | Output | GCP client email | `"client@email.com"` +| `client_id` | Y | Output | GCP client id | `0123456789-0123456789` +| `auth_uri` | Y | Output | Google account OAuth endpoint | `https://accounts.google.com/o/oauth2/auth` +| `token_uri` | Y | Output | Google account token uri | `https://oauth2.googleapis.com/token` +| `auth_provider_x509_cert_url` | Y | Output | GCP credentials cert url | `https://www.googleapis.com/oauth2/v1/certs` +| `client_x509_cert_url` | Y | Output | GCP credentials project x509 cert url | `https://www.googleapis.com/robot/v1/metadata/x509/.iam.gserviceaccount.com` +| `decodeBase64` | N | Output | Configuration to decode base64 file content before saving to bucket storage. (In case of saving a file with binary content). `true` is the only allowed positive value. Other positive variations like `"True", "1"` are not acceptable. Defaults to `false` | `true`, `false` | +| `encodeBase64` | N | Output | Configuration to encode base64 file content before returning the content. (In case of opening a file with binary content). `true` is the only allowed positive value. Other positive variations like `"True", "1"` are not acceptable. Defaults to `false` | `true`, `false` | +| `direction` | N | Output | The direction of the binding. | `"output"` ## Binding support diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/gcppubsub.md b/daprdocs/content/en/reference/components-reference/supported-bindings/gcppubsub.md index 4e9e6ed2f42..0608fa7d9aa 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/gcppubsub.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/gcppubsub.md @@ -9,7 +9,7 @@ aliases: ## Component format -To setup Azure Pub/Sub binding create a component of type `bindings.gcp.pubsub`.
See [this guide]({{< ref "howto-bindings.md#1-create-a-binding" >}}) on how to create and apply a binding configuration. +To setup GCP Pub/Sub binding create a component of type `bindings.gcp.pubsub`. See [this guide]({{< ref "howto-bindings.md#1-create-a-binding" >}}) on how to create and apply a binding configuration. ```yaml @@ -22,29 +22,31 @@ spec: version: v1 metadata: - name: topic - value: topic1 + value: "topic1" - name: subscription - value: subscription1 + value: "subscription1" - name: type - value: service_account + value: "service_account" - name: project_id - value: project_111 + value: "project_111" - name: private_key_id - value: ************* + value: "*************" - name: client_email - value: name@domain.com + value: "name@domain.com" - name: client_id - value: '1111111111111111' + value: "1111111111111111" - name: auth_uri - value: https://accounts.google.com/o/oauth2/auth + value: "https://accounts.google.com/o/oauth2/auth" - name: token_uri - value: https://oauth2.googleapis.com/token + value: "https://oauth2.googleapis.com/token" - name: auth_provider_x509_cert_url - value: https://www.googleapis.com/oauth2/v1/certs + value: "https://www.googleapis.com/oauth2/v1/certs" - name: client_x509_cert_url - value: https://www.googleapis.com/robot/v1/metadata/x509/.iam.gserviceaccount.com + value: "https://www.googleapis.com/robot/v1/metadata/x509/.iam.gserviceaccount.com" - name: private_key - value: PRIVATE KEY + value: "PRIVATE KEY" + - name: direction + value: "input, output" ``` {{% alert title="Warning" color="warning" %}} The above example uses secrets as plain strings. It is recommended to use a secret store for the secrets as described [here]({{< ref component-secrets.md >}}). @@ -54,18 +56,19 @@ The above example uses secrets as plain strings. It is recommended to use a secr | Field | Required | Binding support | Details | Example | |--------------------|:--------:|-----------| -----|---------| -| topic | Y | Output | GCP Pub/Sub topic name | `"topic1"` | -| subscription | N | GCP Pub/Sub subscription name | `"name1"` | -| type | Y | Output | GCP credentials type | `service_account` -| project_id | Y | Output | GCP project id| `projectId` -| private_key_id | N | Output | GCP private key id | `"privateKeyId"` -| private_key | Y | Output | GCP credentials private key. Replace with x509 cert | `12345-12345` -| client_email | Y | Output | GCP client email | `"client@email.com"` -| client_id | N | Output | GCP client id | `0123456789-0123456789` -| auth_uri | N | Output | Google account OAuth endpoint | `https://accounts.google.com/o/oauth2/auth` -| token_uri | N | Output | Google account token uri | `https://oauth2.googleapis.com/token` -| auth_provider_x509_cert_url | N | Output |GCP credentials cert url | `https://www.googleapis.com/oauth2/v1/certs` -| client_x509_cert_url | N | Output | GCP credentials project x509 cert url | `https://www.googleapis.com/robot/v1/metadata/x509/.iam.gserviceaccount.com` +| `topic` | Y | Output | GCP Pub/Sub topic name | `"topic1"` | +| `subscription` | N | GCP Pub/Sub subscription name | `"name1"` | +| `type` | Y | Output | GCP credentials type | `service_account` +| `project_id` | Y | Output | GCP project id| `projectId` +| `private_key_id` | N | Output | GCP private key id | `"privateKeyId"` +| `private_key` | Y | Output | GCP credentials private key. 
Replace with x509 cert | `12345-12345` +| `client_email` | Y | Output | GCP client email | `"client@email.com"` +| `client_id` | N | Output | GCP client id | `0123456789-0123456789` +| `auth_uri` | N | Output | Google account OAuth endpoint | `https://accounts.google.com/o/oauth2/auth` +| `token_uri` | N | Output | Google account token uri | `https://oauth2.googleapis.com/token` +| `auth_provider_x509_cert_url` | N | Output |GCP credentials cert url | `https://www.googleapis.com/oauth2/v1/certs` +| `client_x509_cert_url` | N | Output | GCP credentials project x509 cert url | `https://www.googleapis.com/robot/v1/metadata/x509/.iam.gserviceaccount.com` +| `direction` | N |Input/Output | The direction of the binding. | `"input"`, `"output"`, `"input, output"` ## Binding support diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/graghql.md b/daprdocs/content/en/reference/components-reference/supported-bindings/graghql.md index 9c7894e04d1..06ed28b0ae2 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/graghql.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/graghql.md @@ -22,11 +22,13 @@ spec: version: v1 metadata: - name: endpoint - value: http://localhost:8080/v1/graphql + value: "http://localhost:8080/v1/graphql" - name: header:x-hasura-access-key - value: adminkey + value: "adminkey" - name: header:Cache-Control - value: no-cache + value: "no-cache" + - name: direction + value: "output" ``` {{% alert title="Warning" color="warning" %}} @@ -37,9 +39,10 @@ The above example uses secrets as plain strings. It is recommended to use a secr | Field | Required | Binding support | Details | Example | |--------------------|:--------:|------------|-----|---------| -| endpoint | Y | Output | GraphQL endpoint string See [here](#url-format) for more details | `"http://localhost:4000/graphql/graphql"` | -| header:[HEADERKEY] | N | Output | GraphQL header. Specify the header key in the `name`, and the header value in the `value`. | `"no-cache"` (see above) | -| variable:[VARIABLEKEY] | N | Output | GraphQL query variable. Specify the variable name in the `name`, and the variable value in the `value`. | `"123"` (see below) | +| `endpoint` | Y | Output | GraphQL endpoint string See [here](#url-format) for more details | `"http://localhost:4000/graphql/graphql"` | +| `header:[HEADERKEY]` | N | Output | GraphQL header. Specify the header key in the `name`, and the header value in the `value`. | `"no-cache"` (see above) | +| `variable:[VARIABLEKEY]` | N | Output | GraphQL query variable. Specify the variable name in the `name`, and the variable value in the `value`. 
| `"123"` (see below) | +| `direction` | N | Output | The direction of the binding | `"output"` | ### Endpoint and Header format diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/http.md b/daprdocs/content/en/reference/components-reference/supported-bindings/http.md index 685189cef1f..39355955457 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/http.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/http.md @@ -23,34 +23,37 @@ spec: version: v1 metadata: - name: url - value: http://something.com + value: "http://something.com" - name: MTLSRootCA - value: /Users/somepath/root.pem # OPTIONAL Secret store ref, , or + value: "/Users/somepath/root.pem" # OPTIONAL Secret store ref, , or - name: MTLSClientCert - value: /Users/somepath/client.pem # OPTIONAL Secret store ref, , or + value: "/Users/somepath/client.pem" # OPTIONAL Secret store ref, , or - name: MTLSClientKey - value: /Users/somepath/client.key # OPTIONAL Secret store ref, , or + value: "/Users/somepath/client.key" # OPTIONAL Secret store ref, , or - name: MTLSRenegotiation - value: RenegotiateOnceAsClient # OPTIONAL one of: RenegotiateNever, RenegotiateOnceAsClient, RenegotiateFreelyAsClient + value: "RenegotiateOnceAsClient" # OPTIONAL one of: RenegotiateNever, RenegotiateOnceAsClient, RenegotiateFreelyAsClient - name: securityToken # OPTIONAL secretKeyRef: name: mysecret - key: mytoken + key: "mytoken" - name: securityTokenHeader value: "Authorization: Bearer" # OPTIONAL
+ - name: direction + value: "output" ``` ## Spec metadata fields | Field | Required | Binding support | Details | Example | |--------------------|:--------:|--------|--------|---------| -| url | Y | Output |The base URL of the HTTP endpoint to invoke | `http://host:port/path`, `http://myservice:8000/customers` -| MTLSRootCA | N | Output |Secret store reference, path to root ca certificate, or pem encoded string | -| MTLSClientCert | N | Output |Secret store reference, path to client certificate, or pem encoded string | -| MTLSClientKey | N | Output |Secret store reference, path client private key, or pem encoded string | -| MTLSRenegotiation | N | Output |Type of TLS renegotiation to be used | `RenegotiateOnceAsClient` -| securityToken | N | Output |The value of a token to be added to an HTTP request as a header. Used together with `securityTokenHeader` | -| securityTokenHeader| N | Output |The name of the header for `securityToken` on an HTTP request that | +| `url` | Y | Output | The base URL of the HTTP endpoint to invoke | `http://host:port/path`, `http://myservice:8000/customers` +| `MTLSRootCA` | N | Output | Secret store reference, path to root CA certificate, or PEM-encoded string | +| `MTLSClientCert` | N | Output | Secret store reference, path to client certificate, or PEM-encoded string | +| `MTLSClientKey` | N | Output | Secret store reference, path to client private key, or PEM-encoded string | +| `MTLSRenegotiation` | N | Output | Type of TLS renegotiation to be used | `RenegotiateOnceAsClient` +| `securityToken` | N | Output | The value of a token to be added to an HTTP request as a header. Used together with `securityTokenHeader` | +| `securityTokenHeader` | N | Output | The name of the header on the HTTP request that carries `securityToken` | +| `direction` | N | Output | The direction of the binding | `"output"` ### How to configure MTLS related fields in Metadata The values for **MTLSRootCA**, **MTLSClientCert** and **MTLSClientKey** can be provided in three ways: diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/huawei-obs.md b/daprdocs/content/en/reference/components-reference/supported-bindings/huawei-obs.md index 5f0c9255a58..5c4e063f7a8 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/huawei-obs.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/huawei-obs.md @@ -20,16 +20,18 @@ spec: type: bindings.huawei.obs version: v1 - name: bucket - value: + value: "" - name: endpoint - value: + value: "" - name: accessKey - value: + value: "" - name: secretKey - value: + value: "" # optional fields - name: region - value: + value: "" + - name: direction + value: "" ``` {{% alert title="Warning" color="warning" %}} @@ -40,12 +42,12 @@ The above example uses secrets as plain strings. 
It is recommended to use a secr | Field | Required | Binding support | Details | Example | |--------------------|:--------:|------------|-----|---------| -| bucket | Y | Output | The name of the Huawei OBS bucket to write to | `"My-OBS-Bucket"` | -| endpoint | Y | Output | The specific Huawei OBS endpoint | `"obs.cn-north-4.myhuaweicloud.com"` | -| accessKey | Y | Output | The Huawei Access Key (AK) to access this resource | `"************"` | -| secretKey | Y | Output | The Huawei Secret Key (SK) to access this resource | `"************"` | -| region | N | Output | The specific Huawei region of the bucket | `"cn-north-4"` | - +| `bucket` | Y | Output | The name of the Huawei OBS bucket to write to | `"My-OBS-Bucket"` | +| `endpoint` | Y | Output | The specific Huawei OBS endpoint | `"obs.cn-north-4.myhuaweicloud.com"` | +| `accessKey` | Y | Output | The Huawei Access Key (AK) to access this resource | `"************"` | +| `secretKey` | Y | Output | The Huawei Secret Key (SK) to access this resource | `"************"` | +| `region` | N | Output | The specific Huawei region of the bucket | `"cn-north-4"` | +| `direction` | N | Output | The direction of the binding | `"output"` | ## Binding support diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/influxdb.md b/daprdocs/content/en/reference/components-reference/supported-bindings/influxdb.md index dd256df072c..f6fa6e45d47 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/influxdb.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/influxdb.md @@ -22,13 +22,15 @@ spec: version: v1 metadata: - name: url # Required - value: + value: "" - name: token # Required - value: + value: "" - name: org # Required - value: + value: "" - name: bucket # Required - value: + value: "" + - name: direction + value: "" ``` {{% alert title="Warning" color="warning" %}} @@ -39,10 +41,11 @@ The above example uses secrets as plain strings. It is recommended to use a secr | Field | Required | Binding support | Details | Example | |--------------------|:--------:|------------|-----|---------| -| url | Y | Output | The URL for the InfluxDB instance| `"http://localhost:8086"` | -| token | Y | Output | The authorization token for InfluxDB | `"mytoken"` | -| org | Y | Output | The InfluxDB organization | `"myorg"` | -| bucket | Y | Output | Bucket name to write to | `"mybucket"` | +| `url` | Y | Output | The URL for the InfluxDB instance| `"http://localhost:8086"` | +| `token` | Y | Output | The authorization token for InfluxDB | `"mytoken"` | +| `org` | Y | Output | The InfluxDB organization | `"myorg"` | +| `bucket` | Y | Output | Bucket name to write to | `"mybucket"` | +| `direction` | N | Output | Direction of the binding | `"output"` | ## Binding support diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/kafka.md b/daprdocs/content/en/reference/components-reference/supported-bindings/kafka.md index 3ec4975529c..38afe3c503a 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/kafka.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/kafka.md @@ -38,36 +38,42 @@ spec: - name: saslPassword # Required if authRequired is `true`. secretKeyRef: name: kafka-secrets - key: saslPasswordSecret + key: "saslPasswordSecret" + - name: saslMechanism + value: "SHA-512" - name: initialOffset # Optional. Used for input bindings. value: "newest" - name: maxMessageBytes # Optional. 
- value: 1024 + value: "1024" - name: version # Optional. - value: 1.0.0 + value: "1.0.0" + - name: direction + value: "input, output" ``` ## Spec metadata fields | Field | Required | Binding support | Details | Example | |--------------------|:--------:|------------|-----|---------| -| topics | N | Input | A comma-separated string of topics. | `"mytopic1,topic2"` | -| brokers | Y | Input/Output | A comma-separated string of Kafka brokers. | `"localhost:9092,dapr-kafka.myapp.svc.cluster.local:9093"` | -| clientID | N | Input/Output | A user-provided string sent with every request to the Kafka brokers for logging, debugging, and auditing purposes. | `"my-dapr-app"` | -| consumerGroup | N | Input | A kafka consumer group to listen on. Each record published to a topic is delivered to one consumer within each consumer group subscribed to the topic. | `"group1"` | -| consumeRetryEnabled | N | Input/Output | Enable consume retry by setting to `"true"`. Default to `false` in Kafka binding component. | `"true"`, `"false"` | -| publishTopic | Y | Output | The topic to publish to. | `"mytopic"` | -| authRequired | N | *Deprecated* | Enable [SASL](https://en.wikipedia.org/wiki/Simple_Authentication_and_Security_Layer) authentication with the Kafka brokers. | `"true"`, `"false"` | -| authType | Y | Input/Output | Configure or disable authentication. Supported values: `none`, `password`, `mtls`, or `oidc` | `"password"`, `"none"` | -| saslUsername | N | Input/Output | The SASL username used for authentication. Only required if `authRequired` is set to `"true"`. | `"adminuser"` | -| saslPassword | N | Input/Output | The SASL password used for authentication. Can be `secretKeyRef` to use a [secret reference]({{< ref component-secrets.md >}}). Only required if `authRequired` is set to `"true"`. | `""`, `"KeFg23!"` | -| initialOffset | N | Input | The initial offset to use if no offset was previously committed. Should be "newest" or "oldest". Defaults to "newest". | `"oldest"` | -| maxMessageBytes | N | Input/Output | The maximum size in bytes allowed for a single Kafka message. Defaults to 1024. | `2048` | -| oidcTokenEndpoint | N | Input/Output | Full URL to an OAuth2 identity provider access token endpoint. Required when `authType` is set to `oidc` | "https://identity.example.com/v1/token" | -| oidcClientID | N | Input/Output | The OAuth2 client ID that has been provisioned in the identity provider. Required when `authType` is set to `oidc` | `dapr-kafka` | -| oidcClientSecret | N | Input/Output | The OAuth2 client secret that has been provisioned in the identity provider: Required when `authType` is set to `oidc` | `"KeFg23!"` | -| oidcScopes | N | Input/Output | Comma-delimited list of OAuth2/OIDC scopes to request with the access token. Recommended when `authType` is set to `oidc`. Defaults to `"openid"` | `"openid,kafka-prod"` | -| version | N | Input/Output | Kafka cluster version. Defaults to 2.0.0. Please note that this needs to be mandatorily set to `1.0.0` for EventHubs with Kafka. | `1.0.0` | +| `topics` | N | Input | A comma-separated string of topics. | `"mytopic1,topic2"` | +| `brokers` | Y | Input/Output | A comma-separated string of Kafka brokers. | `"localhost:9092,dapr-kafka.myapp.svc.cluster.local:9093"` | +| `clientID` | N | Input/Output | A user-provided string sent with every request to the Kafka brokers for logging, debugging, and auditing purposes. | `"my-dapr-app"` | +| `consumerGroup` | N | Input | A Kafka consumer group to listen on. Each record published to a topic is delivered to one consumer within each consumer group subscribed to the topic. | `"group1"` | +| `consumeRetryEnabled` | N | Input/Output | Enable consume retry by setting to `"true"`. Defaults to `false` in the Kafka binding component. | `"true"`, `"false"` | +| `publishTopic` | Y | Output | The topic to publish to. | `"mytopic"` | +| `authRequired` | N | *Deprecated* | Enable [SASL](https://en.wikipedia.org/wiki/Simple_Authentication_and_Security_Layer) authentication with the Kafka brokers. | `"true"`, `"false"` | +| `authType` | Y | Input/Output | Configure or disable authentication. Supported values: `none`, `password`, `mtls`, or `oidc` | `"password"`, `"none"` | +| `saslUsername` | N | Input/Output | The SASL username used for authentication. Only required if `authRequired` is set to `"true"`. | `"adminuser"` | +| `saslPassword` | N | Input/Output | The SASL password used for authentication. Can be `secretKeyRef` to use a [secret reference]({{< ref component-secrets.md >}}). Only required if `authRequired` is set to `"true"`. | `""`, `"KeFg23!"` | +| `saslMechanism` | N | Input/Output | The SASL authentication mechanism you'd like to use. Only required if `authType` is set to `"password"`. If not provided, defaults to `PLAINTEXT`, which could break some services, like Amazon Managed Streaming for Apache Kafka (MSK). | `"SHA-512"`, `"SHA-256"`, `"PLAINTEXT"` | +| `initialOffset` | N | Input | The initial offset to use if no offset was previously committed. Should be "newest" or "oldest". Defaults to "newest". | `"oldest"` | +| `maxMessageBytes` | N | Input/Output | The maximum size in bytes allowed for a single Kafka message. Defaults to 1024. | `"2048"` | +| `oidcTokenEndpoint` | N | Input/Output | Full URL to an OAuth2 identity provider access token endpoint. Required when `authType` is set to `oidc` | `"https://identity.example.com/v1/token"` | +| `oidcClientID` | N | Input/Output | The OAuth2 client ID that has been provisioned in the identity provider. Required when `authType` is set to `oidc` | `"dapr-kafka"` | +| `oidcClientSecret` | N | Input/Output | The OAuth2 client secret that has been provisioned in the identity provider. Required when `authType` is set to `oidc` | `"KeFg23!"` | +| `oidcScopes` | N | Input/Output | Comma-delimited list of OAuth2/OIDC scopes to request with the access token. Recommended when `authType` is set to `oidc`. Defaults to `"openid"` | `"openid,kafka-prod"` | +| `version` | N | Input/Output | Kafka cluster version. Defaults to 2.0.0. Note that this must be set to `1.0.0` when using Azure Event Hubs with Kafka. | `"1.0.0"` | +| `direction` | N | Input/Output | The direction of the binding. | `"input"`, `"output"`, `"input, output"` | #### Note The metadata `version` must be set to `1.0.0` when using Azure EventHubs with Kafka. 
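To make that note concrete, the sketch below shows the metadata such an Event Hubs-backed Kafka component might carry. It is an illustration, not a verified configuration: the namespace, secret name, and secret key are placeholders, and the `$ConnectionString` username follows the usual Kafka-on-Event-Hubs convention.

```yaml
# Hypothetical metadata fragment for pointing the Kafka binding at Azure Event Hubs.
# "my-namespace" and "eventhubs-secret" are placeholder names.
- name: brokers
  value: "my-namespace.servicebus.windows.net:9093" # Event Hubs exposes its Kafka endpoint on port 9093
- name: authType
  value: "password"
- name: saslUsername
  value: "$ConnectionString" # literal string expected by the Event Hubs Kafka endpoint
- name: saslPassword
  secretKeyRef:
    name: eventhubs-secret
    key: "connectionString"
- name: version
  value: "1.0.0" # must be pinned to 1.0.0 for Event Hubs, per the note above
```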
diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/kinesis.md b/daprdocs/content/en/reference/components-reference/supported-bindings/kinesis.md index d823b70df74..656c7ba2291 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/kinesis.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/kinesis.md @@ -23,20 +23,21 @@ spec: version: v1 metadata: - name: streamName - value: KINESIS_STREAM_NAME # Kinesis stream name + value: "KINESIS_STREAM_NAME" # Kinesis stream name - name: consumerName - value: KINESIS_CONSUMER_NAME # Kinesis consumer name + value: "KINESIS_CONSUMER_NAME" # Kinesis consumer name - name: mode - value: shared # shared - Shared throughput or extended - Extended/Enhanced fanout + value: "shared" # shared - Shared throughput or extended - Extended/Enhanced fanout - name: region - value: AWS_REGION #replace + value: "AWS_REGION" #replace - name: accessKey - value: AWS_ACCESS_KEY # replace + value: "AWS_ACCESS_KEY" # replace - name: secretKey - value: AWS_SECRET_KEY #replace + value: "AWS_SECRET_KEY" #replace - name: sessionToken - value: ***************** - + value: "*****************" + - name: direction + value: "input, output" ``` {{% alert title="Warning" color="warning" %}} The above example uses secrets as plain strings. It is recommended to use a secret store for the secrets as described [here]({{< ref component-secrets.md >}}). @@ -46,13 +47,14 @@ The above example uses secrets as plain strings. It is recommended to use a secr | Field | Required | Binding support | Details | Example | |--------------------|:--------:|------------|-----|---------| -| mode | N | Input| The Kinesis stream mode. `shared`- Shared throughput, `extended` - Extended/Enhanced fanout methods. More details are [here](https://docs.aws.amazon.com/streams/latest/dev/building-consumers.html). Defaults to `"shared"` | `"shared"`, `"extended"` | -| streamName | Y | Input/Output | The AWS Kinesis Stream Name | `"stream"` | -| consumerName | Y | Input | The AWS Kinesis Consumer Name | `"myconsumer"` | -| region | Y | Output | The specific AWS region the AWS Kinesis instance is deployed in | `"us-east-1"` | -| accessKey | Y | Output | The AWS Access Key to access this resource | `"key"` | -| secretKey | Y | Output | The AWS Secret Access Key to access this resource | `"secretAccessKey"` | -| sessionToken | N | Output | The AWS session token to use | `"sessionToken"` | +| `mode` | N | Input | The Kinesis stream mode. `shared` - Shared throughput, `extended` - Extended/Enhanced fanout methods. More details are [here](https://docs.aws.amazon.com/streams/latest/dev/building-consumers.html). 
Defaults to `"shared"` | `"shared"`, `"extended"` | +| `streamName` | Y | Input/Output | The AWS Kinesis Stream Name | `"stream"` | +| `consumerName` | Y | Input | The AWS Kinesis Consumer Name | `"myconsumer"` | +| `region` | Y | Output | The specific AWS region the AWS Kinesis instance is deployed in | `"us-east-1"` | +| `accessKey` | Y | Output | The AWS Access Key to access this resource | `"key"` | +| `secretKey` | Y | Output | The AWS Secret Access Key to access this resource | `"secretAccessKey"` | +| `sessionToken` | N | Output | The AWS session token to use | `"sessionToken"` | +| `direction` | N | Input/Output | The direction of the binding | `"input"`, `"output"`, `"input, output"` | {{% alert title="Important" color="warning" %}} When running the Dapr sidecar (daprd) with your application on EKS (AWS Kubernetes), if you're using a node/pod that has already been attached to an IAM policy defining access to AWS resources, you **must not** provide AWS access-key, secret-key, and tokens in the definition of the component spec you're using. diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/kitex.md b/daprdocs/content/en/reference/components-reference/supported-bindings/kitex.md index b262f8ebe54..ec4a19c1fb6 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/kitex.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/kitex.md @@ -26,6 +26,16 @@ spec: type: bindings.kitex version: v1 metadata: + - name: hostPorts + value: "127.0.0.1:8888" + - name: destService + value: "echo" + - name: methodName + value: "echo" + - name: version + value: "0.5.0" + - name: direction + value: "output" ``` ## Spec metadata fields @@ -38,10 +48,11 @@ The `InvokeRequest.Metadata` for `bindings.kitex` requires the client to fill in | Field | Required | Binding support | Details | Example | |-------------|:--------:|--------|---------------------------------------------------------------------------------------------------------|--------------------| -| hostPorts | Y | Output | IP address and port information of the Kitex server (Thrift) | `"127.0.0.1:8888"` | -| destService | Y | Output | Service name of the Kitex server (Thrift) | `"echo"` | -| methodName | Y | Output | Method name under a specific service name of the Kitex server (Thrift) | `"echo"` | -| version | Y | Output | Kitex version | `"0.5.0"` | +| `hostPorts` | Y | Output | IP address and port information of the Kitex server (Thrift) | `"127.0.0.1:8888"` | +| `destService` | Y | Output | Service name of the Kitex server (Thrift) | `"echo"` | +| `methodName` | Y | Output | Method name under a specific service name of the Kitex server (Thrift) | `"echo"` | +| `version` | Y | Output | Kitex version | `"0.5.0"` | +| `direction` | N | Output | Direction of the binding | `"output"` | ## Binding support diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/kubemq.md b/daprdocs/content/en/reference/components-reference/supported-bindings/kubemq.md index 5303abb2694..5cf333ea213 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/kubemq.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/kubemq.md @@ -22,21 +22,24 @@ spec: version: v1 metadata: - name: address - value: localhost:50000 + value: "localhost:50000" - name: channel - value: queue1 + value: "queue1" + - name: direction + value: "input, output" ``` ## Spec metadata fields | Field | Required | Details | Example | 
|--------------------|:--------:|------------------------------------------------------------------------------------------------------------------------------|----------------------------------------| -| address | Y | Address of the KubeMQ server | `"localhost:50000"` | -| channel | Y | The Queue channel name | `queue1` | -| authToken | N | Auth JWT token for connection. Check out [KubeMQ Authentication](https://docs.kubemq.io/learn/access-control/authentication) | `ew...` | -| autoAcknowledged | N | Sets if received queue message is automatically acknowledged | `true` or `false` (default is `false`) | -| pollMaxItems | N | Sets the number of messages to poll on every connection | `1` | -| pollTimeoutSeconds | N | Sets the time in seconds for each poll interval | `3600` | +| `address` | Y | Address of the KubeMQ server | `"localhost:50000"` | +| `channel` | Y | The Queue channel name | `"queue1"` | +| `authToken` | N | Auth JWT token for connection. Check out [KubeMQ Authentication](https://docs.kubemq.io/learn/access-control/authentication) | `"ew..."` | +| `autoAcknowledged` | N | Sets if received queue message is automatically acknowledged | `"true"` or `"false"` (default is `"false"`) | +| `pollMaxItems` | N | Sets the number of messages to poll on every connection | `"1"` | +| `pollTimeoutSeconds` | N | Sets the time in seconds for each poll interval | `"3600"` | +| `direction` | N | The direction of the binding | `"input"`, `"output"`, `"input, output"` | ## Binding support diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/kubernetes-binding.md b/daprdocs/content/en/reference/components-reference/supported-bindings/kubernetes-binding.md index c6286d30bde..25391a7748d 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/kubernetes-binding.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/kubernetes-binding.md @@ -22,17 +22,20 @@ spec: version: v1 metadata: - name: namespace - value: + value: "" - name: resyncPeriodInSec value: "" + - name: direction + value: "input" ``` ## Spec metadata fields | Field | Required | Binding support | Details | Example | |--------------------|:--------:|------------|-----|---------| -| namespace | Y | Input | The Kubernetes namespace to read events from | `"default"` | -| resyncPeriodInSec | N | Input | The period of time to refresh event list from Kubernetes API server. Defaults to `"10"` | `"15"` +| `namespace` | Y | Input | The Kubernetes namespace to read events from | `"default"` | +| `resyncPeriodInSec` | N | Input | The period of time to refresh event list from Kubernetes API server. 
Defaults to `"10"` | `"15"` +| `direction` | N | Input | The direction of the binding | `"input"` ## Binding support diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/localstorage.md b/daprdocs/content/en/reference/components-reference/supported-bindings/localstorage.md index f040e5bf13e..5290a69c357 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/localstorage.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/localstorage.md @@ -22,14 +22,17 @@ spec: version: v1 metadata: - name: rootPath - value: + value: "" + - name: direction + value: "" ``` ## Spec metadata fields | Field | Required | Binding support | Details | Example | |--------------------|:--------:|--------|---------|---------| -| rootPath | Y | Input / Output | The root path anchor to which files can be read / saved | `"/temp/files"` | +| `rootPath` | Y | Input / Output | The root path anchor to which files can be read / saved | `"/temp/files"` | +| `direction` | N | Input / Output | The direction of the binding | `"output"` | ## Binding support diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/mqtt3.md b/daprdocs/content/en/reference/components-reference/supported-bindings/mqtt3.md index 0364ea36b7a..1bef18dfc5f 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/mqtt3.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/mqtt3.md @@ -34,6 +34,8 @@ spec: value: "false" - name: backOffMaxRetries value: "0" + - name: direction + value: "input, output" ``` {{% alert title="Warning" color="warning" %}} @@ -53,6 +55,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr | `clientCert` | Required for using TLS | Input/Output | TLS client certificate in PEM format. Must be used with `clientKey`. | See example below | `clientKey` | Required for using TLS | Input/Output | TLS client key in PEM format. Must be used with `clientCert`. Can be `secretKeyRef` to use a secret reference. | See example below | `backOffMaxRetries` | N | Input | The maximum number of retries to process the message before returning an error. Defaults to `"0"`, which means that no retries will be attempted. `"-1"` can be specified to indicate that messages should be retried indefinitely until they are successfully processed or the application is shutdown. The component will wait 5 seconds between retries. | `"3"` +| `direction` | N | Input/Output | The direction of the binding | `"input"`, `"output"`, `"input, output"` ### Communication using TLS diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/mysql.md b/daprdocs/content/en/reference/components-reference/supported-bindings/mysql.md index 103303ba082..d03dcfcab89 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/mysql.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/mysql.md @@ -25,17 +25,19 @@ spec: version: v1 metadata: - name: url # Required, define DB connection in DSN format - value: + value: "" - name: pemPath # Optional - value: + value: "" - name: maxIdleConns - value: + value: "" - name: maxOpenConns - value: + value: "" - name: connMaxLifetime - value: + value: "" - name: connMaxIdleTime - value: + value: "" + - name: direction + value: "" ``` {{% alert title="Warning" color="warning" %}} @@ -47,12 +49,13 @@ Note that you can not use secret just for username/password. 
If you use secret, | Field | Required | Binding support | Details | Example | |--------------------|:--------:|------------|-----|---------| -| url | Y | Output | Represent DB connection in Data Source Name (DNS) format. See [here](#ssl-connection-details) SSL details | `"user:password@tcp(localhost:3306)/dbname"` | -| pemPath | Y | Output | Path to the PEM file. Used with SSL connection | `"path/to/pem/file"` | -| maxIdleConns | N | Output | The max idle connections. Integer greater than 0 | `"10"` | -| maxOpenConns | N | Output | The max open connections. Integer greater than 0 | `"10"` | -| connMaxLifetime | N | Output | The max connection lifetime. Duration string | `"12s"` | -| connMaxIdleTime | N | Output | The max connection idel time. Duration string | `"12s"` | +| `url` | Y | Output | Represents the DB connection in Data Source Name (DSN) format. See [here](#ssl-connection-details) for SSL details | `"user:password@tcp(localhost:3306)/dbname"` | +| `pemPath` | Y | Output | Path to the PEM file. Used with SSL connection | `"path/to/pem/file"` | +| `maxIdleConns` | N | Output | The max idle connections. Integer greater than 0 | `"10"` | +| `maxOpenConns` | N | Output | The max open connections. Integer greater than 0 | `"10"` | +| `connMaxLifetime` | N | Output | The max connection lifetime. Duration string | `"12s"` | +| `connMaxIdleTime` | N | Output | The max connection idle time. Duration string | `"12s"` | +| `direction` | N | Output | The direction of the binding | `"output"` | ### SSL connection diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/postgresql.md b/daprdocs/content/en/reference/components-reference/supported-bindings/postgresql.md index fdefee5ccbf..31c9f230cff 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/postgresql.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/postgresql.md @@ -23,7 +23,9 @@ spec: version: v1 metadata: - name: url # Required - value: + value: "" + - name: direction + value: "" ``` {{% alert title="Warning" color="warning" %}} @@ -34,7 +36,8 @@ The above example uses secrets as plain strings. It is recommended to use a secr | Field | Required | Binding support | Details | Example | |--------------------|:--------:|------------|-----|---------| -| url | Y | Output | PostgreSQL connection string See [here](#url-format) for more details | `"user=dapr password=secret host=dapr.example.com port=5432 dbname=dapr sslmode=verify-ca"` | +| `url` | Y | Output | PostgreSQL connection string. See [here](#url-format) for more details | `"user=dapr password=secret host=dapr.example.com port=5432 dbname=dapr sslmode=verify-ca"` | +| `direction` | N | Output | The direction of the binding | `"output"` | ### URL format diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/postmark.md b/daprdocs/content/en/reference/components-reference/supported-bindings/postmark.md index f14965a9d7b..03edb8db2ab 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/postmark.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/postmark.md @@ -30,6 +30,8 @@ spec: value: "dave@dapr.io" # optional - name: subject value: "Hello!" # optional + - name: direction + value: "output" # optional ``` {{% alert title="Warning" color="warning" %}} The above example uses secrets as plain strings. It is recommended to use a secret store for the secrets as described [here]({{< ref component-secrets.md >}}). 
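Following that warning, a minimal sketch of the same component resolving its token through a secret reference instead of a plain string; the secret store, secret name, and key below are assumed placeholder names, not values from the docs.

```yaml
# Hypothetical fragment: the Postmark server token pulled via secretKeyRef
# rather than inlined. "postmark-secrets" and "server-token" are placeholders.
spec:
  type: bindings.postmark
  version: v1
  metadata:
  - name: serverToken
    secretKeyRef:
      name: postmark-secrets
      key: "server-token"
auth:
  secretStore: kubernetes
```

The `auth.secretStore` field names the configured secret store; when running on Kubernetes with its built-in secret store, it can typically be omitted.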
@@ -39,13 +41,14 @@ The above example uses secrets as plain strings. It is recommended to use a secr | Field | Required | Binding support | Details | Example | |--------------------|:--------:|------------|-----|---------| -| accountToken | Y | Output | The Postmark account token, this should be considered a secret value | `"account token"` | -| serverToken | Y | Output | The Postmark server token, this should be considered a secret value | `"server token"` | -| emailFrom | N | Output | If set this specifies the 'from' email address of the email message | `"me@exmaple.com"` | -| emailTo | N | Output | If set this specifies the 'to' email address of the email message | `"me@example.com"` | -| emailCc | N | Output | If set this specifies the 'cc' email address of the email message | `"me@example.com"` | -| emailBcc | N | Output | If set this specifies the 'bcc' email address of the email message | `"me@example.com"` | -| subject | N | Output | If set this specifies the subject of the email message | `"me@example.com"` | +| `accountToken` | Y | Output | The Postmark account token; this should be considered a secret value | `"account token"` | +| `serverToken` | Y | Output | The Postmark server token; this should be considered a secret value | `"server token"` | +| `emailFrom` | N | Output | If set this specifies the 'from' email address of the email message | `"me@example.com"` | +| `emailTo` | N | Output | If set this specifies the 'to' email address of the email message | `"me@example.com"` | +| `emailCc` | N | Output | If set this specifies the 'cc' email address of the email message | `"me@example.com"` | +| `emailBcc` | N | Output | If set this specifies the 'bcc' email address of the email message | `"me@example.com"` | +| `subject` | N | Output | If set this specifies the subject of the email message | `"subject of the email"` | +| `direction` | N | Output | The direction of the binding | `"output"` | You can specify any of the optional metadata properties on the output binding request too (e.g. `emailFrom`, `emailTo`, `subject`, etc.) diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/rabbitmq.md b/daprdocs/content/en/reference/components-reference/supported-bindings/rabbitmq.md index 89d150f43a4..d18d735e711 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/rabbitmq.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/rabbitmq.md @@ -22,33 +22,35 @@ spec: version: v1 metadata: - name: queueName - value: queue1 + value: "queue1" - name: host - value: amqp://[username][:password]@host.domain[:port] + value: "amqp://[username][:password]@host.domain[:port]" - name: durable - value: true + value: "true" - name: deleteWhenUnused - value: false + value: "false" - name: ttlInSeconds - value: 60 + value: "60" - name: prefetchCount - value: 0 + value: "0" - name: exclusive - value: false + value: "false" - name: maxPriority - value: 5 + value: "5" - name: contentType value: "text/plain" - name: reconnectWaitInSeconds - value: 5 + value: "5" - name: externalSasl - value: false + value: "false" - name: caCert - value: null + value: "null" - name: clientCert - value: null + value: "null" - name: clientKey - value: null + value: "null" + - name: direction + value: "input, output" ``` {{% alert title="Warning" color="warning" %}} @@ -57,22 +59,26 @@ The above example uses secrets as plain strings. 
It is recommended to use a secr ## Spec metadata fields +> When a new RabbitMQ message gets published, all values from the associated metadata are added to the message's header values. + | Field | Required | Binding support | Details | Example | |--------------------|:--------:|------------|-----|---------| -| queueName | Y | Input/Output | The RabbitMQ queue name | `"myqueue"` | -| host | Y | Input/Output | The RabbitMQ host address | `"amqp://[username][:password]@host.domain[:port]"` or with TLS: `"amqps://[username][:password]@host.domain[:port]"` | -| durable | N | Output | Tells RabbitMQ to persist message in storage. Defaults to `"false"` | `"true"`, `"false"` | -| deleteWhenUnused | N | Input/Output | Enables or disables auto-delete. Defaults to `"false"` | `"true"`, `"false"` | -| ttlInSeconds | N | Output | Set the [default message time to live at RabbitMQ queue level](https://www.rabbitmq.com/ttl.html). If this parameter is omitted, messages won't expire, continuing to exist on the queue until processed. See [also](#specifying-a-ttl-per-message) | `60` | -| prefetchCount | N | Input | Set the [Channel Prefetch Setting (QoS)](https://www.rabbitmq.com/confirms.html#channel-qos-prefetch). If this parameter is omiited, QoS would set value to 0 as no limit | `0` | -| exclusive | N | Input/Output | Determines whether the topic will be an exclusive topic or not. Defaults to `"false"` | `"true"`, `"false"` | -| maxPriority| N | Input/Output | Parameter to set the [priority queue](https://www.rabbitmq.com/priority.html). If this parameter is omitted, queue will be created as a general queue instead of a priority queue. Value between 1 and 255. See [also](#specifying-a-priority-per-message) | `"1"`, `"10"` | -| contentType | N | Input/Output | The content type of the message. Defaults to "text/plain". | `"text/plain"`, `"application/cloudevent+json"` and so on | -| reconnectWaitInSeconds | N | Input/Output | Represents the duration in seconds that the client should wait before attempting to reconnect to the server after a disconnection occurs. Defaults to `"5"`. | `"5"`, `"10"` | -| externalSasl | N | Input/Output | With TLS, should the username be taken from an additional field (e.g. CN.) See [RabbitMQ Authentication Mechanisms](https://www.rabbitmq.com/access-control.html#mechanisms). Defaults to `"false"`. | `"true"`, `"false"` | -| caCert | N | Input/Output | The CA certificate to use for TLS connection. Defaults to `null`. | `"-----BEGIN CERTIFICATE-----\nMI..."` | -| clientCert | N | Input/Output | The client certificate to use for TLS connection. Defaults to `null`. | `"-----BEGIN CERTIFICATE-----\nMI..."` | -| clientKey | N | Input/Output | The client key to use for TLS connection. Defaults to `null`. | `"-----BEGIN PRIVATE KEY-----\nMI..."` | +| `queueName` | Y | Input/Output | The RabbitMQ queue name | `"myqueue"` | +| `host` | Y | Input/Output | The RabbitMQ host address | `"amqp://[username][:password]@host.domain[:port]"` or with TLS: `"amqps://[username][:password]@host.domain[:port]"` | +| `durable` | N | Output | Tells RabbitMQ to persist message in storage. Defaults to `"false"` | `"true"`, `"false"` | +| `deleteWhenUnused` | N | Input/Output | Enables or disables auto-delete. Defaults to `"false"` | `"true"`, `"false"` | +| `ttlInSeconds` | N | Output | Set the [default message time to live at RabbitMQ queue level](https://www.rabbitmq.com/ttl.html). If this parameter is omitted, messages won't expire, continuing to exist on the queue until processed. 
See [also](#specifying-a-ttl-per-message) | `"60"` | +| `prefetchCount` | N | Input | Set the [Channel Prefetch Setting (QoS)](https://www.rabbitmq.com/confirms.html#channel-qos-prefetch). If this parameter is omitted, QoS is set to 0, meaning no limit | `"0"` | +| `exclusive` | N | Input/Output | Determines whether the topic will be an exclusive topic or not. Defaults to `"false"` | `"true"`, `"false"` | +| `maxPriority` | N | Input/Output | Parameter to set the [priority queue](https://www.rabbitmq.com/priority.html). If this parameter is omitted, the queue will be created as a general queue instead of a priority queue. Value between 1 and 255. See [also](#specifying-a-priority-per-message) | `"1"`, `"10"` | +| `contentType` | N | Input/Output | The content type of the message. Defaults to "text/plain". | `"text/plain"`, `"application/cloudevent+json"` and so on | +| `reconnectWaitInSeconds` | N | Input/Output | Represents the duration in seconds that the client should wait before attempting to reconnect to the server after a disconnection occurs. Defaults to `"5"`. | `"5"`, `"10"` | +| `externalSasl` | N | Input/Output | With TLS, should the username be taken from an additional field (e.g. CN). See [RabbitMQ Authentication Mechanisms](https://www.rabbitmq.com/access-control.html#mechanisms). Defaults to `"false"`. | `"true"`, `"false"` | +| `caCert` | N | Input/Output | The CA certificate to use for TLS connection. Defaults to `null`. | `"-----BEGIN CERTIFICATE-----\nMI..."` | +| `clientCert` | N | Input/Output | The client certificate to use for TLS connection. Defaults to `null`. | `"-----BEGIN CERTIFICATE-----\nMI..."` | +| `clientKey` | N | Input/Output | The client key to use for TLS connection. Defaults to `null`. | `"-----BEGIN PRIVATE KEY-----\nMI..."` | +| `direction` | N | Input/Output | The direction of the binding. | `"input"`, `"output"`, `"input, output"` | + ## Binding support This component supports both **input and output** binding interfaces. diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/redis.md b/daprdocs/content/en/reference/components-reference/supported-bindings/redis.md index 8fe7638e048..4b966a75e03 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/redis.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/redis.md @@ -22,11 +22,13 @@ spec: version: v1 metadata: - name: redisHost - value:
<HOST>:6379 + value: "<HOST>
:6379" - name: redisPassword - value: ************** + value: "**************" - name: enableTLS - value: + value: "" + - name: direction + value: "output" ``` {{% alert title="Warning" color="warning" %}} @@ -37,29 +39,29 @@ The above example uses secrets as plain strings. It is recommended to use a secr | Field | Required | Binding support | Details | Example | |--------------------|:--------:|------------|-----|---------| -| redisHost | Y | Output | The Redis host address | `"localhost:6379"` | -| redisPassword | Y | Output | The Redis password | `"password"` | -| redisUsername | N | Output | Username for Redis host. Defaults to empty. Make sure your redis server version is 6 or above, and have created acl rule correctly. | `"username"` | -| enableTLS | N | Output | If the Redis instance supports TLS with public certificates it can be configured to enable or disable TLS. Defaults to `"false"` | `"true"`, `"false"` | -| failover | N | Output | Property to enabled failover configuration. Needs sentinalMasterName to be set. Defaults to `"false"` | `"true"`, `"false"` -| sentinelMasterName | N | Output | The sentinel master name. See [Redis Sentinel Documentation](https://redis.io/docs/reference/sentinel-clients/) | `""`, `"127.0.0.1:6379"` -| redeliverInterval | N | Output | The interval between checking for pending messages to redelivery. Defaults to `"60s"`. `"0"` disables redelivery. | `"30s"` -| processingTimeout | N | Output | The amount time a message must be pending before attempting to redeliver it. Defaults to `"15s"`. `"0"` disables redelivery. | `"30s"` -| redisType | N | Output | The type of redis. There are two valid values, one is `"node"` for single node mode, the other is `"cluster"` for redis cluster mode. Defaults to `"node"`. | `"cluster"` -| redisDB | N | Output | Database selected after connecting to redis. If `"redisType"` is `"cluster"` this option is ignored. Defaults to `"0"`. | `"0"` -| redisMaxRetries | N | Output | Maximum number of times to retry commands before giving up. Default is to not retry failed commands. | `"5"` -| redisMinRetryInterval | N | Output | Minimum backoff for redis commands between each retry. Default is `"8ms"`; `"-1"` disables backoff. | `"8ms"` -| redisMaxRetryInterval | N | Output | Maximum backoff for redis commands between each retry. Default is `"512ms"`;`"-1"` disables backoff. | `"5s"` -| dialTimeout | N | Output | Dial timeout for establishing new connections. Defaults to `"5s"`. | `"5s"` -| readTimeout | N | Output | Timeout for socket reads. If reached, redis commands will fail with a timeout instead of blocking. Defaults to `"3s"`, `"-1"` for no timeout. | `"3s"` -| writeTimeout | N | Output | Timeout for socket writes. If reached, redis commands will fail with a timeout instead of blocking. Defaults is readTimeout. | `"3s"` -| poolSize | N | Output | Maximum number of socket connections. Default is 10 connections per every CPU as reported by runtime.NumCPU. | `"20"` -| poolTimeout | N | Output | Amount of time client waits for a connection if all connections are busy before returning an error. Default is readTimeout + 1 second. | `"5s"` -| maxConnAge | N | Output | Connection age at which the client retires (closes) the connection. Default is to not close aged connections. | `"30m"` -| minIdleConns | N | Output | Minimum number of idle connections to keep open in order to avoid the performance degradation associated with creating new connections. Defaults to `"0"`. 
| `"2"` -| idleCheckFrequency | N | Output | Frequency of idle checks made by idle connections reaper. Default is `"1m"`. `"-1"` disables idle connections reaper. | `"-1"` -| idleTimeout | N | Output | Amount of time after which the client closes idle connections. Should be less than server's timeout. Default is `"5m"`. `"-1"` disables idle timeout check. | `"10m"` - +| `redisHost` | Y | Output | The Redis host address | `"localhost:6379"` | +| `redisPassword` | Y | Output | The Redis password | `"password"` | +| `redisUsername` | N | Output | Username for Redis host. Defaults to empty. Make sure your redis server version is 6 or above, and have created acl rule correctly. | `"username"` | +| `enableTLS` | N | Output | If the Redis instance supports TLS with public certificates it can be configured to enable or disable TLS. Defaults to `"false"` | `"true"`, `"false"` | +| `failover` | N | Output | Property to enabled failover configuration. Needs sentinalMasterName to be set. Defaults to `"false"` | `"true"`, `"false"` +| `sentinelMasterName` | N | Output | The sentinel master name. See [Redis Sentinel Documentation](https://redis.io/docs/reference/sentinel-clients/) | `""`, `"127.0.0.1:6379"` +| `redeliverInterval` | N | Output | The interval between checking for pending messages to redelivery. Defaults to `"60s"`. `"0"` disables redelivery. | `"30s"` +| `processingTimeout` | N | Output | The amount time a message must be pending before attempting to redeliver it. Defaults to `"15s"`. `"0"` disables redelivery. | `"30s"` +| `redisType` | N | Output | The type of redis. There are two valid values, one is `"node"` for single node mode, the other is `"cluster"` for redis cluster mode. Defaults to `"node"`. | `"cluster"` +| `redisDB` | N | Output | Database selected after connecting to redis. If `"redisType"` is `"cluster"` this option is ignored. Defaults to `"0"`. | `"0"` +| `redisMaxRetries` | N | Output | Maximum number of times to retry commands before giving up. Default is to not retry failed commands. | `"5"` +| `redisMinRetryInterval` | N | Output | Minimum backoff for redis commands between each retry. Default is `"8ms"`; `"-1"` disables backoff. | `"8ms"` +| `redisMaxRetryInterval` | N | Output | Maximum backoff for redis commands between each retry. Default is `"512ms"`;`"-1"` disables backoff. | `"5s"` +| `dialTimeout` | N | Output | Dial timeout for establishing new connections. Defaults to `"5s"`. | `"5s"` +| `readTimeout` | N | Output | Timeout for socket reads. If reached, redis commands will fail with a timeout instead of blocking. Defaults to `"3s"`, `"-1"` for no timeout. | `"3s"` +| `writeTimeout` | N | Output | Timeout for socket writes. If reached, redis commands will fail with a timeout instead of blocking. Defaults is readTimeout. | `"3s"` +| `poolSize` | N | Output | Maximum number of socket connections. Default is 10 connections per every CPU as reported by runtime.NumCPU. | `"20"` +| `poolTimeout` | N | Output | Amount of time client waits for a connection if all connections are busy before returning an error. Default is readTimeout + 1 second. | `"5s"` +| `maxConnAge` | N | Output | Connection age at which the client retires (closes) the connection. Default is to not close aged connections. | `"30m"` +| `minIdleConns` | N | Output | Minimum number of idle connections to keep open in order to avoid the performance degradation associated with creating new connections. Defaults to `"0"`. 
| `"2"` +| `idleCheckFrequency` | N | Output | Frequency of idle checks made by idle connections reaper. Default is `"1m"`. `"-1"` disables idle connections reaper. | `"-1"` +| `idleTimeout` | N | Output | Amount of time after which the client closes idle connections. Should be less than server's timeout. Default is `"5m"`. `"-1"` disables idle timeout check. | `"10m"` +| `direction` | N | Output | Direction of the binding. | `"output"` ## Binding support @@ -96,6 +98,8 @@ An HTTP 204 (No Content) and empty body is returned if successful. You can get a record in Redis using the `get` operation. This gets a key that was previously set. +This takes an optional parameter `delete`, which is by default `false`. When it is set to `true`, this operation uses the `GETDEL` operation of Redis. For example, it returns the `value` which was previously set and then deletes it. + #### Request ```json @@ -120,6 +124,20 @@ You can get a record in Redis using the `get` operation. This gets a key that wa } ``` +#### Request with delete flag + +```json +{ + "operation": "get", + "metadata": { + "key": "key1", + "delete": "true" + }, + "data": { + } +} +``` + ### delete You can delete a record in Redis using the `delete` operation. Returns success whether the key exists or not. diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/rethinkdb.md b/daprdocs/content/en/reference/components-reference/supported-bindings/rethinkdb.md index dab91979466..e159a7bb774 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/rethinkdb.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/rethinkdb.md @@ -15,7 +15,6 @@ To enable users to track change of the state of actors, this binding leverages R To setup RethinkDB statechange binding create a component of type `bindings.rethinkdb.statechange`. See [this guide]({{< ref "howto-bindings.md#1-create-a-binding" >}}) on how to create and apply a binding configuration. - ```yaml apiVersion: dapr.io/v1alpha1 kind: Component @@ -26,17 +25,20 @@ spec: version: v1 metadata: - name: address - value: # Required, e.g. 127.0.0.1:28015 or rethinkdb.default.svc.cluster.local:28015). + value: "" # Required, e.g. 127.0.0.1:28015 or rethinkdb.default.svc.cluster.local:28015). - name: database - value: # Required, e.g. dapr (alpha-numerics only) + value: "" # Required, e.g. 
diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/rethinkdb.md b/daprdocs/content/en/reference/components-reference/supported-bindings/rethinkdb.md index dab91979466..e159a7bb774 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/rethinkdb.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/rethinkdb.md @@ -15,7 +15,6 @@ To enable users to track change of the state of actors, this binding leverages R To setup RethinkDB statechange binding create a component of type `bindings.rethinkdb.statechange`. See [this guide]({{< ref "howto-bindings.md#1-create-a-binding" >}}) on how to create and apply a binding configuration. - ```yaml apiVersion: dapr.io/v1alpha1 kind: Component @@ -26,17 +25,20 @@ spec: version: v1 metadata: - name: address - value: # Required, e.g. 127.0.0.1:28015 or rethinkdb.default.svc.cluster.local:28015). + value: "" # Required, e.g. 127.0.0.1:28015 or rethinkdb.default.svc.cluster.local:28015. - name: database - value: # Required, e.g. dapr (alpha-numerics only) + value: "" # Required, e.g. dapr (alpha-numerics only) + - name: direction + value: "" ``` ## Spec metadata fields | Field | Required | Binding support | Details | Example | |--------------------|:--------:|------------|-----|---------| -| address | Y | Input | Address of RethinkDB server | `"27.0.0.1:28015"`, `"rethinkdb.default.svc.cluster.local:28015"` | -| database | Y | Input | RethinDB database name | `"dapr"` | +| `address` | Y | Input | Address of the RethinkDB server | `"127.0.0.1:28015"`, `"rethinkdb.default.svc.cluster.local:28015"` | +| `database` | Y | Input | RethinkDB database name | `"dapr"` | +| `direction` | N | Input | Direction of the binding | `"input"` | ## Binding support diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/s3.md b/daprdocs/content/en/reference/components-reference/supported-bindings/s3.md index 4d50e3447d6..1c01459c3ba 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/s3.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/s3.md @@ -23,27 +23,29 @@ spec: version: v1 metadata: - name: bucket - value: mybucket + value: "mybucket" - name: region - value: us-west-2 + value: "us-west-2" - name: endpoint - value: s3.us-west-2.amazonaws.com + value: "s3.us-west-2.amazonaws.com" - name: accessKey - value: ***************** + value: "*****************" - name: secretKey - value: ***************** + value: "*****************" - name: sessionToken - value: mysession + value: "mysession" - name: decodeBase64 - value: + value: "" - name: encodeBase64 - value: + value: "" - name: forcePathStyle - value: + value: "" - name: disableSSL - value: + value: "" - name: insecureSSL - value: + value: "" + - name: direction + value: "output" ``` {{% alert title="Warning" color="warning" %}} @@ -54,17 +56,18 @@ The above example uses secrets as plain strings. It is recommended to use a secr | Field | Required | Binding support | Details | Example | |--------------------|:--------:|------------|-----|---------| -| bucket | Y | Output | The name of the S3 bucket to write to | `"bucket"` | -| region | Y | Output | The specific AWS region | `"us-east-1"` | -| endpoint | N | Output | The specific AWS endpoint | `"s3.us-east-1.amazonaws.com"` | -| accessKey | Y | Output | The AWS Access Key to access this resource | `"key"` | -| secretKey | Y | Output | The AWS Secret Access Key to access this resource | `"secretAccessKey"` | -| sessionToken | N | Output | The AWS session token to use | `"sessionToken"` | -| forcePathStyle | N | Output | Currently Amazon S3 SDK supports virtual hosted-style and path-style access. `true` is path-style format like `https://<endpoint>/<bucketName>/<objectKey>`. `false` is hosted-style format like `https://<bucketName>.<endpoint>/<objectKey>`. Defaults to `false` | `true`, `false` | -| decodeBase64 | N | Output | Configuration to decode base64 file content before saving to bucket storage. (In case of saving a file with binary content). `true` is the only allowed positive value. Other positive variations like `"True", "1"` are not acceptable. Defaults to `false` | `true`, `false` | -| encodeBase64 | N | Output | Configuration to encode base64 file content before return the content. (In case of opening a file with binary content). `true` is the only allowed positive value. Other positive variations like `"True", "1"` are not acceptable. Defaults to `false` | `true`, `false` | -| disableSSL | N | Output | Allows to connect to non `https://` endpoints. 
Defaults to `false` | `true`, `false` | -| insecureSSL | N | Output | When connecting to `https://` endpoints, accepts invalid or self-signed certificates. Defaults to `false` | `true`, `false` | +| `bucket` | Y | Output | The name of the S3 bucket to write to | `"bucket"` | +| `region` | Y | Output | The specific AWS region | `"us-east-1"` | +| `endpoint` | N | Output | The specific AWS endpoint | `"s3.us-east-1.amazonaws.com"` | +| `accessKey` | Y | Output | The AWS Access Key to access this resource | `"key"` | +| `secretKey` | Y | Output | The AWS Secret Access Key to access this resource | `"secretAccessKey"` | +| `sessionToken` | N | Output | The AWS session token to use | `"sessionToken"` | +| `forcePathStyle` | N | Output | Currently Amazon S3 SDK supports virtual hosted-style and path-style access. `"true"` is path-style format like `"https://<endpoint>/<bucketName>/<objectKey>"`. `"false"` is hosted-style format like `"https://<bucketName>.<endpoint>/<objectKey>"`. Defaults to `"false"` | `"true"`, `"false"` | +| `decodeBase64` | N | Output | Configuration to decode base64 file content before saving to bucket storage. (In case of saving a file with binary content). `"true"` is the only allowed positive value. Other positive variations like `"True", "1"` are not acceptable. Defaults to `"false"` | `"true"`, `"false"` | +| `encodeBase64` | N | Output | Configuration to encode base64 file content before returning the content. (In case of opening a file with binary content). `"true"` is the only allowed positive value. Other positive variations like `"True", "1"` are not acceptable. Defaults to `"false"` | `"true"`, `"false"` | +| `disableSSL` | N | Output | Allows connecting to non-`https://` endpoints. Defaults to `"false"` | `"true"`, `"false"` | +| `insecureSSL` | N | Output | When connecting to `https://` endpoints, accepts invalid or self-signed certificates. Defaults to `"false"` | `"true"`, `"false"` | +| `direction` | N | Output | The direction of the binding | `"output"` | {{% alert title="Important" color="warning" %}} When running the Dapr sidecar (daprd) with your application on EKS (AWS Kubernetes), if you're using a node/pod that has already been attached to an IAM policy defining access to AWS resources, you **must not** provide AWS access-key, secret-key, and tokens in the definition of the component spec you're using. diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/sendgrid.md b/daprdocs/content/en/reference/components-reference/supported-bindings/sendgrid.md index 8a82f2f61c7..13577cf9895 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/sendgrid.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/sendgrid.md @@ -37,6 +37,8 @@ spec: value: "bob@dapr.io" # optional - name: apiKey value: "YOUR_API_KEY" # required, this is your SendGrid key + - name: direction + value: "output" ``` {{% alert title="Warning" color="warning" %}} @@ -47,15 +49,15 @@ The above example uses secrets as plain strings. It is recommended to use a secr | Field | Required | Binding support | Details | Example | |--------------------|:--------:|------------|-----|---------| -| apiKey | Y | Output | SendGrid API key, this should be considered a secret value | `"apikey"` | -| emailFrom | N | Output | If set this specifies the 'from' email address of the email message. Only a single email address is allowed. Optional field, see [below](#example-request-payload) | `"me@example.com"` | -| emailFromName | N | Output | If set this specifies the 'from' name of the email message. 
Optional field, see [below](#example-request-payload) | `"me"` | -| emailTo | N | Output | If set this specifies the 'to' email address of the email message. Only a single email address is allowed. Optional field, see [below](#example-request-payload) | `"me@example.com"` | -| emailToName | N | Output | If set this specifies the 'to' name of the email message. Optional field, see [below](#example-request-payload) | `"me"` | -| emailCc | N | Output | If set this specifies the 'cc' email address of the email message. Only a single email address is allowed. Optional field, see [below](#example-request-payload) | `"me@example.com"` | -| emailBcc | N | Output | If set this specifies the 'bcc' email address of the email message. Only a single email address is allowed. Optional field, see [below](#example-request-payload) | `"me@example.com"` | -| subject | N | Output | If set this specifies the subject of the email message. Optional field, see [below](#example-request-payload) | `"subject of the email"` | - +| `apiKey` | Y | Output | SendGrid API key, this should be considered a secret value | `"apikey"` | +| `emailFrom` | N | Output | If set this specifies the 'from' email address of the email message. Only a single email address is allowed. Optional field, see [below](#example-request-payload) | `"me@example.com"` | +| `emailFromName` | N | Output | If set this specifies the 'from' name of the email message. Optional field, see [below](#example-request-payload) | `"me"` | +| `emailTo` | N | Output | If set this specifies the 'to' email address of the email message. Only a single email address is allowed. Optional field, see [below](#example-request-payload) | `"me@example.com"` | +| `emailToName` | N | Output | If set this specifies the 'to' name of the email message. Optional field, see [below](#example-request-payload) | `"me"` | +| `emailCc` | N | Output | If set this specifies the 'cc' email address of the email message. Only a single email address is allowed. Optional field, see [below](#example-request-payload) | `"me@example.com"` | +| `emailBcc` | N | Output | If set this specifies the 'bcc' email address of the email message. Only a single email address is allowed. Optional field, see [below](#example-request-payload) | `"me@example.com"` | +| `subject` | N | Output | If set this specifies the subject of the email message. Optional field, see [below](#example-request-payload) | `"subject of the email"` | +| `direction` | N | Output | The direction of the binding | `"output"` | ## Binding support diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/servicebusqueues.md b/daprdocs/content/en/reference/components-reference/supported-bindings/servicebusqueues.md index 8e5bab35675..1f8ffe68b7b 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/servicebusqueues.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/servicebusqueues.md @@ -25,37 +25,39 @@ spec: - name: connectionString # Required when not using Azure Authentication. 
    value: "Endpoint=sb://{ServiceBusNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key};EntityPath={ServiceBus}"
   - name: queueName
-    value: queue1
+    value: "queue1"
   # - name: timeoutInSec # Optional
-  # value: 60
+  # value: "60"
   # - name: handlerTimeoutInSec # Optional
-  # value: 60
+  # value: "60"
   # - name: disableEntityManagement # Optional
   # value: "false"
   # - name: maxDeliveryCount # Optional
-  # value: 3
+  # value: "3"
   # - name: lockDurationInSec # Optional
-  # value: 60
+  # value: "60"
   # - name: lockRenewalInSec # Optional
-  # value: 20
+  # value: "20"
   # - name: maxActiveMessages # Optional
-  # value: 10000
+  # value: "10000"
   # - name: maxConcurrentHandlers # Optional
-  # value: 10
+  # value: "10"
   # - name: defaultMessageTimeToLiveInSec # Optional
-  # value: 10
+  # value: "10"
   # - name: autoDeleteOnIdleInSec # Optional
-  # value: 3600
+  # value: "3600"
   # - name: minConnectionRecoveryInSec # Optional
-  # value: 2
+  # value: "2"
   # - name: maxConnectionRecoveryInSec # Optional
-  # value: 300
+  # value: "300"
   # - name: maxRetriableErrorsPerSec # Optional
-  # value: 10
+  # value: "10"
   # - name: publishMaxRetries # Optional
-  # value: 5
+  # value: "5"
   # - name: publishInitialRetryIntervalInMs # Optional
-  # value: 500
+  # value: "500"
+  # - name: direction
+  # value: "input, output"
 ```

{{% alert title="Warning" color="warning" %}}
The above example uses secrets as plain strings. It is recommended to use a secret store for the secrets as described [here]({{< ref component-secrets.md >}}).
@@ -67,25 +69,26 @@ The above example uses secrets as plain strings. It is recommended to use a secr
|--------------------|:--------:|-----------------|----------|---------|
| `connectionString` | Y | Input/Output | The Service Bus connection string. Required unless using Azure AD authentication. | `"Endpoint=sb://************"` |
| `queueName` | Y | Input/Output | The Service Bus queue name. Queue names are case-insensitive and will always be forced to lowercase. | `"queuename"` |
-| `timeoutInSec` | N | Input/Output | Timeout for all invocations to the Azure Service Bus endpoint, in seconds. *Note that this option impacts network calls and it's unrelated to the TTL applies to messages*. Default: `60` | `60` |
+| `timeoutInSec` | N | Input/Output | Timeout for all invocations to the Azure Service Bus endpoint, in seconds. *Note that this option impacts network calls and is unrelated to the TTL applied to messages*. Default: `"60"` | `"60"` |
| `namespaceName`| N | Input/Output | Parameter to set the address of the Service Bus namespace, as a fully-qualified domain name. Required if using Azure AD authentication. | `"namespace.servicebus.windows.net"` |
| `disableEntityManagement` | N | Input/Output | When set to true, queues and subscriptions do not get created automatically. Default: `"false"` | `"true"`, `"false"`
-| `lockDurationInSec` | N | Input/Output | Defines the length in seconds that a message will be locked for before expiring. Used during subscription creation only. Default set by server. | `30`
-| `autoDeleteOnIdleInSec` | N | Input/Output | Time in seconds to wait before auto deleting idle subscriptions. Used during subscription creation only. Default: `0` (disabled) | `3600`
-| `defaultMessageTimeToLiveInSec` | N | Input/Output | Default message time to live, in seconds. Used during subscription creation only. | `10`
-| `maxDeliveryCount` | N | Input/Output | Defines the number of attempts the server will make to deliver a message.
Used during subscription creation only. Default set by server. | `10` -| `minConnectionRecoveryInSec` | N | Input/Output | Minimum interval (in seconds) to wait before attempting to reconnect to Azure Service Bus in case of a connection failure. Default: `2` | `5` -| `maxConnectionRecoveryInSec` | N | Input/Output | Maximum interval (in seconds) to wait before attempting to reconnect to Azure Service Bus in case of a connection failure. After each attempt, the component waits a random number of seconds, increasing every time, between the minimum and the maximum. Default: `300` (5 minutes) | `600` -| `maxActiveMessages` | N | Defines the maximum number of messages to be processing or in the buffer at once. This should be at least as big as the maximum concurrent handlers. Default: `1` | `1` -| `handlerTimeoutInSec`| N | Input | Timeout for invoking the app's handler. Default: `0` (no timeout) | `30` -| `minConnectionRecoveryInSec` | N | Input | Minimum interval (in seconds) to wait before attempting to reconnect to Azure Service Bus in case of a connection failure. Default: `2` | `5` | -| `maxConnectionRecoveryInSec` | N | Input | Maximum interval (in seconds) to wait before attempting to reconnect to Azure Service Bus in case of a connection failure. After each attempt, the binding waits a random number of seconds, increasing every time, between the minimum and the maximum. Default: `300` (5 minutes) | `600` | -| `lockRenewalInSec` | N | Input | Defines the frequency at which buffered message locks will be renewed. Default: `20`. | `20` -| `maxActiveMessages` | N | Input | Defines the maximum number of messages to be processing or in the buffer at once. This should be at least as big as the maximum concurrent handlers. Default: `1` | `2000` -| `maxConcurrentHandlers` | N | Input | Defines the maximum number of concurrent message handlers; set to `0` for unlimited. Default: `1` | `10` -| `maxRetriableErrorsPerSec` | N | Input | Maximum number of retriable errors that are processed per second. If a message fails to be processed with a retriable error, the component adds a delay before it starts processing another message, to avoid immediately re-processing messages that have failed. Default: `10` | `10` -| `publishMaxRetries` | N | Output | The max number of retries for when Azure Service Bus responds with "too busy" in order to throttle messages. Defaults: `5` | `5` -| `publishInitialRetryIntervalInMs` | N | Output | Time in milliseconds for the initial exponential backoff when Azure Service Bus throttle messages. Defaults: `500` | `500` +| `lockDurationInSec` | N | Input/Output | Defines the length in seconds that a message will be locked for before expiring. Used during subscription creation only. Default set by server. | `"30"` +| `autoDeleteOnIdleInSec` | N | Input/Output | Time in seconds to wait before auto deleting idle subscriptions. Used during subscription creation only. Default: `"0"` (disabled) | `"3600"` +| `defaultMessageTimeToLiveInSec` | N | Input/Output | Default message time to live, in seconds. Used during subscription creation only. | `"10"` +| `maxDeliveryCount` | N | Input/Output | Defines the number of attempts the server will make to deliver a message. Used during subscription creation only. Default set by server. | `"10"` +| `minConnectionRecoveryInSec` | N | Input/Output | Minimum interval (in seconds) to wait before attempting to reconnect to Azure Service Bus in case of a connection failure. 
Default: `"2"` | `"5"`
+| `maxConnectionRecoveryInSec` | N | Input/Output | Maximum interval (in seconds) to wait before attempting to reconnect to Azure Service Bus in case of a connection failure. After each attempt, the component waits a random number of seconds, increasing every time, between the minimum and the maximum. Default: `"300"` (5 minutes) | `"600"`
+| `maxActiveMessages` | N | Input/Output | Defines the maximum number of messages to be processing or in the buffer at once. This should be at least as big as the maximum concurrent handlers. Default: `"1"` | `"1"`
+| `handlerTimeoutInSec`| N | Input | Timeout for invoking the app's handler. Default: `"0"` (no timeout) | `"30"`
+| `minConnectionRecoveryInSec` | N | Input | Minimum interval (in seconds) to wait before attempting to reconnect to Azure Service Bus in case of a connection failure. Default: `"2"` | `"5"` |
+| `maxConnectionRecoveryInSec` | N | Input | Maximum interval (in seconds) to wait before attempting to reconnect to Azure Service Bus in case of a connection failure. After each attempt, the binding waits a random number of seconds, increasing every time, between the minimum and the maximum. Default: `"300"` (5 minutes) | `"600"` |
+| `lockRenewalInSec` | N | Input | Defines the frequency at which buffered message locks will be renewed. Default: `"20"`. | `"20"`
+| `maxActiveMessages` | N | Input | Defines the maximum number of messages to be processing or in the buffer at once. This should be at least as big as the maximum concurrent handlers. Default: `"1"` | `"2000"`
+| `maxConcurrentHandlers` | N | Input | Defines the maximum number of concurrent message handlers; set to `0` for unlimited. Default: `"1"` | `"10"`
+| `maxRetriableErrorsPerSec` | N | Input | Maximum number of retriable errors that are processed per second. If a message fails to be processed with a retriable error, the component adds a delay before it starts processing another message, to avoid immediately re-processing messages that have failed. Default: `"10"` | `"10"`
+| `publishMaxRetries` | N | Output | The max number of retries for when Azure Service Bus responds with "too busy" in order to throttle messages. Default: `"5"` | `"5"`
+| `publishInitialRetryIntervalInMs` | N | Output | Time in milliseconds for the initial exponential backoff when Azure Service Bus throttles messages. Default: `"500"` | `"500"`
+| `direction` | N | Input/Output | The direction of the binding | `"input"`, `"output"`, `"input, output"`

### Azure Active Directory (AAD) authentication

@@ -153,6 +156,36 @@ curl -X POST http://localhost:3500/v1.0/bindings/myServiceBusQueue \

 {{< /tabs >}}

+## Schedule a message
+
+A message can be scheduled for delayed processing.
+
+To schedule a message, use the `metadata` section in the request body during the binding invocation: the field name is `ScheduledEnqueueTimeUtc`.
+
+The supported timestamp formats are [RFC1123](https://www.rfc-editor.org/rfc/rfc1123) and [RFC3339](https://www.rfc-editor.org/rfc/rfc3339).
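+
+For reference, here is the same instant written in each of the two accepted formats (an illustrative value, matching the date used in the request below):
+
+```text
+RFC1123: Tue, 02 Jan 2024 15:04:05 GMT
+RFC3339: 2024-01-02T15:04:05Z
+```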
+
+{{< tabs "Linux">}}
+
+{{% codetab %}}
+
+```shell
+curl -X POST http://localhost:3500/v1.0/bindings/myServiceBusQueue \
+  -H "Content-Type: application/json" \
+  -d '{
+        "data": {
+          "message": "Hi"
+        },
+        "metadata": {
+          "ScheduledEnqueueTimeUtc": "Tue, 02 Jan 2024 15:04:05 GMT"
+        },
+        "operation": "create"
+      }'
+```
+
+{{% /codetab %}}
+
+{{< /tabs >}}
+
 ## Related links

 - [Basic schema for a Dapr component]({{< ref component-schema >}})

diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/ses.md b/daprdocs/content/en/reference/components-reference/supported-bindings/ses.md
index 23451191118..7f63892fb41 100644
--- a/daprdocs/content/en/reference/components-reference/supported-bindings/ses.md
+++ b/daprdocs/content/en/reference/components-reference/supported-bindings/ses.md
@@ -40,6 +40,8 @@ spec:
     value: "bcc@example.com"
   - name: subject
     value: "subject"
+  - name: direction
+    value: "output"
 ```

{{% alert title="Warning" color="warning" %}}
@@ -50,15 +52,16 @@ The above example uses secrets as plain strings. It is recommended to use a secr

| Field | Required | Binding support | Details | Example |
|--------------------|:--------:|------------|-----|---------|
-| region | N | Output | The specific AWS region | `"eu-west-1"` |
-| accessKey | N | Output | The AWS Access Key to access this resource | `"key"` |
-| secretKey | N | Output | The AWS Secret Access Key to access this resource | `"secretAccessKey"` |
-| sessionToken | N | Output | The AWS session token to use | `"sessionToken"` |
-| emailFrom | N | Output | If set, this specifies the email address of the sender. See [also](#example-request) | `"me@example.com"` |
-| emailTo | N | Output | If set, this specifies the email address of the receiver. See [also](#example-request) | `"me@example.com"` |
-| emailCc | N | Output | If set, this specifies the email address to CC in. See [also](#example-request) | `"me@example.com"` |
-| emailBcc | N | Output | If set, this specifies email address to BCC in. See [also](#example-request) | `"me@example.com"` |
-| subject | N | Output | If set, this specifies the subject of the email message. See [also](#example-request) | `"subject of mail"` |
+| `region` | N | Output | The specific AWS region | `"eu-west-1"` |
+| `accessKey` | N | Output | The AWS Access Key to access this resource | `"key"` |
+| `secretKey` | N | Output | The AWS Secret Access Key to access this resource | `"secretAccessKey"` |
+| `sessionToken` | N | Output | The AWS session token to use | `"sessionToken"` |
+| `emailFrom` | N | Output | If set, this specifies the email address of the sender. See [also](#example-request) | `"me@example.com"` |
+| `emailTo` | N | Output | If set, this specifies the email address of the receiver. See [also](#example-request) | `"me@example.com"` |
+| `emailCc` | N | Output | If set, this specifies the email address to CC in. See [also](#example-request) | `"me@example.com"` |
+| `emailBcc` | N | Output | If set, this specifies the email address to BCC in. See [also](#example-request) | `"me@example.com"` |
+| `subject` | N | Output | If set, this specifies the subject of the email message.
See [also](#example-request) | `"subject of mail"` |
+| `direction` | N | Output | The direction of the binding | `"output"` |

{{% alert title="Important" color="warning" %}}
When running the Dapr sidecar (daprd) with your application on EKS (AWS Kubernetes), if you're using a node/pod that has already been attached to an IAM policy defining access to AWS resources, you **must not** provide AWS access-key, secret-key, and tokens in the definition of the component spec you're using.

diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/signalr.md b/daprdocs/content/en/reference/components-reference/supported-bindings/signalr.md
index b40dcffd776..2da23916d5d 100644
--- a/daprdocs/content/en/reference/components-reference/supported-bindings/signalr.md
+++ b/daprdocs/content/en/reference/components-reference/supported-bindings/signalr.md
@@ -22,9 +22,11 @@ spec:
   version: v1
   metadata:
   - name: connectionString
-    value: Endpoint=https://<your-azure-signalr>.service.signalr.net;AccessKey=<your-access-key>;Version=1.0;
+    value: "Endpoint=https://<your-azure-signalr>.service.signalr.net;AccessKey=<your-access-key>;Version=1.0;"
   - name: hub # Optional
-    value:
+    value: ""
+  - name: direction
+    value: ""
 ```

{{% alert title="Warning" color="warning" %}}
@@ -37,8 +39,9 @@ The above example uses secrets as plain strings. It is recommended to use a secr
|--------------------|:--------:|------------|-----|---------|
| `connectionString` | Y | Output | The Azure SignalR connection string | `"Endpoint=https://<your-azure-signalr>.service.signalr.net;AccessKey=<your-access-key>;Version=1.0;"` |
| `hub` | N | Output | Defines the hub in which the message will be sent. The hub can be dynamically defined as a metadata value when publishing to an output binding (key is "hub") | `"myhub"` |
-| `endpoint` | N | Output | Endpoint of Azure SignalR; required if not included in the `connectionString` or if using Azure AD | `https://<your-azure-signalr>.service.signalr.net`
-| `accessKey` | N | Output | Access key | `your-access-key`
+| `endpoint` | N | Output | Endpoint of Azure SignalR; required if not included in the `connectionString` or if using Azure AD | `"https://<your-azure-signalr>.service.signalr.net"`
+| `accessKey` | N | Output | Access key | `"your-access-key"`
+| `direction` | N | Output | The direction of the binding | `"output"`

### Azure Active Directory (Azure AD) authentication

diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/smtp.md b/daprdocs/content/en/reference/components-reference/supported-bindings/smtp.md
index c277dec0a10..67af19ba455 100644
--- a/daprdocs/content/en/reference/components-reference/supported-bindings/smtp.md
+++ b/daprdocs/content/en/reference/components-reference/supported-bindings/smtp.md
@@ -43,6 +43,8 @@ spec:
     value: "subject"
   - name: priority
     value: "[value 1-5]"
+  - name: direction
+    value: "output"
 ```

{{% alert title="Warning" color="warning" %}}
@@ -53,17 +55,18 @@ The example configuration shown above contains a username and password as plain-

| Field | Required | Binding support | Details | Example |
|--------------------|:--------:|------------|-----|---------|
-| host | Y | Output | The host where your SMTP server runs | `"smtphost"` |
-| port | Y | Output | The port your SMTP server listens on | `"9999"` |
-| user | Y | Output | The user to authenticate against the SMTP server | `"user"` |
-| password | Y | Output | The password of the user | `"password"` |
-| skipTLSVerify | N | Output | If set to true, the SMPT server's TLS certificate will not be verified.
Defaults to `"false"` | `"true"`, `"false"` |
-| emailFrom | N | Output | If set, this specifies the email address of the sender. See [also](#example-request) | `"me@example.com"` |
-| emailTo | N | Output | If set, this specifies the email address of the receiver. See [also](#example-request) | `"me@example.com"` |
-| emailCc | N | Output | If set, this specifies the email address to CC in. See [also](#example-request) | `"me@example.com"` |
-| emailBcc | N | Output | If set, this specifies email address to BCC in. See [also](#example-request) | `"me@example.com"` |
-| subject | N | Output | If set, this specifies the subject of the email message. See [also](#example-request) | `"subject of mail"` |
-| priority | N | Output | If set, this specifies the priority (X-Priority) of the email message, from 1 (lowest) to 5 (highest) (default value: 3). See [also](#example-request) | `"1"` |
+| `host` | Y | Output | The host where your SMTP server runs | `"smtphost"` |
+| `port` | Y | Output | The port your SMTP server listens on | `"9999"` |
+| `user` | Y | Output | The user to authenticate against the SMTP server | `"user"` |
+| `password` | Y | Output | The password of the user | `"password"` |
+| `skipTLSVerify` | N | Output | If set to true, the SMTP server's TLS certificate will not be verified. Defaults to `"false"` | `"true"`, `"false"` |
+| `emailFrom` | N | Output | If set, this specifies the email address of the sender. See [also](#example-request) | `"me@example.com"` |
+| `emailTo` | N | Output | If set, this specifies the email address of the receiver. See [also](#example-request) | `"me@example.com"` |
+| `emailCc` | N | Output | If set, this specifies the email address to CC in. See [also](#example-request) | `"me@example.com"` |
+| `emailBcc` | N | Output | If set, this specifies the email address to BCC in. See [also](#example-request) | `"me@example.com"` |
+| `subject` | N | Output | If set, this specifies the subject of the email message. See [also](#example-request) | `"subject of mail"` |
+| `priority` | N | Output | If set, this specifies the priority (X-Priority) of the email message, from 1 (lowest) to 5 (highest) (default value: 3). See [also](#example-request) | `"1"` |
+| `direction` | N | Output | The direction of the binding | `"output"` |

## Binding support

diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/sns.md b/daprdocs/content/en/reference/components-reference/supported-bindings/sns.md
index 6728fc9eeb5..dd6d704fbd2 100644
--- a/daprdocs/content/en/reference/components-reference/supported-bindings/sns.md
+++ b/daprdocs/content/en/reference/components-reference/supported-bindings/sns.md
@@ -23,16 +23,17 @@ spec:
   version: v1
   metadata:
   - name: topicArn
-    value: mytopic
+    value: "mytopic"
   - name: region
-    value: us-west-2
+    value: "us-west-2"
   - name: accessKey
-    value: *****************
+    value: "*****************"
   - name: secretKey
-    value: *****************
+    value: "*****************"
   - name: sessionToken
-    value: *****************
-
+    value: "*****************"
+  - name: direction
+    value: "output"
 ```

{{% alert title="Warning" color="warning" %}}
@@ -43,11 +44,12 @@ The above example uses secrets as plain strings.
It is recommended to use a secr | Field | Required | Binding support | Details | Example | |--------------------|:--------:|------------|-----|---------| -| topicArn | Y | Output | The SNS topic name | `"arn:::topicarn"` | -| region | Y | Output | The specific AWS region | `"us-east-1"` | -| accessKey | Y | Output | The AWS Access Key to access this resource | `"key"` | -| secretKey | Y | Output | The AWS Secret Access Key to access this resource | `"secretAccessKey"` | -| sessionToken | N | Output | The AWS session token to use | `"sessionToken"` | +| `topicArn` | Y | Output | The SNS topic name | `"arn:::topicarn"` | +| `region` | Y | Output | The specific AWS region | `"us-east-1"` | +| `accessKey` | Y | Output | The AWS Access Key to access this resource | `"key"` | +| `secretKey` | Y | Output | The AWS Secret Access Key to access this resource | `"secretAccessKey"` | +| `sessionToken` | N | Output | The AWS session token to use | `"sessionToken"` | +| `direction` | N | Output | The direction of the binding | `"output"` | {{% alert title="Important" color="warning" %}} When running the Dapr sidecar (daprd) with your application on EKS (AWS Kubernetes), if you're using a node/pod that has already been attached to an IAM policy defining access to AWS resources, you **must not** provide AWS access-key, secret-key, and tokens in the definition of the component spec you're using. diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/sqs.md b/daprdocs/content/en/reference/components-reference/supported-bindings/sqs.md index aeef412d774..35e67106326 100644 --- a/daprdocs/content/en/reference/components-reference/supported-bindings/sqs.md +++ b/daprdocs/content/en/reference/components-reference/supported-bindings/sqs.md @@ -23,16 +23,17 @@ spec: version: v1 metadata: - name: queueName - value: items + value: "items" - name: region - value: us-west-2 + value: "us-west-2" - name: accessKey - value: ***************** + value: "*****************" - name: secretKey - value: ***************** + value: "*****************" - name: sessionToken - value: ***************** - + value: "*****************" + - name: direction + value: "input, output" ``` {{% alert title="Warning" color="warning" %}} @@ -43,11 +44,12 @@ The above example uses secrets as plain strings. 
It is recommended to use a secr

| Field | Required | Binding support | Details | Example |
|--------------------|:--------:|------------|-----|---------|
-| queueName | Y | Input/Output | The SQS queue name | `"myqueue"` |
-| region | Y | Input/Output | The specific AWS region | `"us-east-1"` |
-| accessKey | Y | Input/Output | The AWS Access Key to access this resource | `"key"` |
-| secretKey | Y | Input/Output | The AWS Secret Access Key to access this resource | `"secretAccessKey"` |
-| sessionToken | N | Input/Output | The AWS session token to use | `"sessionToken"` |
+| `queueName` | Y | Input/Output | The SQS queue name | `"myqueue"` |
+| `region` | Y | Input/Output | The specific AWS region | `"us-east-1"` |
+| `accessKey` | Y | Input/Output | The AWS Access Key to access this resource | `"key"` |
+| `secretKey` | Y | Input/Output | The AWS Secret Access Key to access this resource | `"secretAccessKey"` |
+| `sessionToken` | N | Input/Output | The AWS session token to use | `"sessionToken"` |
+| `direction` | N | Input/Output | The direction of the binding | `"input"`, `"output"`, `"input, output"` |

{{% alert title="Important" color="warning" %}}
When running the Dapr sidecar (daprd) with your application on EKS (AWS Kubernetes), if you're using a node/pod that has already been attached to an IAM policy defining access to AWS resources, you **must not** provide AWS access-key, secret-key, and tokens in the definition of the component spec you're using.

diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/storagequeues.md b/daprdocs/content/en/reference/components-reference/supported-bindings/storagequeues.md
index 5e0d89330bb..e29e29932b7 100644
--- a/daprdocs/content/en/reference/components-reference/supported-bindings/storagequeues.md
+++ b/daprdocs/content/en/reference/components-reference/supported-bindings/storagequeues.md
@@ -27,6 +27,8 @@ spec:
     value: "***********"
   - name: queueName
     value: "myqueue"
+# - name: pollingInterval
+#   value: "30s"
 # - name: ttlInSeconds
 #   value: "60"
 # - name: decodeBase64
@@ -37,6 +39,8 @@ spec:
 #   value: "http://127.0.0.1:10001"
 # - name: visibilityTimeout
 #   value: "30s"
+# - name: direction
+#   value: "input, output"
 ```

{{% alert title="Warning" color="warning" %}}
@@ -50,11 +54,13 @@ The above example uses secrets as plain strings. It is recommended to use a secr
| `accountName` | Y | Input/Output | The name of the Azure Storage account | `"account1"` |
| `accountKey` | Y* | Input/Output | The access key of the Azure Storage account. Only required when not using Azure AD authentication. | `"access-key"` |
| `queueName` | Y | Input/Output | The name of the Azure Storage queue | `"myqueue"` |
+| `pollingInterval` | N | Input | Set the interval to poll Azure Storage Queues for new messages, as a Go duration value. Default: `"10s"` | `"30s"` |
| `ttlInSeconds` | N | Output | Parameter to set the default message time to live. If this parameter is omitted, messages will expire after 10 minutes. See [also](#specifying-a-ttl-per-message) | `"60"` |
-| `decodeBase64` | N | Output | Configuration to decode base64 file content before saving to Storage Queues. (In case of saving a file with binary content). Defaults to `false` | `true`, `false` |
+| `decodeBase64` | N | Input | Configuration to decode base64 content received from the Storage Queue into a string. Defaults to `false` | `true`, `false` |
| `encodeBase64` | N | Output | If enabled, base64 encodes the data payload before uploading to Azure storage queues.
Default `false`. | `true`, `false` |
| `endpoint` | N | Input/Output | Optional custom endpoint URL. This is useful when using the [Azurite emulator](https://github.com/Azure/azurite) or when using custom domains for Azure Storage (although this is not officially supported). The endpoint must be the full base URL, including the protocol (`http://` or `https://`), the IP or FQDN, and optional port. | `"http://127.0.0.1:10001"` or `"https://accountName.queue.example.com"` |
-| `visibilityTimeout` | N | Input | Allows setting a custom queue visibility timeout to avoid immediate retrying of recently failed messages. Defaults to 30 seconds. | "100s" |
+| `visibilityTimeout` | N | Input | Allows setting a custom queue visibility timeout to avoid immediate retrying of recently failed messages. Defaults to 30 seconds. | `"100s"` |
+| `direction` | N | Input/Output | Direction of the binding. | `"input"`, `"output"`, `"input, output"` |

### Azure Active Directory (Azure AD) authentication

diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/twilio.md b/daprdocs/content/en/reference/components-reference/supported-bindings/twilio.md
index 1a6917d48d2..de30015c945 100644
--- a/daprdocs/content/en/reference/components-reference/supported-bindings/twilio.md
+++ b/daprdocs/content/en/reference/components-reference/supported-bindings/twilio.md
@@ -23,13 +23,15 @@ spec:
   version: v1
   metadata:
   - name: toNumber # required.
-    value: 111-111-1111
+    value: "111-111-1111"
   - name: fromNumber # required.
-    value: 222-222-2222
+    value: "222-222-2222"
   - name: accountSid # required.
-    value: *****************
+    value: "*****************"
   - name: authToken # required.
-    value: *****************
+    value: "*****************"
+  - name: direction
+    value: "output"
 ```
{{% alert title="Warning" color="warning" %}}
The above example uses secrets as plain strings. It is recommended to use a secret store for the secrets as described [here]({{< ref component-secrets.md >}}).
@@ -39,10 +41,11 @@ The above example uses secrets as plain strings. It is recommended to use a secr

| Field | Required | Binding support | Details | Example |
|--------------------|:--------:|------------|-----|---------|
-| toNumber | Y | Output | The target number to send the sms to | `"111-111-1111"` |
-| fromNumber | Y | Output | The sender phone number | `"122-222-2222"` |
-| accountSid | Y | Output | The Twilio account SID | `"account sid"` |
-| authToken | Y | Output | The Twilio auth token | `"auth token"` |
+| `toNumber` | Y | Output | The target number to send the SMS to | `"111-111-1111"` |
+| `fromNumber` | Y | Output | The sender phone number | `"222-222-2222"` |
+| `accountSid` | Y | Output | The Twilio account SID | `"account sid"` |
+| `authToken` | Y | Output | The Twilio auth token | `"auth token"` |
+| `direction` | N | Output | The direction of the binding | `"output"` |

## Binding support

diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/wasm.md b/daprdocs/content/en/reference/components-reference/supported-bindings/wasm.md
index 0ca9a69d450..c19c8b9046d 100644
--- a/daprdocs/content/en/reference/components-reference/supported-bindings/wasm.md
+++ b/daprdocs/content/en/reference/components-reference/supported-bindings/wasm.md
@@ -36,6 +36,10 @@ Wasm binaries are loaded from a URL. For example, the URL `file://rewrite.wasm`
 loads `rewrite.wasm` from the current directory of the process.
 On Kubernetes, see [How to: Mount Pod volumes to the Dapr sidecar]({{< ref kubernetes-volume-mounts.md >}}) to configure a filesystem mount that can contain Wasm binaries.
+It is also possible to fetch the Wasm binary from a remote URL. In this case,
+the URL must point directly to a single Wasm binary. For example:
+- `http://example.com/rewrite.wasm`, or
+- `https://example.com/rewrite.wasm`.

 Dapr uses [wazero](https://wazero.io) to run these binaries, because it has no dependencies. This allows use of WebAssembly with no installation process
@@ -58,14 +62,16 @@ spec:
   metadata:
   - name: url
     value: "file://uppercase.wasm"
+  - name: direction
+    value: "output"
 ```

## Spec metadata fields

| Field | Details | Required | Example |
|-------|----------------------------------------------------------------|----------|----------------|
-| url | The URL of the resource including the Wasm binary to instantiate. The supported schemes include `file://`. The path of a `file://` URL is relative to the Dapr process unless it begins with `/`. | true | `file://hello.wasm` |
-
+| `url` | The URL of the resource including the Wasm binary to instantiate. The supported schemes include `file://`, `http://`, and `https://`. The path of a `file://` URL is relative to the Dapr process unless it begins with `/`. | true | `file://hello.wasm`, `https://example.com/hello.wasm` |
+| `direction` | The direction of the binding | false | `"output"` |

## Binding support

@@ -80,18 +86,39 @@ pass metadata properties with each request:

- `args` any CLI arguments, comma-separated. This excludes the program name.

-For example, if the binding `url` was a Ruby interpreter, such as from
-[webassembly-language-runtimes](https://github.com/vmware-labs/webassembly-language-runtimes/releases/tag/ruby%2F3.2.0%2B20230215-1349da9),
-the following request would respond back with "Hello, salaboy":
+For example, consider binding the `url` to a Ruby interpreter, such as from
+[webassembly-language-runtimes](https://github.com/vmware-labs/webassembly-language-runtimes/releases/tag/ruby%2F3.2.0%2B20230215-1349da9):
+
+```yaml
+apiVersion: dapr.io/v1alpha1
+kind: Component
+metadata:
+  name: wasm
+spec:
+  type: bindings.wasm
+  version: v1
+  metadata:
+  - name: url
+    value: "https://github.com/vmware-labs/webassembly-language-runtimes/releases/download/ruby%2F3.2.0%2B20230215-1349da9/ruby-3.2.0-slim.wasm"
+```
+
+Assuming you want to start Dapr on port 3500 with the Wasm binding, run:
+
+```sh
+$ dapr run --app-id wasm --dapr-http-port 3500 --resources-path components
+```
+
+The following request responds with `Hello "salaboy"`:

-```json
+```sh
+$ curl -X POST http://localhost:3500/v1.0/bindings/wasm -d'
{
  "operation": "execute",
  "metadata": {
-    "args": "-ne,'print \"Hello, \"; print'"
+    "args": "-ne,print \"Hello \"; print"
  },
  "data": "salaboy"
-}
+}'
```

## Related links

diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/zeebe-command.md b/daprdocs/content/en/reference/components-reference/supported-bindings/zeebe-command.md
index b2edbb1673f..4db06840155 100644
--- a/daprdocs/content/en/reference/components-reference/supported-bindings/zeebe-command.md
+++ b/daprdocs/content/en/reference/components-reference/supported-bindings/zeebe-command.md
@@ -21,23 +21,26 @@ spec:
   version: v1
   metadata:
   - name: gatewayAddr
-    value: <host>:<port>
+    value: "<host>:<port>"
   - name: gatewayKeepAlive
-    value: 45s
+    value: "45s"
   - name: usePlainTextConnection
-    value: true
+    value: "true"
   - name: caCertificatePath
-    value: /path/to/ca-cert
+    value: "/path/to/ca-cert"
+  - name: direction
+    value: "output"
 ```

## Spec metadata fields

| Field | Required | Binding support | Details | Example |
|-------------------------|:--------:|------------|-----|---------|
-| gatewayAddr | Y | Output | Zeebe gateway address | `localhost:26500` |
-| gatewayKeepAlive | N | Output | Sets how often keep alive messages should be sent to the gateway. Defaults to 45 seconds | `45s` |
-| usePlainTextConnection | N | Output | Whether to use a plain text connection or not | `true,false` |
-| caCertificatePath | N | Output | The path to the CA cert | `/path/to/ca-cert` |
+| `gatewayAddr` | Y | Output | Zeebe gateway address | `"localhost:26500"` |
+| `gatewayKeepAlive` | N | Output | Sets how often keep-alive messages should be sent to the gateway. Defaults to 45 seconds | `"45s"` |
+| `usePlainTextConnection` | N | Output | Whether to use a plain text connection or not | `"true"`, `"false"` |
+| `caCertificatePath` | N | Output | The path to the CA cert | `"/path/to/ca-cert"` |
+| `direction` | N | Output | The direction of the binding | `"output"` |

## Binding support

diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/zeebe-jobworker.md b/daprdocs/content/en/reference/components-reference/supported-bindings/zeebe-jobworker.md
index 63717ca7b76..a4c20cff9ba 100644
--- a/daprdocs/content/en/reference/components-reference/supported-bindings/zeebe-jobworker.md
+++ b/daprdocs/content/en/reference/components-reference/supported-bindings/zeebe-jobworker.md
@@ -21,53 +21,56 @@ spec:
   version: v1
   metadata:
   - name: gatewayAddr
-    value: <host>:<port>
+    value: "<host>:<port>"
   - name: gatewayKeepAlive
-    value: 45s
+    value: "45s"
   - name: usePlainTextConnection
-    value: true
+    value: "true"
   - name: caCertificatePath
-    value: /path/to/ca-cert
+    value: "/path/to/ca-cert"
   - name: workerName
-    value: products-worker
+    value: "products-worker"
   - name: workerTimeout
-    value: 5m
+    value: "5m"
   - name: requestTimeout
-    value: 15s
+    value: "15s"
   - name: jobType
-    value: fetch-products
+    value: "fetch-products"
   - name: maxJobsActive
-    value: 32
+    value: "32"
   - name: concurrency
-    value: 4
+    value: "4"
   - name: pollInterval
-    value: 100ms
+    value: "100ms"
   - name: pollThreshold
-    value: 0.3
+    value: "0.3"
   - name: fetchVariables
-    value: productId, productName, productKey
+    value: "productId, productName, productKey"
   - name: autocomplete
-    value: true
+    value: "true"
+  - name: direction
+    value: "input"
 ```

## Spec metadata fields

| Field | Required | Binding support | Details | Example |
|-------------------------|:--------:|------------|-----|---------|
-| gatewayAddr | Y | Input | Zeebe gateway address | `localhost:26500` |
-| gatewayKeepAlive | N | Input | Sets how often keep alive messages should be sent to the gateway. Defaults to 45 seconds | `45s` |
-| usePlainTextConnection | N | Input | Whether to use a plain text connection or not | `true,false` |
-| caCertificatePath | N | Input | The path to the CA cert | `/path/to/ca-cert` |
-| workerName | N | Input | The name of the worker activating the jobs, mostly used for logging purposes | `products-worker` |
-| workerTimeout | N | Input | A job returned after this call will not be activated by another call until the timeout has been reached; defaults to 5 minutes | `5m` |
-| requestTimeout | N | Input | The request will be completed when at least one job is activated or after the requestTimeout. If the requestTimeout = 0, a default timeout is used.
If the requestTimeout < 0, long polling is disabled and the request is completed immediately, even when no job is activated. Defaults to 10 seconds | `30s` |
-| jobType | Y | Input | the job type, as defined in the BPMN process (e.g. ``) | `fetch-products` |
-| maxJobsActive | N | Input | Set the maximum number of jobs which will be activated for this worker at the same time. Defaults to 32 | `32` |
-| concurrency | N | Input | The maximum number of concurrent spawned goroutines to complete jobs. Defaults to 4 | `4` |
-| pollInterval | N | Input | Set the maximal interval between polling for new jobs. Defaults to 100 milliseconds | `100ms` |
-| pollThreshold | N | Input | Set the threshold of buffered activated jobs before polling for new jobs, i.e. threshold * maxJobsActive. Defaults to 0.3 | `0.3` |
-| fetchVariables | N | Input | A list of variables to fetch as the job variables; if empty, all visible variables at the time of activation for the scope of the job will be returned | `productId, productName, productKey` |
-| autocomplete | N | Input | Indicates if a job should be autocompleted or not. If not set, all jobs will be auto-completed by default. Disable it if the worker should manually complete or fail the job with either a business error or an incident | `true,false` |
+| `gatewayAddr` | Y | Input | Zeebe gateway address | `"localhost:26500"` |
+| `gatewayKeepAlive` | N | Input | Sets how often keep-alive messages should be sent to the gateway. Defaults to 45 seconds | `"45s"` |
+| `usePlainTextConnection` | N | Input | Whether to use a plain text connection or not | `"true"`, `"false"` |
+| `caCertificatePath` | N | Input | The path to the CA cert | `"/path/to/ca-cert"` |
+| `workerName` | N | Input | The name of the worker activating the jobs, mostly used for logging purposes | `"products-worker"` |
+| `workerTimeout` | N | Input | A job returned after this call will not be activated by another call until the timeout has been reached; defaults to 5 minutes | `"5m"` |
+| `requestTimeout` | N | Input | The request will be completed when at least one job is activated or after the requestTimeout. If the requestTimeout = 0, a default timeout is used. If the requestTimeout < 0, long polling is disabled and the request is completed immediately, even when no job is activated. Defaults to 10 seconds | `"30s"` |
+| `jobType` | Y | Input | The job type, as defined in the BPMN process (e.g. `<zeebe:taskDefinition type="fetch-products" />`) | `"fetch-products"` |
+| `maxJobsActive` | N | Input | Set the maximum number of jobs which will be activated for this worker at the same time. Defaults to 32 | `"32"` |
+| `concurrency` | N | Input | The maximum number of concurrent spawned goroutines to complete jobs. Defaults to 4 | `"4"` |
+| `pollInterval` | N | Input | Set the maximum interval between polling for new jobs. Defaults to 100 milliseconds | `"100ms"` |
+| `pollThreshold` | N | Input | Set the threshold of buffered activated jobs before polling for new jobs, i.e. threshold * maxJobsActive. Defaults to 0.3 | `"0.3"` |
+| `fetchVariables` | N | Input | A list of variables to fetch as the job variables; if empty, all visible variables at the time of activation for the scope of the job will be returned | `"productId"`, `"productName"`, `"productKey"` |
+| `autocomplete` | N | Input | Indicates if a job should be autocompleted or not. If not set, all jobs will be auto-completed by default.
Disable it if the worker should manually complete or fail the job with either a business error or an incident | `"true"`, `"false"` |
+| `direction` | N | Input | The direction of the binding | `"input"` |

## Binding support

diff --git a/daprdocs/content/en/reference/components-reference/supported-configuration-stores/azure-appconfig-configuration-store.md b/daprdocs/content/en/reference/components-reference/supported-configuration-stores/azure-appconfig-configuration-store.md
index 4d2eeaea528..6ba16586462 100644
--- a/daprdocs/content/en/reference/components-reference/supported-configuration-stores/azure-appconfig-configuration-store.md
+++ b/daprdocs/content/en/reference/components-reference/supported-configuration-stores/azure-appconfig-configuration-store.md
@@ -40,7 +40,7 @@ spec:
   - name: azureCertificateFile # Optional
     value : "[pfx_certificate_file_fully_qualified_local_path]"
   - name: subscribePollInterval # Optional
-    value: #Optional [Expected format example - 1s|1m|1h]
+    value: #Optional [Expected format example - 30s]
 ```


@@ -55,9 +55,9 @@ The above example uses secrets as plain strings. It is recommended to use a secr
| connectionString | Y* | Connection String for the Azure App Configuration instance. No Default. Can be `secretKeyRef` to use a secret reference. *Mutually exclusive with host field. *Not to be used when [Azure Authentication](https://docs.dapr.io/developing-applications/integrations/azure/authenticating-azure/) is used | `Endpoint=https://foo.azconfig.io;Id=osOX-l9-s0:sig;Secret=00000000000000000000000000000000000000000000`
| host | N* | Endpoint for the Azure App Configuration instance. No Default. *Mutually exclusive with connectionString field. *To be used when [Azure Authentication](https://docs.dapr.io/developing-applications/integrations/azure/authenticating-azure/) is used | `https://dapr.azconfig.io`
| maxRetries | N | Maximum number of retries before giving up. Defaults to `3` | `5`, `10`
-| retryDelay | N | RetryDelay specifies the initial amount of delay to use before retrying an operation. The delay increases exponentially with each retry up to the maximum specified by MaxRetryDelay. Defaults to `4` seconds; `"-1"` disables delay between retries. | `4000000000`
-| maxRetryDelay | N | MaxRetryDelay specifies the maximum delay allowed before retrying an operation. Typically the value is greater than or equal to the value specified in RetryDelay. Defaults to `120` seconds; `"-1"` disables the limit | `120000000000`
-| subscribePollInterval | N | subscribePollInterval specifies the poll interval for polling the subscribed keys for any changes. Default polling interval is set to `24` hours.
+| retryDelay | N | RetryDelay specifies the initial amount of delay to use before retrying an operation. The delay increases exponentially with each retry up to the maximum specified by MaxRetryDelay. Defaults to `4` seconds; `"-1"` disables delay between retries. | `4s`
+| maxRetryDelay | N | MaxRetryDelay specifies the maximum delay allowed before retrying an operation. Typically the value is greater than or equal to the value specified in RetryDelay. Defaults to `120` seconds; `"-1"` disables the limit | `120s`
+| subscribePollInterval | N | subscribePollInterval specifies the poll interval, in nanoseconds, for polling the subscribed keys for any changes. This will be updated in the future to the Go duration format. The default polling interval is `24` hours. | `30s`

**Note**: either `host` or `connectionString` must be specified.
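As a rough sketch, the retry and polling fields from the table above can be combined in a component definition like this (values are illustrative; `host` is shown, which assumes Azure Authentication, and the component name is arbitrary):

```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
metadata:
  name: configstore # arbitrary component name
spec:
  type: configuration.azure.appconfig
  version: v1
  metadata:
  - name: host # use connectionString instead when not using Azure Authentication
    value: "https://dapr.azconfig.io"
  - name: maxRetries
    value: "5"
  - name: retryDelay
    value: "4s"
  - name: maxRetryDelay
    value: "120s"
  - name: subscribePollInterval
    value: "30s"
```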
diff --git a/daprdocs/content/en/reference/components-reference/supported-configuration-stores/postgres-configuration-store.md b/daprdocs/content/en/reference/components-reference/supported-configuration-stores/postgres-configuration-store.md deleted file mode 100644 index 43b9820e081..00000000000 --- a/daprdocs/content/en/reference/components-reference/supported-configuration-stores/postgres-configuration-store.md +++ /dev/null @@ -1,117 +0,0 @@ ---- -type: docs -title: "PostgreSQL" -linkTitle: "PostgreSQL" -description: Detailed information on the PostgreSQL configuration store component -aliases: - - "/operations/components/setup-configuration-store/supported-configuration-stores/setup-postgresql/" - - "/operations/components/setup-configuration-store/supported-configuration-stores/setup-postgres/" ---- - -## Component format - -To set up an PostgreSQL configuration store, create a component of type `configuration.postgresql` - -```yaml -apiVersion: dapr.io/v1alpha1 -kind: Component -metadata: - name: -spec: - type: configuration.postgresql - version: v1 - metadata: - - name: connectionString - value: "host=localhost user=postgres password=example port=5432 connect_timeout=10 database=config" - - name: table # name of the table which holds configuration information - value: "[your_configuration_table_name]" - - name: connMaxIdleTime # max timeout for connection - value : "15s" - -``` - -{{% alert title="Warning" color="warning" %}} -The above example uses secrets as plain strings. It is recommended to use a secret store for the secrets as described [here]({{< ref component-secrets.md >}}). -{{% /alert %}} - -## Spec metadata fields - -| Field | Required | Details | Example | -|--------------------|:--------:|---------|---------| -| connectionString | Y | The connection string for PostgreSQL. Default pool_max_conns = 5 | `"host=localhost user=postgres password=example port=5432 connect_timeout=10 database=dapr_test pool_max_conns=10"` -| table | Y | Table name for configuration information, must be lowercased. | `configtable` - -## Set up PostgreSQL as Configuration Store - -1. Start PostgreSQL Database -1. Connect to the PostgreSQL database and setup a configuration table with following schema - - -| Field | Datatype | Nullable |Details | -|--------------------|:--------:|---------|---------| -| KEY | VARCHAR | N |Holds `"Key"` of the configuration attribute | -| VALUE | VARCHAR | N |Holds Value of the configuration attribute | -| VERSION | VARCHAR | N | Holds version of the configuration attribute -| METADATA | JSON | Y | Holds Metadata as JSON - -```console -CREATE TABLE IF NOT EXISTS table_name ( - KEY VARCHAR NOT NULL, - VALUE VARCHAR NOT NULL, - VERSION VARCHAR NOT NULL, - METADATA JSON ); -``` -3. Create a TRIGGER on configuration table. An example function to create a TRIGGER is as follows - -```console -CREATE OR REPLACE FUNCTION configuration_event() RETURNS TRIGGER AS $$ - DECLARE - data json; - notification json; - - BEGIN - - IF (TG_OP = 'DELETE') THEN - data = row_to_json(OLD); - ELSE - data = row_to_json(NEW); - END IF; - - notification = json_build_object( - 'table',TG_TABLE_NAME, - 'action', TG_OP, - 'data', data); - - PERFORM pg_notify('config',notification::text); - RETURN NULL; - END; -$$ LANGUAGE plpgsql; -``` -4. Create the trigger with data encapsulated in the field labelled as `data` -```ps -notification = json_build_object( - 'table',TG_TABLE_NAME, - 'action', TG_OP, - 'data', data); -``` -5. 
The channel mentioned as attribute to `pg_notify` should be used when subscribing for configuration notifications -6. Since this is a generic created trigger, map this trigger to `configuration table` -```console -CREATE TRIGGER config -AFTER INSERT OR UPDATE OR DELETE ON configtable - FOR EACH ROW EXECUTE PROCEDURE notify_event(); -``` -7. In the subscribe request add an additional metadata field with key as `pgNotifyChannel` and value should be set to same `channel name` mentioned in `pg_notify`. From the above example, it should be set to `config` - -{{% alert title="Note" color="primary" %}} -When calling `subscribe` API, `metadata.pgNotifyChannel` should be used to specify the name of the channel to listen for notifications from PostgreSQL configuration store. - -Any number of keys can be added to a subscription request. Each subscription uses an exclusive database connection. It is strongly recommended to subscribe to multiple keys within a single subscription. This helps optimize the number of connections to the database. - -Example of subscribe HTTP API - -```ps -curl --location --request GET 'http://:/configuration/mypostgresql/subscribe?key=&key=&metadata.pgNotifyChannel=' -``` -{{% /alert %}} - -## Related links -- [Basic schema for a Dapr component]({{< ref component-schema >}}) -- [Configuration building block]({{< ref configuration-api-overview >}}) diff --git a/daprdocs/content/en/reference/components-reference/supported-cryptography/azure-key-vault.md b/daprdocs/content/en/reference/components-reference/supported-cryptography/azure-key-vault.md index eaaa30cdc21..6ec9ba6a456 100644 --- a/daprdocs/content/en/reference/components-reference/supported-cryptography/azure-key-vault.md +++ b/daprdocs/content/en/reference/components-reference/supported-cryptography/azure-key-vault.md @@ -38,7 +38,7 @@ The Azure Key Vault cryptography component supports authentication with Azure AD 1. Read the [Authenticating to Azure]({{< ref "authenticating-azure.md" >}}) document. 1. Create an [Azure AD application]({{< ref "howto-aad.md" >}}) (also called a Service Principal). -1. Alternatively, create a [managed identity]({{< ref "howto-msi.md" >}}) for your application platform. +1. Alternatively, create a [managed identity]({{< ref "howto-mi.md" >}}) for your application platform. ## Spec metadata fields @@ -48,5 +48,6 @@ The Azure Key Vault cryptography component supports authentication with Azure AD | Auth metadata | Y | See [Authenticating to Azure]({{< ref "authenticating-azure.md" >}}) for more information | | ## Related links + - [Cryptography building block]({{< ref cryptography >}}) - [Authenticating to Azure]({{< ref azure-authentication >}}) \ No newline at end of file diff --git a/daprdocs/content/en/reference/components-reference/supported-middleware/_index.md b/daprdocs/content/en/reference/components-reference/supported-middleware/_index.md index 6013af82d96..767fafe2dd2 100644 --- a/daprdocs/content/en/reference/components-reference/supported-middleware/_index.md +++ b/daprdocs/content/en/reference/components-reference/supported-middleware/_index.md @@ -8,6 +8,9 @@ no_list: true aliases: - /developing-applications/middleware/supported-middleware/ --- + +The following table lists middleware components supported by Dapr. 
[Learn how to customize processing pipelines and set up middleware components.]({{< ref "middleware.md" >}})
+
 {{< partial "components/description.html" >}}

 {{< partial "components/middleware.html" >}}

diff --git a/daprdocs/content/en/reference/components-reference/supported-middleware/middleware-bearer.md b/daprdocs/content/en/reference/components-reference/supported-middleware/middleware-bearer.md
index e2916a71384..a075548854f 100644
--- a/daprdocs/content/en/reference/components-reference/supported-middleware/middleware-bearer.md
+++ b/daprdocs/content/en/reference/components-reference/supported-middleware/middleware-bearer.md
@@ -22,22 +22,30 @@ spec:
   version: v1
   metadata:
   - name: audience
-    value: ""
+    value: ""
   - name: issuer
     value: ""
   # Optional values
   - name: jwksURL
-    value: "https://accounts.google.com/.well-known/openid-configuration"
+    value: ""
 ```
+
## Spec metadata fields

| Field | Required | Details | Example |
|-------|:--------:|---------|---------|
| `audience` | Y | The audience expected in the tokens. Usually, this corresponds to the client ID of your application that is created as part of a credential hosted by an OpenID Connect platform. |
-| `issuer` | Y | The issuer authority, which is the value expected in the issuer claim in the tokens. | `"https://accounts.google.com"`, `"https://login.salesforce.com"`
+| `issuer` | Y | The issuer authority, which is the value expected in the issuer claim in the tokens. | `"https://accounts.google.com"`
| `jwksURL` | N | Address of the JWKS (JWK Set containing the public keys for verifying tokens). If empty, will try to fetch the URL set in the OpenID Configuration document `/.well-known/openid-configuration`. | `"https://accounts.google.com/.well-known/openid-configuration"`

+Common values for `issuer` include:
+
+- Auth0: `https://{domain}`, where `{domain}` is the domain of your Auth0 application
+- Azure AD: `https://login.microsoftonline.com/{tenant}/v2.0`, where `{tenant}` should be replaced with the tenant ID of your application, as a UUID
+- Google: `https://accounts.google.com`
+- Salesforce (Force.com): `https://login.salesforce.com`
+
## Dapr configuration

To be applied, the middleware must be referenced in [configuration]({{< ref configuration-concept.md >}}). See [middleware pipelines]({{< ref "middleware.md">}}).

diff --git a/daprdocs/content/en/reference/components-reference/supported-middleware/middleware-wasm.md b/daprdocs/content/en/reference/components-reference/supported-middleware/middleware-wasm.md
index 51d75b0b88d..d83bda22fb2 100644
--- a/daprdocs/content/en/reference/components-reference/supported-middleware/middleware-wasm.md
+++ b/daprdocs/content/en/reference/components-reference/supported-middleware/middleware-wasm.md
@@ -21,6 +21,10 @@ Wasm binaries are loaded from a URL. For example, the URL `file://rewrite.wasm`
 loads `rewrite.wasm` from the current directory of the process. On Kubernetes, see [How to: Mount Pod volumes to the Dapr sidecar]({{< ref kubernetes-volume-mounts.md >}}) to configure a filesystem mount that can contain Wasm modules.
+It is also possible to fetch the Wasm binary from a remote URL. In this case,
+the URL must point directly to a single Wasm binary. For example:
+- `http://example.com/rewrite.wasm`, or
+- `https://example.com/rewrite.wasm`.

## Component format

@@ -35,6 +39,8 @@ spec:
   metadata:
   - name: url
     value: "file://router.wasm"
+  - name: guestConfig
+    value: {"environment":"production"}
 ```

## Spec metadata fields

@@ -44,7 +50,8 @@ How to compile this is described later.
| Field | Details | Required | Example |
|-------|----------------------------------------------------------------|----------|----------------|
-| url | The URL of the resource including the Wasm binary to instantiate. The supported schemes include `file://`. The path of a `file://` URL is relative to the Dapr process unless it begins with `/`. | true | `file://hello.wasm` |
+| `url` | The URL of the resource including the Wasm binary to instantiate. The supported schemes include `file://`, `http://`, and `https://`. The path of a `file://` URL is relative to the Dapr process unless it begins with `/`. | true | `file://hello.wasm`, `https://example.com/hello.wasm` |
+| `guestConfig` | An optional configuration passed to Wasm guests. Users can pass an arbitrary string to be parsed by the guest code. | false | `environment=production`, `{"environment":"production"}` |

## Dapr configuration

@@ -116,6 +123,49 @@ If using TinyGo, compile as shown below and set the spec metadata field named
tinygo build -o router.wasm -scheduler=none --no-debug -target=wasi router.go
```

+### Wasm `guestConfig` example
+
+Here is an example of how to use `guestConfig` to pass configurations to Wasm. In Wasm code, you can use the function `handler.Host.GetConfig`, defined in the guest SDK, to get the configuration. In the following example, the Wasm middleware parses the configured `environment` from the JSON config defined in the component.
+
+```yaml
+apiVersion: dapr.io/v1alpha1
+kind: Component
+metadata:
+  name: wasm
+spec:
+  type: middleware.http.wasm
+  version: v1
+  metadata:
+  - name: url
+    value: "file://router.wasm"
+  - name: guestConfig
+    value: {"environment":"production"}
+```
+
+Here's an example in TinyGo:
+
+```go
+package main
+
+import (
+	"encoding/json"
+
+	"github.com/http-wasm/http-wasm-guest-tinygo/handler"
+	"github.com/http-wasm/http-wasm-guest-tinygo/handler/api"
+)
+
+type Config struct {
+	Environment string `json:"environment"`
+}
+
+func main() {
+	// Get the config bytes, which is the value of guestConfig defined in the component.
+	configBytes := handler.Host.GetConfig()
+
+	config := Config{}
+	json.Unmarshal(configBytes, &config)
+	handler.Host.Log(api.LogLevelInfo, "Config environment: "+config.Environment)
+}
+```
+
## Related links

diff --git a/daprdocs/content/en/reference/components-reference/supported-pubsub/_index.md b/daprdocs/content/en/reference/components-reference/supported-pubsub/_index.md
index 9e7c261c05b..9935e3e07ad 100644
--- a/daprdocs/content/en/reference/components-reference/supported-pubsub/_index.md
+++ b/daprdocs/content/en/reference/components-reference/supported-pubsub/_index.md
@@ -9,6 +9,8 @@ aliases:
 no_list: true
 ---

+The following table lists publish and subscribe brokers supported by the Dapr pub/sub building block. [Learn how to set up different brokers for Dapr publish and subscribe.]({{< ref setup-pubsub.md >}})
+
 {{< partial "components/description.html" >}}

 {{< partial "components/pubsub.html" >}}

diff --git a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-apache-kafka.md b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-apache-kafka.md
index 7d40177184c..48e2876c2d1 100644
--- a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-apache-kafka.md
+++ b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-apache-kafka.md
@@ -9,7 +9,7 @@ aliases:

 ## Component format

-To setup Apache Kafka pubsub create a component of type `pubsub.kafka`.
See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration. For details on using `secretKeyRef`, see the guide on [how to reference secrets in components]({{< ref component-secrets.md >}}). +To set up Apache Kafka pub/sub, create a component of type `pubsub.kafka`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration. All component metadata field values can carry [templated metadata values]({{< ref "component-schema.md#templated-metadata-values" >}}), which are resolved on Dapr sidecar startup. For example, you can choose to use `{namespace}` as the `consumerGroup` to enable using the same `appId` in different namespaces using the same topics as described in [this article]({{< ref "howto-namespace.md#with-namespace-consumer-groups">}}). @@ -27,6 +27,8 @@ spec: value: "dapr-kafka.myapp.svc.cluster.local:9092" - name: consumerGroup # Optional. Used for input bindings. value: "{namespace}" + - name: consumerID # Optional. If not supplied, runtime will create one. + value: "channel1" - name: clientID # Optional. Used as client tracing ID by Kafka brokers. value: "my-dapr-app-id" - name: authType # Required. @@ -49,13 +51,16 @@ spec: value: "true" ``` +> For details on using `secretKeyRef`, see the guide on [how to reference secrets in components]({{< ref component-secrets.md >}}). ## Spec metadata fields | Field | Required | Details | Example | |--------------------|:--------:|---------|---------| | brokers | Y | A comma-separated list of Kafka brokers. | `"localhost:9092,dapr-kafka.myapp.svc.cluster.local:9093"` | consumerGroup | N | A Kafka consumer group to listen on. Each record published to a topic is delivered to one consumer within each consumer group subscribed to the topic. | `"group1"` -| clientID | N | A user-provided string sent with every request to the Kafka brokers for logging, debugging, and auditing purposes. Defaults to `"sarama"`. | `"my-dapr-app"` +| consumerID | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value. | `"channel1"` +| clientID | N | A user-provided string sent with every request to the Kafka brokers for logging, debugging, and auditing purposes. Defaults to `"namespace.appID"` for Kubernetes mode or `"appID"` for Self-Hosted mode. | `"my-namespace.my-dapr-app"`, `"my-dapr-app"` | authRequired | N | *Deprecated* Enable [SASL](https://en.wikipedia.org/wiki/Simple_Authentication_and_Security_Layer) authentication with the Kafka brokers. | `"true"`, `"false"` | authType | Y | Configure or disable authentication. Supported values: `none`, `password`, `mtls`, or `oidc` | `"password"`, `"none"` | saslUsername | N | The SASL username used for authentication. Only required if `authType` is set to `"password"`.
| `"adminuser"` diff --git a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-aws-snssqs.md b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-aws-snssqs.md index f0332f08652..61b68290196 100644 --- a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-aws-snssqs.md +++ b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-aws-snssqs.md @@ -9,7 +9,18 @@ aliases: ## Component format -To setup AWS SNS/SQS for pub/sub, create a component of type `pubsub.snssqs`. [Learn more on how to create and apply a pubsub configuration]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}). +To set up AWS SNS/SQS pub/sub, create a component of type `pubsub.aws.snssqs`. + +By default, the AWS SNS/SQS component: +- Generates the SNS topics +- Provisions the SQS queues +- Configures a subscription of the queues to the topics + +{{% alert title="Note" color="primary" %}} +If you only have a publisher and no subscriber, only the SNS topics are created. + +However, if you have a subscriber, SNS, SQS, and the dynamic or static subscription thereof are generated. +{{% /alert %}} ```yaml apiVersion: dapr.io/v1alpha1 @@ -17,7 +28,7 @@ kind: Component metadata: name: snssqs-pubsub spec: - type: pubsub.snssqs + type: pubsub.aws.snssqs version: v1 metadata: - name: accessKey @@ -26,9 +37,11 @@ spec: value: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" - name: region value: "us-east-1" + # - name: consumerID # Optional. If not supplied, runtime will create one. + # value: "channel1" # - name: endpoint # Optional. # value: "http://localhost:4566" - # - name: sessionToken # Optional (mandatory if using AssignedRole, i.e. temporary accessKey and secretKey) + # - name: sessionToken # Optional (mandatory if using AssignedRole; for example, temporary accessKey and secretKey) # value: "TOKEN" # - name: messageVisibilityTimeout # Optional # value: 10 @@ -59,7 +72,7 @@ spec: ``` {{% alert title="Warning" color="warning" %}} -The above example uses secrets as plain strings. It is recommended to use [a secret store for the secrets]]({{< ref component-secrets.md >}}). +The above example uses secrets as plain strings. It is recommended to use [a secret store for the secrets]({{< ref component-secrets.md >}}). {{% /alert %}} ## Spec metadata fields @@ -69,6 +82,7 @@ The above example uses secrets as plain strings. It is recommended to use [a sec | accessKey | Y | ID of the AWS account/role with appropriate permissions to SNS and SQS (see below) | `"AKIAIOSFODNN7EXAMPLE"` | secretKey | Y | Secret for the AWS user/role. If using an `AssumeRole` access, you will also need to provide a `sessionToken` |`"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"` | region | Y | The AWS region where the SNS/SQS assets are located or be created in. See [this page](https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services/?p=ugi&l=na) for valid regions. Ensure that SNS and SQS are available in that region | `"us-east-1"` +| consumerID | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime set it to the Dapr application ID (`appID`) value. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. 
| `"channel1"` | endpoint | N | AWS endpoint for the component to use. Only used for local development with, for example, [localstack](https://github.com/localstack/localstack). The `endpoint` is unncessary when running against production AWS | `"http://localhost:4566"` | sessionToken | N | AWS session token to use. A session token is only required if you are using temporary security credentials | `"TOKEN"` | messageReceiveLimit | N | Number of times a message is received, after processing of that message fails, that once reached, results in removing of that message from the queue. If `sqsDeadLettersQueueName` is specified, `messageReceiveLimit` is the number of times a message is received, after processing of that message fails, that once reached, results in moving of the message to the SQS dead-letters queue. Default: `10` | `10` @@ -143,7 +157,7 @@ kind: Component metadata: name: snssqs-pubsub spec: - type: pubsub.snssqs + type: pubsub.aws.snssqs version: v1 metadata: - name: accessKey @@ -242,7 +256,7 @@ In order to run in AWS, create or assign an IAM user with permissions to the SNS Plug the `AWS account ID` and `AWS account secret` into the `accessKey` and `secretKey` in the component metadata, using Kubernetes secrets and `secretKeyRef`. -Alternatively, let's say you want to provision the SNS and SQS assets using your own tool of choice (e.g. Terraform) while preventing Dapr from doing so dynamically. You need to enable `disableEntityManagement` and assign your Dapr-using application with an IAM Role, with a policy like: +Alternatively, let's say you want to provision the SNS and SQS assets using your own tool of choice (for example, Terraform) while preventing Dapr from doing so dynamically. You need to enable `disableEntityManagement` and assign your Dapr-using application with an IAM Role, with a policy like: ```json { diff --git a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-azure-eventhubs.md b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-azure-eventhubs.md index ca730b57375..24aee2d4c1b 100644 --- a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-azure-eventhubs.md +++ b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-azure-eventhubs.md @@ -9,7 +9,8 @@ aliases: ## Component format -To setup an Azure Event Hubs pub/sub, create a component of type `pubsub.azure.eventhubs`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration. +To set up an Azure Event Hubs pub/sub, create a component of type `pubsub.azure.eventhubs`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration. + Apart from the configuration metadata fields shown below, Azure Event Hubs also supports [Azure Authentication]({{< ref "authenticating-azure.md" >}}) mechanisms. ```yaml @@ -28,6 +29,8 @@ spec: # Use eventHubNamespace when using Azure AD - name: eventHubNamespace value: "namespace" + - name: consumerID # Optional. If not supplied, the runtime will create one. 
+ value: "channel1" - name: enableEntityManagement value: "false" # The following four properties are needed only if enableEntityManagement is set to true @@ -61,6 +64,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr |--------------------|:--------:|---------|---------| | `connectionString` | Y* | Connection string for the Event Hub or the Event Hub namespace.
* Mutually exclusive with `eventHubNamespace` field.
* Required when not using [Azure AD Authentication]({{< ref "authenticating-azure.md" >}}) | `"Endpoint=sb://{EventHubNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key};EntityPath={EventHub}"` or `"Endpoint=sb://{EventHubNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key}"` | `eventHubNamespace` | Y* | The Event Hub Namespace name.
* Mutually exclusive with `connectionString` field.
* Required when using [Azure AD Authentication]({{< ref "authenticating-azure.md" >}}) | `"namespace"` +| `consumerID` | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value. | `"channel1"` | `storageAccountName` | Y | Storage account name to use for the checkpoint store. |`"myeventhubstorage"` | `storageAccountKey` | Y* | Storage account key for the checkpoint store account.
* When using Azure AD, it's possible to omit this if the service principal has access to the storage account too. | `"112233445566778899"` | `storageConnectionString` | Y* | Connection string for the checkpoint store, alternative to specifying `storageAccountKey` | `"DefaultEndpointsProtocol=https;AccountName=myeventhubstorage;AccountKey="` diff --git a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-azure-servicebus-queues.md b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-azure-servicebus-queues.md index 2cabba52d98..8ff7dbd5615 100644 --- a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-azure-servicebus-queues.md +++ b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-azure-servicebus-queues.md @@ -9,7 +9,7 @@ aliases: ## Component format -To setup Azure Service Bus Queues pubsub create a component of type `pubsub.azure.servicebus.queues`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration. +To set up Azure Service Bus Queues pub/sub, create a component of type `pubsub.azure.servicebus.queues`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration. > This component uses queues on Azure Service Bus; see the official documentation for the differences between [topics and queues](https://learn.microsoft.com/azure/service-bus-messaging/service-bus-queues-topics-subscriptions). > For using topics, see the [Azure Service Bus Topics pubsub component]({{< ref "setup-azure-servicebus-topics" >}}). @@ -28,6 +28,8 @@ spec: # Required when not using Azure AD Authentication - name: connectionString value: "Endpoint=sb://{ServiceBusNamespace}.servicebus.windows.net/;SharedAccessKeyName={PolicyName};SharedAccessKey={Key};EntityPath={ServiceBus}" + # - name: consumerID # Optional + # value: channel1 # - name: timeoutInSec # Optional # value: 60 # - name: handlerTimeoutInSec # Optional @@ -69,6 +71,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr | Field | Required | Details | Example | |--------------------|:--------:|---------|---------| | `connectionString` | Y | Shared access policy connection string for the Service Bus. Required unless using Azure AD authentication. | See example above +| `consumerID` | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value. | `"channel1"` | `namespaceName`| N | Parameter to set the address of the Service Bus namespace, as a fully-qualified domain name. Required if using Azure AD authentication. | `"namespace.servicebus.windows.net"` | | `timeoutInSec` | N | Timeout for sending messages and for management operations. Default: `60` |`30` | `handlerTimeoutInSec`| N | Timeout for invoking the app's handler.
Default: `60` | `30` @@ -134,6 +137,8 @@ To set Azure Service Bus metadata when sending a message, set the query paramete > **Note:** The `metadata.MessageId` property does not set the `id` property of the cloud event returned by Dapr and should be treated in isolation. +> **Note:** The `metadata.ScheduledEnqueueTimeUtc` property supports the [RFC1123](https://www.rfc-editor.org/rfc/rfc1123) and [RFC3339](https://www.rfc-editor.org/rfc/rfc3339) timestamp formats. + ### Receiving a message with metadata When Dapr calls your application, it will attach Azure Service Bus message metadata to the request using either HTTP headers or gRPC metadata. diff --git a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-azure-servicebus-topics.md b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-azure-servicebus-topics.md index 0af413e9c76..7d9ab5b1672 100644 --- a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-azure-servicebus-topics.md +++ b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-azure-servicebus-topics.md @@ -10,7 +10,7 @@ aliases: ## Component format -To setup Azure Service Bus Topics pubsub create a component of type `pubsub.azure.servicebus.topics`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration. +To set up Azure Service Bus Topics pub/sub, create a component of type `pubsub.azure.servicebus.topics`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration. > This component uses topics on Azure Service Bus; see the official documentation for the differences between [topics and queues](https://learn.microsoft.com/azure/service-bus-messaging/service-bus-queues-topics-subscriptions). > For using queues, see the [Azure Service Bus Queues pubsub component]({{< ref "setup-azure-servicebus-queues" >}}). @@ -75,7 +75,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr |--------------------|:--------:|---------|---------| | `connectionString` | Y | Shared access policy connection string for the Service Bus. Required unless using Azure AD authentication. | See example above | `namespaceName`| N | Parameter to set the address of the Service Bus namespace, as a fully-qualified domain name. Required if using Azure AD authentication. | `"namespace.servicebus.windows.net"` | -| `consumerID` | N | Consumer ID (a.k.a consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer, i.e. a message is processed only once by one of the consumers in the group. If the consumer ID is not set, the dapr runtime will set it to the dapr application ID. | +| `consumerID` | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value. | | `timeoutInSec` | N | Timeout for sending messages and for management operations. Default: `60` |`30` | `handlerTimeoutInSec`| N | Timeout for invoking the app's handler.
Default: `60` | `30` | `lockRenewalInSec` | N | Defines the frequency at which buffered message locks will be renewed. Default: `20`. | `20` @@ -142,6 +142,8 @@ To set Azure Service Bus metadata when sending a message, set the query paramete > **NOTE:** If the `metadata.SessionId` property is not set but the topic requires sessions then an empty session id will be used. +> **NOTE:** The `metadata.ScheduledEnqueueTimeUtc` property supports the [RFC1123](https://www.rfc-editor.org/rfc/rfc1123) and [RFC3339](https://www.rfc-editor.org/rfc/rfc3339) timestamp formats. + ### Receiving a message with metadata When Dapr calls your application, it will attach Azure Service Bus message metadata to the request using either HTTP headers or gRPC metadata. diff --git a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-gcp-pubsub.md b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-gcp-pubsub.md index 69e98e64bc7..592c0252e3b 100644 --- a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-gcp-pubsub.md +++ b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-gcp-pubsub.md @@ -1,7 +1,7 @@ --- type: docs -title: "GCP Pub/Sub" -linkTitle: "GCP Pub/Sub" +title: "GCP" +linkTitle: "GCP" description: "Detailed documentation on the GCP Pub/Sub component" aliases: - "/operations/components/setup-pubsub/supported-pubsub/setup-gcp/" @@ -10,7 +10,7 @@ aliases: ## Create a Dapr component -To setup GCP pubsub create a component of type `pubsub.gcp.pubsub`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration +To set up GCP pub/sub, create a component of type `pubsub.gcp.pubsub`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration. ```yaml apiVersion: dapr.io/v1alpha1 @@ -72,7 +72,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr |--------------------|:--------:|---------|---------| | projectId | Y | GCP project id| `myproject-123` | endpoint | N | GCP endpoint for the component to use. Only used for local development (for example) with [GCP Pub/Sub Emulator](https://cloud.google.com/pubsub/docs/emulator). The `endpoint` is unnecessary when running against the GCP production API. | `"http://localhost:8085"` -| `consumerID` | N | The Consumer ID organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the consumer ID is not set, the Dapr runtime will set it to the Dapr application ID. The `consumerID`, along with the `topic` provided as part of the request, are used to build the Pub/Sub subscription ID | +| `consumerID` | N | The Consumer ID organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value.
The `consumerID`, along with the `topic` provided as part of the request, is used to build the Pub/Sub subscription ID | | identityProjectId | N | If the GCP pubsub project is different from the identity project, specify the identity project using this attribute | `"myproject-123"` | privateKeyId | N | If using explicit credentials, this field should contain the `private_key_id` field from the service account json document | `"my-private-key"` | privateKey | N | If using explicit credentials, this field should contain the `private_key` field from the service account json | `-----BEGIN PRIVATE KEY-----MIIBVgIBADANBgkqhkiG9w0B` diff --git a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-inmemory.md b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-inmemory.md index 8a6a8951b0d..e2275a4652c 100644 --- a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-inmemory.md +++ b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-inmemory.md @@ -1,13 +1,13 @@ --- type: docs -title: "In Memory" -linkTitle: "In Memory" +title: "In-memory" +linkTitle: "In-memory" description: "Detailed documentation on the In Memory pubsub component" aliases: - "/operations/components/setup-pubsub/supported-pubsub/setup-inmemory/" --- -The In Memory pub/sub component is useful for development purposes and works inside of a single machine boundary. +The in-memory pub/sub component operates within a single Dapr sidecar. This is primarily meant for development purposes. State is not replicated across multiple sidecars and is lost when the Dapr sidecar is restarted. ## Component format @@ -25,6 +25,7 @@ spec: > Note: in-memory does not require any specific metadata for the component to work, however spec.metadata is a required field. ## Related links + - [Basic schema for a Dapr component]({{< ref component-schema >}}) - Read [this guide]({{< ref "howto-publish-subscribe.md#step-2-publish-a-topic" >}}) for instructions on configuring pub/sub components - [Pub/Sub building block]({{< ref pubsub >}}) diff --git a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-jetstream.md b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-jetstream.md index d62a73fe3e0..cfb88b1b032 100644 --- a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-jetstream.md +++ b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-jetstream.md @@ -8,10 +8,7 @@ aliases: --- ## Component format -To setup JetStream pubsub create a component of type `pubsub.jetstream`. See -[this guide]({{< ref -"howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to -create and apply a pubsub configuration. +To set up JetStream pub/sub, create a component of type `pubsub.jetstream`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
```yaml apiVersion: dapr.io/v1alpha1 @@ -39,9 +36,9 @@ spec: - name: streamName value: "my-stream" - name: durableName - value: "my-durable" + value: "my-durable-subscription" - name: queueGroupName - value: "my-queue" + value: "my-queue-group" - name: startSequence value: 1 - name: startTime # In Unix format @@ -146,6 +143,31 @@ It is essential to create a NATS JetStream for a specific subject. For example, nats -s localhost:4222 stream add myStream --subjects mySubject ``` +## Example: Competing consumers pattern + +Let's say you'd like each message to be processed by only one application or pod with the same app ID. Typically, the `consumerID` metadata spec helps you define competing consumers. + +Since `consumerID` is not supported in NATS JetStream, you need to specify `durableName` and `queueGroupName` to achieve the competing consumers pattern. For example: + +```yaml +apiVersion: dapr.io/v1alpha1 +kind: Component +metadata: + name: pubsub +spec: + type: pubsub.jetstream + version: v1 + metadata: + - name: name + value: "my-conn-name" + - name: streamName + value: "my-stream" + - name: durableName + value: "my-durable-subscription" + - name: queueGroupName + value: "my-queue-group" +``` + ## Related links - [Basic schema for a Dapr component]({{< ref component-schema >}}) - Read [this guide]({{< ref "howto-publish-subscribe.md#step-2-publish-a-topic" >}}) for instructions on configuring pub/sub components diff --git a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-kubemq.md b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-kubemq.md index 240010115e6..28080ac150c 100644 --- a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-kubemq.md +++ b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-kubemq.md @@ -9,7 +9,7 @@ aliases: ## Component format -To setup KubeMQ pub/sub, create a component of type `pubsub.kubemq`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration. +To set up KubeMQ pub/sub, create a component of type `pubsub.kubemq`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration. ```yaml apiVersion: dapr.io/v1alpha1 @@ -24,6 +24,8 @@ spec: value: localhost:50000 - name: store value: false + - name: consumerID + value: channel1 ``` ## Spec metadata fields @@ -32,6 +34,7 @@ spec: |-------------------|:--------:|-----------------------------------------------------------------------------------------------------------------------------|----------------------------------------| | address | Y | Address of the KubeMQ server | `"localhost:50000"` | | store | N | type of pubsub, true: pubsub persisted (EventsStore), false: pubsub in-memory (Events) | `true` or `false` (default is `false`) | +| consumerID | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value.
| `"channel1"` | clientID | N | Name for client id connection | `sub-client-12345` | | authToken | N | Auth JWT token for connection Check out [KubeMQ Authentication](https://docs.kubemq.io/learn/access-control/authentication) | `ew...` | | group | N | Subscriber group for load balancing | `g1` | diff --git a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-mqtt.md b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-mqtt.md index fe2843c3b94..33d555f4561 100644 --- a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-mqtt.md +++ b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-mqtt.md @@ -9,7 +9,7 @@ aliases: ## Component format -To setup MQTT pubsub create a component of type `pubsub.mqtt`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration +To set up MQTT pub/sub, create a component of type `pubsub.mqtt`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration. ```yaml apiVersion: dapr.io/v1alpha1 @@ -28,6 +28,8 @@ spec: value: "false" - name: cleanSession value: "false" + - name: consumerID + value: "channel1" ``` {{% alert title="Warning" color="warning" %}} @@ -62,7 +64,7 @@ There is a crucial difference between the two ways of retries: ### Communication using TLS -To configure communication using TLS, ensure that the MQTT broker (e.g. mosquitto) is configured to support certificates and provide the `caCert`, `clientCert`, `clientKey` metadata in the component configuration. For example: +To configure communication using TLS, ensure that the MQTT broker (for example, mosquitto) is configured to support certificates and provide the `caCert`, `clientCert`, `clientKey` metadata in the component configuration. For example: ```yaml apiVersion: dapr.io/v1alpha1 diff --git a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-mqtt3.md b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-mqtt3.md index bd55a6e83f8..ae1d41b5531 100644 --- a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-mqtt3.md +++ b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-mqtt3.md @@ -10,7 +10,7 @@ aliases: ## Component format -To setup a MQTT3 pubsub create a component of type `pubsub.mqtt3`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration +To set up a MQTT3 pub/sub, create a component of type `pubsub.mqtt3`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration. ```yaml apiVersion: dapr.io/v1alpha1 @@ -30,6 +30,8 @@ spec: value: "false" - name: qos value: "1" + - name: consumerID + value: "channel1" ``` {{% alert title="Warning" color="warning" %}} @@ -51,7 +53,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr ### Communication using TLS -To configure communication using TLS, ensure that the MQTT broker (e.g. 
emqx) is configured to support certificates and provide the `caCert`, `clientCert`, `clientKey` metadata in the component configuration. For example: +To configure communication using TLS, ensure that the MQTT broker (for example, emqx) is configured to support certificates and provide the `caCert`, `clientCert`, `clientKey` metadata in the component configuration. For example: ```yaml apiVersion: dapr.io/v1alpha1 diff --git a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-nats-streaming.md b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-nats-streaming.md index 553d0bada84..3a2f9d21972 100644 --- a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-nats-streaming.md +++ b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-nats-streaming.md @@ -7,8 +7,17 @@ aliases: - "/operations/components/setup-pubsub/supported-pubsub/setup-nats-streaming/" --- +## ⚠️ Deprecation notice + +{{% alert title="Warning" color="warning" %}} +This component is **deprecated** because the [NATS Streaming Server](https://nats-io.gitbook.io/legacy-nats-docs/nats-streaming-server-aka-stan/developing-with-stan) was deprecated in June 2023 and no longer receives updates. Users are encouraged to switch to using [JetStream]({{< ref setup-jetstream >}}) as an alternative. + +This component will be **removed in the Dapr v1.13 release**. +{{% /alert %}} + ## Component format -To set up NATS Streaming pub/sub, create a component of type `pubsub.natsstreaming`. See [the how-to guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration. + +To set up NATS Streaming pub/sub, create a component of type `pubsub.natsstreaming`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration. ```yaml apiVersion: dapr.io/v1alpha1 @@ -25,6 +34,8 @@ spec: value: "clusterId" - name: concurrencyMode value: parallel + - name: consumerID # Optional. If not supplied, runtime will create one. + value: "channel1" # below are subscription configuration. - name: subscriptionType value: # Required. Allowed values: topic, queue. @@ -55,10 +66,6 @@ spec: The above example uses secrets as plain strings. It is recommended to [use a secret store for the secrets]({{< ref component-secrets.md >}}). {{% /alert %}} -{{% alert title="Warning" color="warning" %}} -NATS Streaming has been [deprecated](https://github.com/nats-io/nats-streaming-server/#warning--deprecation-notice-warning). Consider using [NATS JetStream]({{< ref setup-jetstream >}}) going forward. -{{% /alert %}} - ## Spec metadata fields | Field | Required | Details | Example | @@ -66,6 +73,7 @@ NATS Streaming has been [deprecated](https://github.com/nats-io/nats-streaming-s | natsURL | Y | NATS server address URL | "`nats://localhost:4222`"| | natsStreamingClusterID | Y | NATS cluster ID |`"clusterId"`| | subscriptionType | Y | Subscription type. Allowed values `"topic"`, `"queue"` | `"topic"` | +| consumerID | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group.
If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value. | `"channel1"` | ackWaitTime | N | See [here](https://docs.nats.io/developing-with-nats-streaming/acks#acknowledgements) | `"300ms"`| | maxInFlight | N | See [here](https://docs.nats.io/developing-with-nats-streaming/acks#acknowledgements) | `"25"` | | durableSubscriptionName | N | [Durable subscriptions](https://docs.nats.io/developing-with-nats-streaming/durables) identification name. | `"my-durable"`| diff --git a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-pulsar.md b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-pulsar.md index d7e53c00825..45726e25363 100644 --- a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-pulsar.md +++ b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-pulsar.md @@ -9,7 +9,9 @@ aliases: ## Component format -To setup Apache Pulsar pubsub create a component of type `pubsub.pulsar`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration. For more information on Apache Pulsar [read the docs](https://pulsar.apache.org/docs/en/concepts-overview/) +To set up Apache Pulsar pub/sub, create a component of type `pubsub.pulsar`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration. + +For more information on Apache Pulsar, [read the official docs](https://pulsar.apache.org/docs/en/concepts-overview/). ```yaml apiVersion: dapr.io/v1alpha1 @@ -29,7 +31,7 @@ spec: - name: token value: "eyJrZXlJZCI6InB1bHNhci1wajU0cXd3ZHB6NGIiLCJhbGciOiJIUzI1NiJ9.eyJzd" - name: consumerID - value: "topic1" + value: "channel1" - name: namespace value: "default" - name: persistent @@ -60,6 +62,11 @@ spec: } ``` +{{% alert title="Warning" color="warning" %}} +The above example uses secrets as plain strings. It is recommended to use a [secret store for the secrets]({{< ref component-secrets.md >}}). This component supports storing the `token` parameter and any other sensitive parameters and data as Kubernetes Secrets. +{{% /alert %}} + + ## Spec metadata fields | Field | Required | Details | Example | @@ -68,9 +75,9 @@ spec: | enableTLS | N | Enable TLS. Default: `"false"` | `"true"`, `"false"` | | token | N | Enable Authentication. | [How to create pulsar token](https://pulsar.apache.org/docs/en/security-jwt/#generate-tokens)| | tenant | N | The topic tenant within the instance. Tenants are essential to multi-tenancy in Pulsar, and spread across clusters. Default: `"public"` | `"public"` | -| consumerID | N | Used to set the subscription name or consumer ID. | `"topic1"` +| consumerID | N | Used to set the subscription name or consumer ID. | `"channel1"` | namespace | N | The administrative unit of the topic, which acts as a grouping mechanism for related topics. Default: `"default"` | `"default"` -| persistent | N | Pulsar supports two kinds of topics: [persistent](https://pulsar.apache.org/docs/en/concepts-architecture-overview#persistent-storage) and [non-persistent](https://pulsar.apache.org/docs/en/concepts-messaging/#non-persistent-topics).
With persistent topics, all messages are durably persisted on disks (if the broker is not standalone, messages are durably persisted on multiple disks), whereas data for non-persistent topics is not persisted to storage disks. +| persistent | N | Pulsar supports two kinds of topics: [persistent](https://pulsar.apache.org/docs/en/concepts-architecture-overview#persistent-storage) and [non-persistent](https://pulsar.apache.org/docs/en/concepts-messaging/#non-persistent-topics). With persistent topics, all messages are durably persisted on disks (if the broker is not standalone, messages are durably persisted on multiple disks), whereas data for non-persistent topics is not persisted to storage disks. | disableBatching | N | Disable batching. When batching is enabled, the default batch delay is set to 10 ms and the default batch size is 1000 messages. Setting `disableBatching: true` makes the producer send messages individually. Default: `"false"` | `"true"`, `"false"`| | batchingMaxPublishDelay | N | batchingMaxPublishDelay sets the time period within which sent messages will be batched, if batching is enabled. If set to a non-zero value, messages will be queued until this time interval elapses, or batchingMaxMessages (see below) or batchingMaxSize (see below) is reached. There are two valid formats: a fraction with a unit suffix, or a plain number that is interpreted as milliseconds. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". Default: `"10ms"` | `"10ms"`, `"10"`| | batchingMaxMessages | N | batchingMaxMessages sets the maximum number of messages permitted in a batch. If set to a value greater than 1, messages will be queued until this threshold is reached, batchingMaxSize (see below) has been reached, or the batch interval has elapsed. Default: `"1000"` | `"1000"`| @@ -80,6 +87,9 @@ spec: | publicKey | N | A public key to be used for publisher and consumer encryption. Value can be one of two options: file path for a local PEM cert, or the cert data string value | | privateKey | N | A private key to be used for consumer encryption. Value can be one of two options: file path for a local PEM cert, or the cert data string value | | keys | N | A comma delimited string containing names of [Pulsar session keys](https://pulsar.apache.org/docs/3.0.x/security-encryption/#how-it-works-in-pulsar). Used in conjunction with `publicKey` for publisher encryption | +| processMode | N | Enable processing multiple messages at once. Default: `"async"` | `"async"`, `"sync"`| +| subscribeType | N | Pulsar supports four kinds of [subscription types](https://pulsar.apache.org/docs/3.0.x/concepts-messaging/#subscription-types). Default: `"shared"` | `"shared"`, `"exclusive"`, `"failover"`, `"key_shared"`| +| partitionKey | N | Sets the key of the message for routing policy. Default: `""` | | ### Enabling message delivery retries @@ -91,8 +101,8 @@ When invoking the Pulsar pub/sub, it's possible to provide an optional delay que These optional parameter names are `metadata.deliverAt` or `metadata.deliverAfter`: -- `deliverAt`: Delay message to deliver at a specified time (RFC3339 format), e.g.
`"2021-09-01T10:00:00Z"` -- `deliverAfter`: Delay message to deliver after a specified amount of time, e.g.`"4h5m3s"` +- `deliverAt`: Delay message to deliver at a specified time (RFC3339 format); for example, `"2021-09-01T10:00:00Z"` +- `deliverAfter`: Delay message to deliver after a specified amount of time; for example,`"4h5m3s"` Examples: @@ -202,6 +212,46 @@ spec: value: "-----BEGIN RSA PRIVATE KEY-----\nMIIEpAIBAAKCAQEA1KDAM4L8RtJ+nLaXBrBhzVpvTemsKVZoAct8A+ShepOHT9lg\nHOCGLFGWNla6K6j+b3AV/P/fAAhwj82vwTDdruXSflvSdmYeFAw3Ypphc1A5oM53\nwSRWhg63potBNWqdDzj8ApYgqjpmjYSQdL5/a3golb36GYFrY0MLFTv7wZ87pmMI\nPsOgGIcPbCHker2fRZ34WXYLb1hkeUpwx4eKjpwcg35gccvR6o/UhbKAuc60V1J9\nWof2sNgtlRaQej45wnpjWYzZrIyk5qUbn0QiCdpIrXvYtANq0Id6gP8zJvUEdPIg\nNuYxEmVCl9jI+8eGI6peD0qIt8U80hf9axhJ3QIDAQABAoIBAQCKuHnM4ac/eXM7\nQPDVX1vfgyHc3hgBPCtNCHnXfGFRvFBqavKGxIElBvGOcBS0CWQ+Rg1Ca5kMx3TQ\njSweSYhH5A7pe3Sa5FK5V6MGxJvRhMSkQi/lJZUBjzaIBJA9jln7pXzdHx8ekE16\nBMPONr6g2dr4nuI9o67xKrtfViwRDGaG6eh7jIMlEqMMc6WqyhvI67rlVDSTHFKX\njlMcozJ3IT8BtTzKg2Tpy7ReVuJEpehum8yn1ZVdAnotBDJxI07DC1cbOP4M2fHM\ngfgPYWmchauZuTeTFu4hrlY5jg0/WLs6by8r/81+vX3QTNvejX9UdTHMSIfQdX82\nAfkCKUVhAoGBAOvGv+YXeTlPRcYC642x5iOyLQm+BiSX4jKtnyJiTU2s/qvvKkIu\nxAOk3OtniT9NaUAHEZE9tI71dDN6IgTLQlAcPCzkVh6Sc5eG0MObqOO7WOMCWBkI\nlaAKKBbd6cGDJkwGCJKnx0pxC9f8R4dw3fmXWgWAr8ENiekMuvjSfjZ5AoGBAObd\ns2L5uiUPTtpyh8WZ7rEvrun3djBhzi+d7rgxEGdditeiLQGKyZbDPMSMBuus/5wH\nwfi0xUq50RtYDbzQQdC3T/C20oHmZbjWK5mDaLRVzWS89YG/NT2Q8eZLBstKqxkx\ngoT77zoUDfRy+CWs1xvXzgxagD5Yg8/OrCuXOqWFAoGAPIw3r6ELknoXEvihASxU\nS4pwInZYIYGXpygLG8teyrnIVOMAWSqlT8JAsXtPNaBtjPHDwyazfZrvEmEk51JD\nX0tA8M5ah1NYt+r5JaKNxp3P/8wUT6lyszyoeubWJsnFRfSusuq/NRC+1+KDg/aq\nKnSBu7QGbm9JoT2RrmBv5RECgYBRn8Lj1I1muvHTNDkiuRj2VniOSirkUkA2/6y+\nPMKi+SS0tqcY63v4rNCYYTW1L7Yz8V44U5mJoQb4lvpMbolGhPljjxAAU3hVkItb\nvGVRlSCIZHKczADD4rJUDOS7DYxO3P1bjUN4kkyYx+lKUMDBHFzCa2D6Kgt4dobS\n5qYajQKBgQC7u7MFPkkEMqNqNGu5erytQkBq1v1Ipmf9rCi3iIj4XJLopxMgw0fx\n6jwcwNInl72KzoUBLnGQ9PKGVeBcgEgdI+a+tq+1TJo6Ta+hZSx+4AYiKY18eRKG\neNuER9NOcSVJ7Eqkcw4viCGyYDm2vgNV9HJ0VlAo3RDh8x5spEN+mg==\n-----END RSA PRIVATE KEY-----\n" ``` +### Partition Key + +When invoking the Pulsar pub/sub, it's possible to provide an optional partition key by using the `metadata` query parameter in the request url. + +The parameter name is `partitionKey`. + +Example: + +```shell +curl -X POST http://localhost:3500/v1.0/publish/myPlusar/myTopic?metadata.partitionKey=key1 \ + -H "Content-Type: application/json" \ + -d '{ + "data": { + "message": "Hi" + } + }' +``` + +### Message headers + +All other metadata key/value pairs (that are not `partitionKey`) are set as headers in the Pulsar message. For example, set a `correlationId` for the message: + +```shell +curl -X POST http://localhost:3500/v1.0/publish/myPlusar/myTopic?metadata.correlationId=myCorrelationID&metadata.partitionKey=key1 \ + -H "Content-Type: application/json" \ + -d '{ + "data": { + "message": "Hi" + } + }' +``` + +## Order guarantee + +To ensure that messages arrive in order for each consumer subscribed to a specific key, three conditions must be met. + +1. `subscribeType` should be set to `key_shared`. +2. `partitionKey` must be set. +3. `processMode` should be set to `sync`. 
+ ## Create a Pulsar instance {{< tabs "Self-Hosted" "Kubernetes">}} diff --git a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-rabbitmq.md b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-rabbitmq.md index 4650786fdc4..f2fecc6501c 100644 --- a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-rabbitmq.md +++ b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-rabbitmq.md @@ -9,6 +9,8 @@ aliases: ## Component format +To set up RabbitMQ pub/sub, create a component of type `pubsub.rabbitmq`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration. + ```yaml apiVersion: dapr.io/v1alpha1 kind: Component @@ -58,6 +60,8 @@ spec: value: fanout - name: saslExternal value: false + - name: ttlInSeconds + value: 60 ``` {{% alert title="Warning" color="warning" %}} @@ -73,7 +77,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr | hostname | N* | The RabbitMQ hostname. *Mutually exclusive with connectionString field | `localhost` | | username | N* | The RabbitMQ username. *Mutually exclusive with connectionString field | `username` | | password | N* | The RabbitMQ password. *Mutually exclusive with connectionString field | `password` | -| consumerID | N | Consumer ID a.k.a consumer tag organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer, i.e. a message is processed only once by one of the consumers in the group. If the consumer ID is not set, the dapr runtime will set it to the dapr application ID. | +| consumerID | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value. | | durable | N | Whether or not to use [durable](https://www.rabbitmq.com/queues.html#durability) queues. Defaults to `"false"` | `"true"`, `"false"` | deletedWhenUnused | N | Whether or not the queue should be configured to [auto-delete](https://www.rabbitmq.com/queues.html) Defaults to `"true"` | `"true"`, `"false"` | autoAck | N | Whether or not the queue consumer should [auto-ack](https://www.rabbitmq.com/confirms.html) messages. Defaults to `"false"` | `"true"`, `"false"` @@ -87,10 +91,11 @@ The above example uses secrets as plain strings. It is recommended to use a secr | maxLen | N | The maximum number of messages of a queue and its dead letter queue (if dead letter enabled). If both `maxLen` and `maxLenBytes` are set then both will apply; whichever limit is hit first will be enforced. Defaults to no limit. | `"1000"` | | maxLenBytes | N | Maximum length in bytes of a queue and its dead letter queue (if dead letter enabled). If both `maxLen` and `maxLenBytes` are set then both will apply; whichever limit is hit first will be enforced. Defaults to no limit. | `"1048576"` | | exchangeKind | N | Exchange kind of the rabbitmq exchange. Defaults to `"fanout"`. | `"fanout"`,`"topic"` | -| saslExternal | N | With TLS, should the username be taken from an additional field (e.g. CN.)
See [RabbitMQ Authentication Mechanisms](https://www.rabbitmq.com/access-control.html#mechanisms). Defaults to `"false"`. | `"true"`, `"false"` | -| caCert | Required for using TLS | Input/Output | Certificate Authority (CA) certificate in PEM format for verifying server TLS certificates. | `"-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----"` -| clientCert | Required for using TLS | Input/Output | TLS client certificate in PEM format. Must be used with `clientKey`. | `"-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----"` -| clientKey | Required for using TLS | Input/Output | TLS client key in PEM format. Must be used with `clientCert`. Can be `secretKeyRef` to use a secret reference. | `"-----BEGIN RSA PRIVATE KEY-----\n\n-----END RSA PRIVATE KEY-----"` +| saslExternal | N | With TLS, should the username be taken from an additional field (for example, CN). See [RabbitMQ Authentication Mechanisms](https://www.rabbitmq.com/access-control.html#mechanisms). Defaults to `"false"`. | `"true"`, `"false"` | +| ttlInSeconds | N | Set message TTL at the component level, which can be overridden by message-level TTL per request. | `"60"` | +| caCert | Required for using TLS | Certificate Authority (CA) certificate in PEM format for verifying server TLS certificates. | `"-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----"` +| clientCert | Required for using TLS | TLS client certificate in PEM format. Must be used with `clientKey`. | `"-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----"` +| clientKey | Required for using TLS | TLS client key in PEM format. Must be used with `clientCert`. Can be `secretKeyRef` to use a secret reference. | `"-----BEGIN RSA PRIVATE KEY-----\n\n-----END RSA PRIVATE KEY-----"` ## Communication using TLS @@ -407,6 +412,14 @@ client.PublishEvent(ctx, PUBSUB_NAME, TOPIC_NAME, []byte(strconv.Itoa(orderId)), {{< /tabs >}} +## Time-to-live + +You can set a time-to-live (TTL) value at either the message or component level. Set default component-level TTL using the component spec `ttlInSeconds` field in your component. + +{{% alert title="Note" color="primary" %}} +If you set both component-level and message-level TTL, the default component-level TTL is ignored in favor of the message-level TTL. +{{% /alert %}} + ## Related links - [Basic schema for a Dapr component]({{< ref component-schema >}}) - Read [this guide]({{< ref "howto-publish-subscribe.md#step-2-publish-a-topic" >}}) for instructions on configuring pub/sub components diff --git a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-redis-pubsub.md b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-redis-pubsub.md index f67256450a7..73b9cc00633 100644 --- a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-redis-pubsub.md +++ b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-redis-pubsub.md @@ -9,7 +9,7 @@ aliases: ## Component format -To setup Redis Streams pubsub create a component of type `pubsub.redis`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration. +To set up Redis Streams pub/sub, create a component of type `pubsub.redis`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
```yaml apiVersion: dapr.io/v1alpha1 diff --git a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-rocketmq.md b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-rocketmq.md index 1b6a5a87e35..b0e397441d4 100644 --- a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-rocketmq.md +++ b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-rocketmq.md @@ -8,7 +8,7 @@ aliases: --- ## Component format -To setup RocketMQ pubsub, create a component of type `pubsub.rocketmq`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pubsub configuration. +To set up RocketMQ pub/sub, create a component of type `pubsub.rocketmq`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration. ```yaml apiVersion: dapr.io/v1alpha1 @@ -25,6 +25,8 @@ spec: value: dapr-rocketmq-test-g-c - name: producerGroup value: dapr-rocketmq-test-g-p + - name: consumerID + value: topic - name: nameSpace value: dapr-test - name: nameServer @@ -47,6 +49,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr | instanceName | N | Instance name | `time.Now().String()` | `dapr-rocketmq-test` | | consumerGroup | N | Consumer group name. Recommended. If `producerGroup` is `null`,`groupName` is used. | | `dapr-rocketmq-test-g-c ` | | producerGroup (consumerID) | N | Producer group name. Recommended. If `producerGroup` is `null`,`consumerID` is used. If `consumerID` also is null, `groupName` is used. | | `dapr-rocketmq-test-g-p` | +| consumerID | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value. | `"channel1"` | groupName | N | Consumer/Producer group name. **Deprecated**. | | `dapr-rocketmq-test-g` | | nameSpace | N | RocketMQ namespace | | `dapr-rocketmq` | | nameServerDomain | N | RocketMQ name server domain | | `https://my-app.net:8080/nsaddr` | diff --git a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-solace-amqp.md b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-solace-amqp.md index 6bfe09f49e8..aecbc86fc14 100644 --- a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-solace-amqp.md +++ b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-solace-amqp.md @@ -9,7 +9,7 @@ aliases: ## Component format -To setup Solace-AMQP pub/sub, create a component of type `pubsub.solace.amqp`. See [this guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration. +To set up Solace-AMQP pub/sub, create a component of type `pubsub.solace.amqp`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
```yaml apiVersion: dapr.io/v1alpha1 @@ -26,6 +26,8 @@ spec: value: 'default' - name: password value: 'default' + - name: consumerID + value: 'channel1' ``` {{% alert title="Warning" color="warning" %}} @@ -39,6 +41,7 @@ The above example uses secrets as plain strings. It is recommended to use a secr | url | Y | Address of the AMQP broker. Can be `secretKeyRef` to use a secret reference.
Use the **`amqp://`** URI scheme for non-TLS communication.
Use the **`amqps://`** URI scheme for TLS communication. | `"amqp://host.domain[:port]"` | username | Y | The username to connect to the broker. Only required if anonymous is not specified or set to `false`. | `default` | password | Y | The password to connect to the broker. Only required if anonymous is not specified or set to `false`. | `default` +| consumerID | N | Consumer ID (consumer tag) organizes one or more consumers into a group. Consumers with the same consumer ID work as one virtual consumer; for example, a message is processed only once by one of the consumers in the group. If the `consumerID` is not provided, the Dapr runtime sets it to the Dapr application ID (`appID`) value. | `"channel1"` | anonymous | N | To connect to the broker without credential validation. Only works if enabled on the broker. A username and password would not be required if this is set to `true`. | `true` | caCert | Required for using TLS | Certificate Authority (CA) certificate in PEM format for verifying server TLS certificates. | `"-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----"` | clientCert | Required for using TLS | TLS client certificate in PEM format. Must be used with `clientKey`. | `"-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----"` diff --git a/daprdocs/content/en/reference/components-reference/supported-secret-stores/_index.md b/daprdocs/content/en/reference/components-reference/supported-secret-stores/_index.md index c59000f1a76..64277d89579 100644 --- a/daprdocs/content/en/reference/components-reference/supported-secret-stores/_index.md +++ b/daprdocs/content/en/reference/components-reference/supported-secret-stores/_index.md @@ -9,6 +9,8 @@ aliases: no_list: true --- +The following table lists secret stores supported by the Dapr secrets building block. [Learn how to set up different secret stores for Dapr secrets management.]({{< ref setup-secret-store.md >}}) + {{< partial "components/description.html" >}} {{< partial "components/secret-stores.html" >}} diff --git a/daprdocs/content/en/reference/components-reference/supported-secret-stores/alicloud-oos-parameter-store.md b/daprdocs/content/en/reference/components-reference/supported-secret-stores/alicloud-oos-parameter-store.md index 360ef4d045b..39c4493a08e 100644 --- a/daprdocs/content/en/reference/components-reference/supported-secret-stores/alicloud-oos-parameter-store.md +++ b/daprdocs/content/en/reference/components-reference/supported-secret-stores/alicloud-oos-parameter-store.md @@ -43,6 +43,15 @@ The above example uses secrets as plain strings. It is recommended to use a loca | accessKeySecret | Y | The AlibabaCloud Access Key Secret to access this resource | `"accessKeySecret"` | | securityToken | N | The AlibabaCloud Security Token to use | `"securityToken"` | +## Optional per-request metadata properties + +The following [optional query parameters]({{< ref "secrets_api.md#query-parameters" >}}) can be provided when retrieving secrets from this secret store: + +Query Parameter | Description +--------- | ----------- +`metadata.version_id` | Version for the given secret key +`metadata.path` | (For bulk requests only) The path from the metadata. If not set, defaults to root path (all secrets). + ## Create an AlibabaCloud OOS Parameter Store instance Setup AlibabaCloud OOS Parameter Store using the AlibabaCloud documentation: https://www.alibabacloud.com/help/en/doc-detail/186828.html.
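+
+As a hedged sketch of how the per-request query parameters above are passed to the [secrets API]({{< ref "secrets_api.md#query-parameters" >}}) — the store name `alicloudoos`, the secret name, and the path are placeholders:
+
+```shell
+# Fetch a specific version of a single secret through the Dapr sidecar
+curl "http://localhost:3500/v1.0/secrets/alicloudoos/mysecret?metadata.version_id=1"
+
+# Bulk-fetch all secrets under a path (bulk requests only)
+curl "http://localhost:3500/v1.0/secrets/alicloudoos/bulk?metadata.path=/my-app"
+```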
diff --git a/daprdocs/content/en/reference/components-reference/supported-secret-stores/aws-secret-manager.md b/daprdocs/content/en/reference/components-reference/supported-secret-stores/aws-secret-manager.md index e57aece7bd0..1a70638806e 100644 --- a/daprdocs/content/en/reference/components-reference/supported-secret-stores/aws-secret-manager.md +++ b/daprdocs/content/en/reference/components-reference/supported-secret-stores/aws-secret-manager.md @@ -48,6 +48,15 @@ The above example uses secrets as plain strings. It is recommended to use a loca When running the Dapr sidecar (daprd) with your application on EKS (AWS Kubernetes), if you're using a node/pod that has already been attached to an IAM policy defining access to AWS resources, you **must not** provide AWS access-key, secret-key, and tokens in the definition of the component spec you're using. {{% /alert %}} +## Optional per-request metadata properties + +The following [optional query parameters]({{< ref "secrets_api#query-parameters" >}}) can be provided when retrieving secrets from this secret store: + +Query Parameter | Description +--------- | ----------- +`metadata.version_id` | Version for the given secret key. +`metadata.version_stage` | Version stage for the given secret key. + ## Create an AWS Secrets Manager instance Setup AWS Secrets Manager using the AWS documentation: https://docs.aws.amazon.com/secretsmanager/latest/userguide/tutorials_basic.html. diff --git a/daprdocs/content/en/reference/components-reference/supported-secret-stores/azure-keyvault.md b/daprdocs/content/en/reference/components-reference/supported-secret-stores/azure-keyvault.md index 91ba14867ab..57286c1b3bd 100644 --- a/daprdocs/content/en/reference/components-reference/supported-secret-stores/azure-keyvault.md +++ b/daprdocs/content/en/reference/components-reference/supported-secret-stores/azure-keyvault.md @@ -53,6 +53,15 @@ The Azure Key Vault secret store component supports authentication with Azure AD Additionally, you must provide the authentication fields as explained in the [Authenticating to Azure]({{< ref authenticating-azure.md >}}) document. +## Optional per-request metadata properties + +The following [optional query parameters]({{< ref "secrets_api#query-parameters" >}}) can be provided when retrieving secrets from this secret store: + +Query Parameter | Description +--------- | ----------- +`metadata.version_id` | Version for the given secret key. +`metadata.maxresults` | (For bulk requests only) Number of secrets to return, after which the request will be truncated. + ## Example ### Prerequisites diff --git a/daprdocs/content/en/reference/components-reference/supported-secret-stores/gcp-secret-manager.md b/daprdocs/content/en/reference/components-reference/supported-secret-stores/gcp-secret-manager.md index 53d22f70802..24a1a155bfe 100644 --- a/daprdocs/content/en/reference/components-reference/supported-secret-stores/gcp-secret-manager.md +++ b/daprdocs/content/en/reference/components-reference/supported-secret-stores/gcp-secret-manager.md @@ -50,7 +50,7 @@ The above example uses secrets as plain strings. It is recommended to use a loca | Field | Required | Details | Example | |--------------------|:--------:|--------------------------------|---------------------| -| type | Y | The type of the account. | `"serviceAccount"` | +| type | Y | The type of the account. | `"service_account"` | | project_id | Y | The project ID associated with this component. 
| `"project_id"` | | private_key_id | N | The private key ID | `"privatekey"` | | client_email | Y | The client email address | `"client@example.com"` | @@ -61,6 +61,14 @@ The above example uses secrets as plain strings. It is recommended to use a loca | client_x509_cert_url | N | The certificate URL for the client | `"https://www.googleapis.com/robot/v1/metadata/x509/.iam.gserviceaccount.com"`| | private_key | Y | The private key for authentication | `"privateKey"` | +## Optional per-request metadata properties + +The following [optional query parameters]({{< ref "secrets_api#query-parameters" >}}) can be provided to the GCP Secret Manager component: + +Query Parameter | Description +--------- | ----------- +`metadata.version_id` | Version for the given secret key. + ## Setup GCP Secret Manager instance Setup GCP Secret Manager using the GCP documentation: https://cloud.google.com/secret-manager/docs/quickstart. diff --git a/daprdocs/content/en/reference/components-reference/supported-secret-stores/hashicorp-vault.md b/daprdocs/content/en/reference/components-reference/supported-secret-stores/hashicorp-vault.md index 19eaebdbdaf..d73ba7db0c2 100644 --- a/daprdocs/content/en/reference/components-reference/supported-secret-stores/hashicorp-vault.md +++ b/daprdocs/content/en/reference/components-reference/supported-secret-stores/hashicorp-vault.md @@ -66,6 +66,14 @@ The above example uses secrets as plain strings. It is recommended to use a loca | enginePath | N | The [engine](https://www.vaultproject.io/api-docs/secret/kv/kv-v2) path in vault. Defaults to `"secret"` | `"kv"`, `"any"` | | vaultValueType | N | Vault value type. `map` means to parse the value into `map[string]string`, `text` means to use the value as a string. 'map' sets the `multipleKeyValuesPerSecret` behavior. `text` makes Vault behave as a secret store with name/value semantics. Defaults to `"map"` | `"map"`, `"text"` | +## Optional per-request metadata properties + +The following [optional query parameters]({{< ref "secrets_api#query-parameters" >}}) can be provided to Hashicorp Vault secret store component: + +Query Parameter | Description +--------- | ----------- +`metadata.version_id` | Version for the given secret key. + ## Setup Hashicorp Vault instance {{< tabs "Self-Hosted" "Kubernetes" >}} diff --git a/daprdocs/content/en/reference/components-reference/supported-secret-stores/huaweicloud-csms.md b/daprdocs/content/en/reference/components-reference/supported-secret-stores/huaweicloud-csms.md index 5f5685157ba..329a1296138 100644 --- a/daprdocs/content/en/reference/components-reference/supported-secret-stores/huaweicloud-csms.md +++ b/daprdocs/content/en/reference/components-reference/supported-secret-stores/huaweicloud-csms.md @@ -40,6 +40,15 @@ The above example uses secrets as plain strings. It is recommended to use a loca | accessKey | Y | The HuaweiCloud Access Key to access this resource | `"accessKey"` | | secretAccessKey | Y | The HuaweiCloud Secret Access Key to access this resource | `"secretAccessKey"` | +## Optional per-request metadata properties + +The following [optional query parameters]({{< ref "secrets_api#query-parameters" >}}) can be provided when retrieving secrets from this secret store: + +Query Parameter | Description +--------- | ----------- +`metadata.version_id` | Version for the given secret key. 
+ + ## Setup HuaweiCloud Cloud Secret Management Service (CSMS) instance Setup HuaweiCloud Cloud Secret Management Service (CSMS) using the HuaweiCloud documentation: https://support.huaweicloud.com/intl/en-us/usermanual-dew/dew_01_9993.html. diff --git a/daprdocs/content/en/reference/components-reference/supported-secret-stores/kubernetes-secret-store.md b/daprdocs/content/en/reference/components-reference/supported-secret-stores/kubernetes-secret-store.md index e323f92caf1..b629503d827 100644 --- a/daprdocs/content/en/reference/components-reference/supported-secret-stores/kubernetes-secret-store.md +++ b/daprdocs/content/en/reference/components-reference/supported-secret-stores/kubernetes-secret-store.md @@ -34,6 +34,14 @@ spec: ## Spec metadata fields For the Kubernetes secret store component, there are no metadata attributes. +## Optional per-request metadata properties + +The following [optional query parameters]({{< ref "secrets_api#query-parameters" >}}) can be provided to the Kubernetes secret store component: + +Query Parameter | Description +--------- | ----------- +`metadata.namespace`| The namespace of the secret. If not specified, the namespace of the pod is used. + ## Related links - [Secrets building block]({{< ref secrets >}}) - [How-To: Retrieve a secret]({{< ref "howto-secrets.md" >}}) diff --git a/daprdocs/content/en/reference/components-reference/supported-state-stores/_index.md b/daprdocs/content/en/reference/components-reference/supported-state-stores/_index.md index 1ec0a0bfff0..c25a5139e97 100644 --- a/daprdocs/content/en/reference/components-reference/supported-state-stores/_index.md +++ b/daprdocs/content/en/reference/components-reference/supported-state-stores/_index.md @@ -9,10 +9,12 @@ aliases: no_list: true --- -{{< partial "components/description.html" >}} +The following table lists state stores supported, at various levels, by the Dapr state management building block. [Learn how to set up different state stores for Dapr state management.]({{< ref setup-state-store.md >}}) -The following stores are supported, at various levels, by the Dapr state management building block: +{{< partial "components/description.html" >}} -> State stores can be used for actors if it supports both transactional operations and etag. +{{% alert title="Note" color="primary" %}} +State stores can be used for actors if they support both transactional operations and ETag. +{{% /alert %}} {{< partial "components/state-stores.html" >}} diff --git a/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-inmemory.md b/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-inmemory.md index d54277d1782..17d8cc4be3e 100644 --- a/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-inmemory.md +++ b/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-inmemory.md @@ -1,20 +1,16 @@ --- type: docs -title: "In Memory" -linkTitle: "In Memory" -description: "Detailed documentation on the In Memory state component" +title: "In-memory" +linkTitle: "In-memory" +description: "Detailed documentation on the in-memory state component" aliases: - "/operations/components/setup-state-store/supported-state-stores/setup-inmemory/" --- -The In Memory state store component is useful for development purposes and works inside of a single machine boundary. - -{{% alert title="Warning" color="warning" %}} - This component **shouldn't be used for production**. It is developer only and will never be stable.
If you come across a scenario and want to use it in production, you can submit an issue and discuss it with the community. - -{{% /alert %}} +The in-memory state store component maintains state in the Dapr sidecar's memory. This is primarily meant for development purposes. State is not replicated across multiple sidecars and is lost when the Dapr sidecar is restarted. ## Component format + To setup in-memory state store, create a component of type `state.in-memory`. See [this guide]({{< ref "howto-get-save-state.md#step-1-setup-a-state-store" >}}) on how to create and apply a state store configuration. ```yaml @@ -31,6 +27,7 @@ spec: > Note: While in-memory does not require any specific metadata for the component to work, `spec.metadata` is a required field. ## Related links + - [Basic schema for a Dapr component]({{< ref component-schema >}}) - Learn [how to create and configure state store components]({{< ref howto-get-save-state.md >}}) - Read more about the [state management building block]({{< ref state-management >}}) diff --git a/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-sqlserver.md b/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-sqlserver.md index 86aa92d9138..e4f48d547b6 100644 --- a/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-sqlserver.md +++ b/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-sqlserver.md @@ -1,15 +1,17 @@ --- type: docs -title: "SQL Server" -linkTitle: "SQL Server" -description: Detailed information on the SQL Server state store component +title: "Microsoft SQL Server & Azure SQL" +linkTitle: "Microsoft SQL Server & Azure SQL" +description: Detailed information on the Microsoft SQL Server state store component aliases: - "/operations/components/setup-state-store/supported-state-stores/setup-sqlserver/" --- ## Component format -To setup SQL Server state store create a component of type `state.sqlserver`. See [this guide]({{< ref "howto-get-save-state.md#step-1-setup-a-state-store" >}}) on how to create and apply a state store configuration. +This state store component can be used with both [Microsoft SQL Server](https://learn.microsoft.com/sql/) and [Azure SQL](https://learn.microsoft.com/azure/azure-sql/). + +To set up this state store, create a component of type `state.sqlserver`. See [this guide]({{< ref "howto-get-save-state.md#step-1-setup-a-state-store" >}}) on how to create and apply a state store configuration. ```yaml @@ -21,30 +23,42 @@ spec: type: state.sqlserver version: v1 metadata: - - name: connectionString - value: # Required. - - name: tableName - value: # Optional. defaults to "state" - - name: keyType - value: # Optional. defaults to "string" - - name: keyLength - value: # Optional. defaults to 200. You be used with "string" keyType - - name: schema - value: # Optional. defaults to "dbo" - - name: indexedProperties - value: # Optional. List of IndexedProperties. - - name: metadataTableName # Optional. Name of the table where to store metadata used by Dapr - value: "dapr_metadata" - - name: cleanupIntervalInSeconds # Optional. 
Cleanup interval in seconds, to remove expired rows - value: 300 - + # Authenticate using SQL Server credentials + - name: connectionString + value: | + Server=myServerName\myInstanceName;Database=myDataBase;User Id=myUsername;Password=myPassword; + + # Authenticate with Azure AD (Azure SQL only) + # "useAzureAD" must be set to "true" + - name: useAzureAD + value: true + # Connection string or URL of the Azure SQL database, optionally containing the database + - name: connectionString + value: | + sqlserver://myServerName.database.windows.net:1433?database=myDataBase + + # Other optional fields (listing default values) + - name: tableName + value: "state" + - name: metadataTableName + value: "dapr_metadata" + - name: schema + value: "dbo" + - name: keyType + value: "string" + - name: keyLength + value: "200" + - name: indexedProperties + value: "" + - name: cleanupIntervalInSeconds + value: "3600" ``` {{% alert title="Warning" color="warning" %}} The above example uses secrets as plain strings. It is recommended to use a secret store for the secrets as described [here]({{< ref component-secrets.md >}}). {{% /alert %}} -If you wish to use SQL server as an [actor state store]({{< ref "state_api.md#configuring-state-store-for-actors" >}}), append the following to the yaml. +If you wish to use SQL Server as an [actor state store]({{< ref "state_api.md#configuring-state-store-for-actors" >}}), append the following to the metadata: ```yaml - name: actorStateStore @@ -53,24 +67,43 @@ If you wish to use SQL server as an [actor state store]({{< ref "state_api.md#co ## Spec metadata fields +### Authenticate using SQL Server credentials + +The following metadata options are **required** to authenticate using SQL Server credentials. This is supported on both SQL Server and Azure SQL. + +| Field | Required | Details | Example | +|--------|:--------:|---------|---------| +| `connectionString` | Y | The connection string used to connect.
If the connection string contains the database, it must already exist. Otherwise, if the database is omitted, a default database named "Dapr" is created. | `"Server=myServerName\myInstanceName;Database=myDataBase;User Id=myUsername;Password=myPassword;"` | + +### Authenticate using Azure AD + +Authenticating with Azure AD is supported with Azure SQL only. All authentication methods supported by Dapr can be used, including client credentials ("service principal") and Managed Identity. + +| Field | Required | Details | Example | +|--------|:--------:|---------|---------| +| `useAzureAD` | Y | Must be set to `true` to enable the component to retrieve access tokens from Azure AD. | `"true"` | +| `connectionString` | Y | The connection string or URL of the Azure SQL database, **without credentials**.
If the connection string contains the database, it must already exist. Otherwise, if the database is omitted, a default database named "Dapr" is created. | `"sqlserver://myServerName.database.windows.net:1433?database=myDataBase"` | +| `azureTenantId` | N | ID of the Azure AD tenant | `"cd4b2887-304c-47e1-b4d5-65447fdd542b"` | +| `azureClientId` | N | Client ID (application ID) | `"c7dd251f-811f-4ba2-a905-acd4d3f8f08b"` | +| `azureClientSecret` | N | Client secret (application password) | `"Ecy3XG7zVZK3/vl/a2NSB+a1zXLa8RnMum/IgD0E"` | + +### Other metadata options + | Field | Required | Details | Example | |--------------------|:--------:|---------|---------| -| connectionString | Y | The connection string used to connect. If the connection string contains the database it must already exist. If the database is omitted a default database named `"Dapr"` is created. | `"Server=myServerName\myInstanceName;Database=myDataBase;User Id=myUsername;Password=myPassword;"` -| tableName | N | The name of the table to use. Alpha-numeric with underscores. Defaults to `"state"` | `"table_name"` -| keyType | N | The type of key used. Defaults to `"string"` | `"string"` -| keyLength | N | The max length of key. Used along with `"string"` keytype. Defaults to `"200"` | `"200"` -| schema | N | The schema to use. Defaults to `"dbo"` | `"dapr"`,`"dbo"` -| indexedProperties | N | List of IndexedProperties. | `'[{"column": "transactionid", "property": "id", "type": "int"}, {"column": "customerid", "property": "customer", "type": "nvarchar(100)"}]'` -| actorStateStore | N | Indicates that Dapr should configure this component for the actor state store ([more information]({{< ref "state_api.md#configuring-state-store-for-actors" >}})). | `"true"` -| metadataTableName | N | Name of the table Dapr uses to store a few metadata properties. Defaults to `dapr_metadata`. | `"dapr_metadata"` -| cleanupIntervalInSeconds | N | Interval, in seconds, to clean up rows with an expired TTL. Default: `3600` (i.e. 1 hour). Setting this to values <=0 disables the periodic cleanup. | `1800`, `-1` +| `tableName` | N | The name of the table to use. Alpha-numeric with underscores. Defaults to `"state"` | `"table_name"` +| `metadataTableName` | N | Name of the table Dapr uses to store a few metadata properties. Defaults to `dapr_metadata`. | `"dapr_metadata"` +| `keyType` | N | The type of key used. Supported values: `"string"` (default), `"uuid"`, `"integer"`.| `"string"` +| `keyLength` | N | The max length of key. Ignored if "keyType" is not `string`. Defaults to `"200"` | `"200"` +| `schema` | N | The schema to use. Defaults to `"dbo"` | `"dapr"`,`"dbo"` +| `indexedProperties` | N | List of indexed properties, as a string containing a JSON document. | `'[{"column": "transactionid", "property": "id", "type": "int"}, {"column": "customerid", "property": "customer", "type": "nvarchar(100)"}]'` +| `actorStateStore` | N | Indicates that Dapr should configure this component for the actor state store ([more information]({{< ref "state_api.md#configuring-state-store-for-actors" >}})). | `"true"` +| `cleanupIntervalInSeconds` | N | Interval, in seconds, to clean up rows with an expired TTL. Default: `"3600"` (i.e. 1 hour). Setting this to values <=0 disables the periodic cleanup. 
| `"1800"`, `"-1"` -## Create Azure SQL instance +## Create a Microsoft SQL Server/Azure SQL instance -[Follow the instructions](https://docs.microsoft.com/azure/azure-sql/database/single-database-create-quickstart?view=azuresql&tabs=azure-portal) from the Azure documentation on how to create a SQL database. The database must be created before Dapr consumes it. - -**Note: SQL Server state store also supports SQL Server running on VMs and in Docker.** +[Follow the instructions](https://docs.microsoft.com/azure/azure-sql/database/single-database-create-quickstart?view=azuresql&tabs=azure-portal) from the Azure documentation on how to create a SQL database. The database must be created before Dapr consumes it. In order to setup SQL Server as a state store, you need the following properties: @@ -104,6 +137,7 @@ CREATE CLUSTERED INDEX expiredate_idx ON state(ExpireDate ASC) ``` ## Related links + - [Basic schema for a Dapr component]({{< ref component-schema >}}) - Read [this guide]({{< ref "howto-get-save-state.md#step-2-save-and-retrieve-a-single-state" >}}) for instructions on configuring state store components - [State management building block]({{< ref state-management >}}) diff --git a/daprdocs/content/en/reference/environment/_index.md b/daprdocs/content/en/reference/environment/_index.md index 201f80f36b5..762bb3592a8 100644 --- a/daprdocs/content/en/reference/environment/_index.md +++ b/daprdocs/content/en/reference/environment/_index.md @@ -12,7 +12,7 @@ The following table lists the environment variables used by the Dapr runtime, CL | -------------------- | ---------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | | APP_ID | Your application | The id for your application, used for service discovery | | APP_PORT | Dapr sidecar | The port your application is listening on | -| APP_API_TOKEN | Your application | The token used by the application to authenticate requests from Dapr API. Read [authenticate requests from Dapr using token authentication]({{< ref app-api-token >}}) for more information. | +| APP_API_TOKEN | Your application | The token used by the application to authenticate requests from Dapr API. Read [authenticate requests from Dapr using token authentication]({{< ref app-api-token >}}) for more information. | | DAPR_HTTP_PORT | Your application | The HTTP port that the Dapr sidecar is listening on. Your application should use this variable to connect to Dapr sidecar instead of hardcoding the port value. Set by the Dapr CLI run command for self-hosted or injected by the `dapr-sidecar-injector` into all the containers in the pod. | | DAPR_GRPC_PORT | Your application | The gRPC port that the Dapr sidecar is listening on. Your application should use this variable to connect to Dapr sidecar instead of hardcoding the port value. Set by the Dapr CLI run command for self-hosted or injected by the `dapr-sidecar-injector` into all the containers in the pod. | | DAPR_API_TOKEN | Dapr sidecar | The token used for Dapr API authentication for requests from the application. [Enable API token authentication in Dapr]({{< ref api-token >}}). 
| @@ -24,4 +24,6 @@ The following table lists the environment variables used by the Dapr runtime, CL | DAPR_HELM_REPO_PASSWORD | A password for a private Helm chart |The password required to access the private Dapr helm chart. If it can be accessed publicly, this env variable does not need to be set| | OTEL_EXPORTER_OTLP_ENDPOINT | OpenTelemetry Tracing | Sets the Open Telemetry (OTEL) server address, turns on tracing. (Example: `http://localhost:4318`) | | OTEL_EXPORTER_OTLP_INSECURE | OpenTelemetry Tracing | Sets the connection to the endpoint as unencrypted. (`true`, `false`) | -| OTEL_EXPORTER_OTLP_PROTOCOL | OpenTelemetry Tracing | The OTLP protocol to use Transport protocol. (`grpc`, `http/protobuf`, `http/json`) | \ No newline at end of file +| OTEL_EXPORTER_OTLP_PROTOCOL | OpenTelemetry Tracing | The OTLP transport protocol to use. (`grpc`, `http/protobuf`, `http/json`) | +| DAPR_COMPONENTS_SOCKETS_FOLDER | Dapr runtime and the .NET, Go, and Java pluggable component SDKs | The location or path where Dapr looks for Pluggable Components Unix Domain Socket files. If unset, this location defaults to `/tmp/dapr-components-sockets` | +| DAPR_COMPONENTS_SOCKETS_EXTENSION | .NET and Java pluggable component SDKs | A per-SDK configuration that indicates the default file extension applied to socket files created by the SDKs. Not a Dapr-enforced behavior. | \ No newline at end of file diff --git a/daprdocs/content/en/operations/components/component-schema.md b/daprdocs/content/en/reference/resource-specs/component-schema.md similarity index 88% rename from daprdocs/content/en/operations/components/component-schema.md rename to daprdocs/content/en/reference/resource-specs/component-schema.md index bf60364cd7d..0face5b9a62 100644 --- a/daprdocs/content/en/operations/components/component-schema.md +++ b/daprdocs/content/en/reference/resource-specs/component-schema.md @@ -1,12 +1,12 @@ --- type: docs -title: "Component schema" -linkTitle: "Component schema" -weight: 100 -description: "The basic schema for a Dapr component" +title: "Component spec" +linkTitle: "Component" +weight: 1000 +description: "The basic spec for a Dapr component" --- -Dapr defines and registers components using a [CustomResourceDefinition](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/). All components are defined as a CRD and can be applied to any hosting environment where Dapr is running, not just Kubernetes. +Dapr defines and registers components using a [resource specification](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/). All components are defined as a resource and can be applied to any hosting environment where Dapr is running, not just Kubernetes. ## Format @@ -26,12 +26,12 @@ spec: value: [METADATA-VALUE] ``` -## Fields +## Spec fields | Field | Required | Details | Example | |--------------------|:--------:|---------|---------| | apiVersion | Y | The version of the Dapr (and Kubernetes if applicable) API you are calling | `dapr.io/v1alpha1` -| kind | Y | The type of CRD. For components is must always be `Component` | `Component` +| kind | Y | The type of resource.
For components it must always be `Component` | `Component` | **metadata** | - | **Information about the component registration** | | metadata.name | Y | The name of the component | `prod-statestore` | metadata.namespace | N | The namespace for the component for hosting environments with namespaces | `myapp-namespace` @@ -76,7 +76,7 @@ spec: value: "false" ``` -## Further reading +## Related links - [Components concept]({{< ref components-concept.md >}}) - [Reference secrets in component definitions]({{< ref component-secrets.md >}}) - [Supported state stores]({{< ref supported-state-stores >}}) diff --git a/daprdocs/content/en/reference/resource-specs/configuration-schema.md b/daprdocs/content/en/reference/resource-specs/configuration-schema.md new file mode 100644 index 00000000000..0af3c65f49a --- /dev/null +++ b/daprdocs/content/en/reference/resource-specs/configuration-schema.md @@ -0,0 +1,105 @@ +--- +type: docs +title: "Configuration spec" +linkTitle: "Configuration" +description: "The basic spec for a Dapr Configuration resource" +weight: 5000 +--- + +The `Configuration` is a Dapr resource that is used to configure the Dapr sidecar, control plane, and others. + +## Sidecar format + +```yaml +apiVersion: dapr.io/v1alpha1 +kind: Configuration +metadata: + name: + namespace: +spec: + api: + allowed: + - name: + version: + protocol: + tracing: + samplingRate: + stdout: true + otel: + endpointAddress: + isSecure: false + protocol: + httpPipeline: # for incoming http calls + handlers: + - name: + type: + appHttpPipeline: # for outgoing http calls + handlers: + - name: + type: + secrets: + scopes: + - storeName: + defaultAccess: + deniedSecrets: + components: + deny: + - + accessControl: + defaultAction: + trustDomain: + policies: + - appId: + defaultAction: + trustDomain: + namespace: "default" + operations: + - name: + httpVerb: ['POST', 'GET'] + action: +``` + +### Spec fields + +| Field | Required | Details | Example | +|--------------------|:--------:|---------|---------| +| accessControl | N | Applied to Dapr sidecar for the called application. Enables the configuration of policies that restrict what operations calling applications can perform (via service invocation) on the called application. | [Learn more about the `accessControl` configuration.]({{< ref invoke-allowlist.md >}}) | +| api | N | Used to enable only the Dapr sidecar APIs used by the application. | [Learn more about the `api` configuration.]({{< ref api-allowlist.md >}}) | +| httpPipeline | N | Configure API middleware pipelines | [Middleware pipeline configuration overview]({{< ref "configuration-overview.md#middleware" >}})
[Learn more about the `httpPipeline` configuration.]({{< ref "middleware.md#configure-api-middleware-pipelines" >}}) | +| appHttpPipeline | N | Configure application middleware pipelines | [Middleware pipeline configuration overview]({{< ref "configuration-overview.md#middleware" >}})
[Learn more about the `appHttpPipeline` configuration.]({{< ref "middleware.md#configure-app-middleware-pipelines" >}}) | +| components | N | Used to specify a denylist of component types that can't be initialized. | [Learn more about the `components` configuration.]({{< ref "configuration-overview.md#disallow-usage-of-certain-component-types" >}}) | +| features | N | Defines the preview features that are enabled/disabled. | [Learn more about the `features` configuration.]({{< ref preview-features.md >}}) | +| logging | N | Configure how logging works in the Dapr runtime. | [Learn more about the `logging` configuration.]({{< ref "configuration-overview.md#logging" >}}) | +| metrics | N | Enable or disable metrics for an application. | [Learn more about the `metrics` configuration.]({{< ref "configuration-overview.md#metrics" >}}) | +| nameResolution | N | Name resolution configuration spec for the service invocation building block. | [Learn more about the `nameResolution` configuration per component.]({{< ref supported-name-resolution.md >}}) | +| secrets | N | Limit the secrets to which your Dapr application has access. | [Learn more about the `secrets` configuration.]({{< ref secret-scope.md >}}) | +| tracing | N | Turns on tracing for an application. | [Learn more about the `tracing` configuration.]({{< ref "configuration-overview.md#tracing" >}}) | + + +## Control plane format + +The `daprsystem` configuration file installed with Dapr applies global settings and is only set up when Dapr is deployed to Kubernetes. + +```yml +apiVersion: dapr.io/v1alpha1 +kind: Configuration +metadata: + name: daprsystem + namespace: default +spec: + mtls: + enabled: true + allowedClockSkew: 15m + workloadCertTTL: 24h +``` + +### Spec fields + +| Field | Required | Details | Example | +|--------------------|:--------:|---------|---------| +| mtls | N | Defines the mTLS configuration | `allowedClockSkew: 15m`
`workloadCertTTL: 24h`
[Learn more about the `mtls` configuration.]({{< ref "configuration-overview.md#mtls-mutual-tls" >}}) | + + +## Related links + +- [Learn more about how to use configuration specs]({{< ref configuration-overview.md >}}) \ No newline at end of file diff --git a/daprdocs/content/en/reference/resource-specs/httpendpoints-reference.md b/daprdocs/content/en/reference/resource-specs/httpendpoints-schema.md similarity index 82% rename from daprdocs/content/en/reference/resource-specs/httpendpoints-reference.md rename to daprdocs/content/en/reference/resource-specs/httpendpoints-schema.md index 66d4f56060e..f6bced2a3c1 100644 --- a/daprdocs/content/en/reference/resource-specs/httpendpoints-reference.md +++ b/daprdocs/content/en/reference/resource-specs/httpendpoints-schema.md @@ -1,16 +1,16 @@ --- type: docs title: "HTTPEndpoint spec" -linkTitle: "HTTPEndpoint spec" -description: "The HTTPEndpoint resource spec" -weight: 300 +linkTitle: "HTTPEndpoint" +description: "The basic spec for a Dapr HTTPEndpoint resource" +weight: 4000 aliases: - "/operations/httpEndpoints/" --- The `HTTPEndpoint` is a Dapr resource that is used to enable the invocation of non-Dapr endpoints from a Dapr application. -## HTTPEndpoint format +## Format ```yaml apiVersion: dapr.io/v1alpha1 @@ -38,4 +38,8 @@ auth: # Optional | Field | Required | Details | Example | |--------------------|:--------:|---------|---------| | baseUrl | Y | Base URL of the non-Dapr endpoint | `"https://api.github.com"`, `"http://api.github.com"` -| headers | N | HTTP request headers for service invocation | `name: "Accept-Language" value: "en-US"`
`name: "Authorization" secretKeyRef.name: "my-secret" secretKeyRef.key: "myGithubToken" ` \ No newline at end of file +| headers | N | HTTP request headers for service invocation | `name: "Accept-Language" value: "en-US"`
`name: "Authorization" secretKeyRef.name: "my-secret" secretKeyRef.key: "myGithubToken" ` + +## Related links + +[Learn how to invoke non-Dapr endpoints.]({{< ref howto-invoke-non-dapr-endpoints.md >}}) \ No newline at end of file diff --git a/daprdocs/content/en/reference/resource-specs/resiliency-schema.md b/daprdocs/content/en/reference/resource-specs/resiliency-schema.md new file mode 100644 index 00000000000..32888adc753 --- /dev/null +++ b/daprdocs/content/en/reference/resource-specs/resiliency-schema.md @@ -0,0 +1,65 @@ +--- +type: docs +title: "Resiliency spec" +linkTitle: "Resiliency" +weight: 3000 +description: "The basic spec for a Dapr resiliency resource" +--- + +The `Resiliency` Dapr resource allows you to define and apply fault tolerance resiliency policies. Resiliency specs are applied when the Dapr sidecar starts. + +## Format + +```yml +apiVersion: dapr.io/v1alpha1 +kind: Resiliency +metadata: + name: +version: v1alpha1 +scopes: + - +spec: + policies: # Required + timeouts: + timeoutName: # Replace with any unique name + retries: + retryName: # Replace with any unique name + policy: + duration: + maxInterval: + maxRetries: + circuitBreakers: + circuitBreakerName: # Replace with any unique name + maxRequests: + timeout: + trip: +targets: # Required + apps: + appID: # Replace with scoped app ID + timeout: + retry: + circuitBreaker: + actors: + myActorType: + timeout: + retry: + circuitBreaker: + circuitBreakerCacheSize: + components: + componentName: # Replace with your component name + outbound: + timeout: + retry: + circuitBreaker: +``` + +## Spec fields + +| Field | Required | Details | Example | +|--------------------|:--------:|---------|---------| +| policies | Y | The configuration of resiliency policies, including:
  • `timeouts`
  • `retries`
  • `circuitBreakers`

[See more examples with all of the built-in policies]({{< ref policies.md >}}) | timeout: `general`
retry: `retryForever`
circuit breaker: `simpleCB` | +| targets | Y | The configuration for the applications, actors, or components that use the resiliency policies.
[See more examples in the resiliency targets guide]({{< ref targets.md >}}) | `apps`
`components`
`actors` | + + ## Related links +[Learn more about resiliency policies and targets]({{< ref resiliency-overview.md >}}) diff --git a/daprdocs/content/en/reference/resource-specs/subscription-schema.md b/daprdocs/content/en/reference/resource-specs/subscription-schema.md new file mode 100644 index 00000000000..55b8bc76f5f --- /dev/null +++ b/daprdocs/content/en/reference/resource-specs/subscription-schema.md @@ -0,0 +1,88 @@ +--- +type: docs +title: "Subscription spec" +linkTitle: "Subscription" +weight: 2000 +description: "The basic spec for a Dapr subscription" +--- + +The `Subscription` Dapr resource allows you to subscribe declaratively to a topic using an external component YAML file. This guide demonstrates two subscription API versions: + +- `v2alpha1` (default spec) +- `v1alpha1` (deprecated) + +## `v2alpha1` format + +The following is the basic `v2alpha1` spec for a `Subscription` resource. `v2alpha1` is the default spec for the subscription API. + +```yml +apiVersion: dapr.io/v2alpha1 +kind: Subscription +metadata: + name: +spec: + version: v2alpha1 + topic: # Required + routes: # Required + - rules: + - match: + path: + pubsubname: # Required + deadlettertopic: # Optional + bulksubscribe: # Optional + - enabled: + - maxmessages: + - maxawaitduration: +scopes: +- +``` + +### Spec fields + +| Field | Required | Details | Example | +|--------------------|:--------:|---------|---------| +| topic | Y | The name of the topic to which your component subscribes. | `orders` | +| routes | Y | The routes configuration for this topic, including specifying the condition for sending a message to a specific path. Includes the following fields:
  • match: _Optional._ The CEL expression used to match the event. If not specified, the route is considered the default.
  • path: The path for events that match this rule.
The endpoint to which all topic messages are sent. | `match: event.type == "widget"`
`path: /widgets` | | pubsubname | Y | The name of your pub/sub component. | `pubsub` | | deadlettertopic | N | The name of the dead letter topic that forwards undeliverable messages. | `poisonMessages` | | bulksubscribe | N | Enable bulk subscribe properties. | `true`, `false` | + + ## `v1alpha1` format + +The following is the basic version `v1alpha1` spec for a `Subscription` resource. `v1alpha1` is now deprecated. + +```yml +apiVersion: dapr.io/v1alpha1 +kind: Subscription +metadata: + name: +spec: + version: v1alpha1 + topic: # Required + route: # Required + pubsubname: # Required + deadLetterTopic: # Optional + bulkSubscribe: # Optional + - enabled: + - maxmessages: + - maxawaitduration: +scopes: +- +``` + +### Spec fields + +| Field | Required | Details | Example | |--------------------|:--------:|---------|---------| | topic | Y | The name of the topic to which your component subscribes. | `orders` | | route | Y | The endpoint to which all topic messages are sent. | `/checkout` | | pubsubname | Y | The name of your pub/sub component. | `pubsub` | | deadLetterTopic | N | The name of the dead letter topic that forwards undeliverable messages. | `poisonMessages` | | bulkSubscribe | N | Enable bulk subscribe properties. | `true`, `false` | + ## Related links - [Learn more about the declarative subscription method]({{< ref "subscription-methods.md#declarative-subscriptions" >}}) - [Learn more about dead letter topics]({{< ref pubsub-deadletter.md >}}) - [Learn more about routing messages]({{< ref "howto-route-messages.md#declarative-subscription" >}}) - [Learn more about bulk subscribing]({{< ref pubsub-bulk.md >}}) \ No newline at end of file diff --git a/daprdocs/data/components/bindings/azure.yaml b/daprdocs/data/components/bindings/azure.yaml index b9ca1008e2f..54d89da3ef1 100644 --- a/daprdocs/data/components/bindings/azure.yaml +++ b/daprdocs/data/components/bindings/azure.yaml @@ -61,4 +61,4 @@ since: "1.0" features: input: true - output: true + output: true \ No newline at end of file diff --git a/daprdocs/data/components/bindings/generic.yaml b/daprdocs/data/components/bindings/generic.yaml index 35ec6d43727..4f63295bd38 100644 --- a/daprdocs/data/components/bindings/generic.yaml +++ b/daprdocs/data/components/bindings/generic.yaml @@ -14,6 +14,14 @@ features: input: true output: false +- component: commercetools GraphQL + link: commercetools + state: Alpha + version: v1 + since: "1.8" + features: + input: false + output: true - component: GraphQL link: graghql state: Alpha @@ -22,6 +30,14 @@ features: input: false output: true +- component: Huawei OBS + link: huawei-obs + state: Alpha + version: v1 + since: "1.8" + features: + input: false + output: true - component: InfluxDB link: influxdb state: Beta @@ -38,6 +54,14 @@ features: input: true output: true +- component: Kitex + link: kitex + state: Alpha + version: v1 + since: "1.11" + features: + input: false + output: true - component: Kubernetes Events link: kubernetes-binding state: Alpha @@ -102,6 +126,14 @@ features: input: false output: true +- component: RethinkDB + link: rethinkdb + state: Beta + version: v1 + since: "1.9" + features: + input: true + output: false - component: SMTP link: smtp state: Alpha diff --git a/daprdocs/data/components/configuration_stores/azure.yaml b/daprdocs/data/components/configuration_stores/azure.yaml new file mode 100644 index 00000000000..c431c628e34 --- /dev/null +++ b/daprdocs/data/components/configuration_stores/azure.yaml @@ -0,0 +1,5 @@ +- component: Azure App
Configuration + link: azure-appconfig-configuration-store + state: Alpha + version: v1 + since: "1.9" \ No newline at end of file diff --git a/daprdocs/data/components/configuration_stores/generic.yaml b/daprdocs/data/components/configuration_stores/generic.yaml index e51482aa3db..59591b418e3 100644 --- a/daprdocs/data/components/configuration_stores/generic.yaml +++ b/daprdocs/data/components/configuration_stores/generic.yaml @@ -7,10 +7,4 @@ link: postgresql-configuration-store state: Stable version: v1 - since: "1.11" -- component: Azure App Configuration - link: azure-appconfig-configuration-store - state: Alpha - version: v1 - since: "1.9" - + since: "1.11" \ No newline at end of file diff --git a/daprdocs/data/components/pubsub/generic.yaml b/daprdocs/data/components/pubsub/generic.yaml index 4ceabbfa6f8..99fa5cd2013 100644 --- a/daprdocs/data/components/pubsub/generic.yaml +++ b/daprdocs/data/components/pubsub/generic.yaml @@ -1,6 +1,6 @@ - component: In-memory link: setup-inmemory - state: Beta + state: Stable version: v1 since: "1.7" features: @@ -48,9 +48,9 @@ bulkSubscribe: false - component: NATS Streaming link: setup-nats-streaming - state: Beta + state: Deprecated version: v1 - since: "1.0" + since: "1.11" features: bulkPublish: false bulkSubscribe: false diff --git a/daprdocs/data/components/state_stores/azure.yaml b/daprdocs/data/components/state_stores/azure.yaml index 356d0861c24..6f37e4493e5 100644 --- a/daprdocs/data/components/state_stores/azure.yaml +++ b/daprdocs/data/components/state_stores/azure.yaml @@ -20,7 +20,7 @@ etag: true ttl: true query: true -- component: Azure SQL Server +- component: Microsoft SQL Server link: setup-sqlserver state: Stable version: v1 diff --git a/daprdocs/data/components/state_stores/generic.yaml b/daprdocs/data/components/state_stores/generic.yaml index dd7cacd13ba..a4ffcd52cb2 100644 --- a/daprdocs/data/components/state_stores/generic.yaml +++ b/daprdocs/data/components/state_stores/generic.yaml @@ -77,9 +77,9 @@ query: false - component: In-memory link: setup-inmemory - state: Developer-only + state: Stable version: v1 - since: "1.8" + since: "1.9" features: crud: true transactions: true diff --git a/daprdocs/layouts/partials/components/configuration-stores.html b/daprdocs/layouts/partials/components/configuration-stores.html index 19a123a73cc..ce0d435297d 100644 --- a/daprdocs/layouts/partials/components/configuration-stores.html +++ b/daprdocs/layouts/partials/components/configuration-stores.html @@ -1,5 +1,7 @@ {{- $groups := dict " Generic" $.Site.Data.components.configuration_stores.generic +"Microsoft Azure" $.Site.Data.components.configuration_stores.azure + }} {{ range $group, $components := $groups }} diff --git a/daprdocs/layouts/partials/components/description.html b/daprdocs/layouts/partials/components/description.html index 770fa975bb8..46bb87b46e1 100644 --- a/daprdocs/layouts/partials/components/description.html +++ b/daprdocs/layouts/partials/components/description.html @@ -1,15 +1,29 @@ -

Table captions:

-
-

Status: component certification status

-
- -
-

Since: the version of the Dapr Runtime in which the component first moved to the current status

-
-
-

Component version: the version of the component

-
+

Table headers to note:

+ + + + + + + + + + + + + + + + + + + + + + +
HeaderDescriptionExample
StatusComponent certification status + Alpha
+ Beta
+ Stable
+ +
Component versionThe version of the componentv1
Since runtime versionThe version of the Dapr runtime when the component status was set or updated 1.11
\ No newline at end of file diff --git a/daprdocs/layouts/partials/hooks/body-end.html b/daprdocs/layouts/partials/hooks/body-end.html index 695cf863809..79cbc117cd9 100644 --- a/daprdocs/layouts/partials/hooks/body-end.html +++ b/daprdocs/layouts/partials/hooks/body-end.html @@ -1,19 +1,13 @@ + + {{ with .Site.Params.algolia_docsearch }} - - + {{ end }} - - \ No newline at end of file diff --git a/daprdocs/layouts/partials/hooks/head-end.html b/daprdocs/layouts/partials/hooks/head-end.html index 804fe38e9ec..03e91efa215 100644 --- a/daprdocs/layouts/partials/hooks/head-end.html +++ b/daprdocs/layouts/partials/hooks/head-end.html @@ -1,3 +1,3 @@ {{ with .Site.Params.algolia_docsearch }} - + {{ end }} \ No newline at end of file diff --git a/daprdocs/layouts/partials/page-meta-links.html b/daprdocs/layouts/partials/page-meta-links.html index 6bf0d95a16a..7e87f5bd216 100644 --- a/daprdocs/layouts/partials/page-meta-links.html +++ b/daprdocs/layouts/partials/page-meta-links.html @@ -1,28 +1,53 @@ -{{ if .Path }} -{{ $pathFormatted := replace .Path "\\" "/" }} -{{ $gh_repo := ($.Param "github_repo") }} -{{ $gh_subdir := ($.Param "github_subdir") }} -{{ $gh_project_repo := ($.Param "github_project_repo") }} -{{ $gh_branch := (default "master" ($.Param "github_branch")) }} -{{ if $gh_repo }} -
-{{ $gh_repo_path := printf "%s/content/%s" $gh_branch $pathFormatted }} -{{ if and ($gh_subdir) (.Site.Language.Lang) }} -{{ $gh_repo_path = printf "%s/%s/content/%s/%s" $gh_branch $gh_subdir ($.Site.Language.Lang) $pathFormatted }} -{{ else if .Site.Language.Lang }} -{{ $gh_repo_path = printf "%s/content/%s/%s" $gh_branch ($.Site.Language.Lang) $pathFormatted }} -{{ else if $gh_subdir }} -{{ $gh_repo_path = printf "%s/%s/content/%s" $gh_branch $gh_subdir $pathFormatted }} -{{ end }} -{{ $editURL := printf "%s/edit/%s" $gh_repo $gh_repo_path }} -{{ $createURL := printf "%s/edit/%s" $gh_repo $gh_repo_path }} -{{ $issuesURL := printf "%s/issues/new/choose" $gh_repo}} -{{ $newPageStub := resources.Get "stubs/new-page-template.md" }} -{{ $newPageQS := querify "value" $newPageStub.Content "filename" "change-me.md" | safeURL }} -{{ $newPageURL := printf "%s/new/%s?%s" $gh_repo $gh_repo_path $newPageQS }} +{{ if .File }} +{{ $pathFormatted := replace .File.Path "\\" "/" -}} +{{ $gh_repo := ($.Param "github_repo") -}} +{{ $gh_url := ($.Param "github_url") -}} +{{ $gh_subdir := ($.Param "github_subdir") -}} +{{ $gh_project_repo := ($.Param "github_project_repo") -}} +{{ $gh_branch := (default "main" ($.Param "github_branch")) -}} +
+{{ if $gh_url -}} + {{ warnf "Warning: use of `github_url` is deprecated. For details see https://www.docsy.dev/docs/adding-content/repository-links/#github_url-optional" -}} + {{ T "post_edit_this" }} +{{ else if $gh_repo -}} + {{ $gh_repo_path := printf "%s/content/%s" $gh_branch $pathFormatted -}} + {{ if and ($gh_subdir) (.Site.Language.Lang) -}} + {{ $gh_repo_path = printf "%s/%s/content/%s/%s" $gh_branch $gh_subdir ($.Site.Language.Lang) $pathFormatted -}} + {{ else if .Site.Language.Lang -}} + {{ $gh_repo_path = printf "%s/content/%s/%s" $gh_branch ($.Site.Language.Lang) $pathFormatted -}} + {{ else if $gh_subdir -}} + {{ $gh_repo_path = printf "%s/%s/content/%s" $gh_branch $gh_subdir $pathFormatted -}} + {{ end -}} - {{ T "post_edit_this" }} - {{ T "post_create_issue" }} -
-{{ end }} + {{/* Adjust $gh_repo_path based on path_base_for_github_subdir */ -}} + {{ $ghs_base := $.Param "path_base_for_github_subdir" -}} + {{ $ghs_rename := "" -}} + {{ if reflect.IsMap $ghs_base -}} + {{ $ghs_rename = $ghs_base.to -}} + {{ $ghs_base = $ghs_base.from -}} + {{ end -}} + {{ with $ghs_base -}} + {{ $gh_repo_path = replaceRE . $ghs_rename $gh_repo_path -}} + {{ end -}} + + {{ $viewURL := printf "%s/tree/%s" $gh_repo $gh_repo_path -}} + {{ $editURL := printf "%s/edit/%s" $gh_repo $gh_repo_path -}} + {{ $issuesURL := printf "%s/issues/new/choose" $gh_repo -}} + {{ $newPageStub := resources.Get "stubs/new-page-template.md" -}} + {{ $newPageQS := querify "value" $newPageStub.Content "filename" "change-me.md" | safeURL -}} + {{ $newPageURL := printf "%s/new/%s?%s" $gh_repo $gh_repo_path $newPageQS -}} + + {{ T "post_edit_this" }} + {{ T "post_create_issue" }} + + {{ with $gh_project_repo -}} + {{ $project_issueURL := printf "%s/issues/new/choose" . -}} + {{ T "post_create_project_issue" }} + {{ end -}} + +{{ end -}} +{{ with .CurrentSection.AlternativeOutputFormats.Get "print" -}} + {{ T "print_entire_section" }} {{ end }} +
+{{ end -}} \ No newline at end of file diff --git a/daprdocs/layouts/partials/search-input.html b/daprdocs/layouts/partials/search-input.html new file mode 100644 index 00000000000..22e90024773 --- /dev/null +++ b/daprdocs/layouts/partials/search-input.html @@ -0,0 +1,30 @@ +{{ if .Site.Params.gcs_engine_id -}} + +{{ else if .Site.Params.algolia_docsearch -}} +
+{{ else if .Site.Params.offlineSearch -}} +{{ $offlineSearchIndex := resources.Get "json/offline-search-index.json" | resources.ExecuteAsTemplate "offline-search-index.json" . -}} +{{ if hugo.IsProduction -}} +{{/* Use `md5` as finger print hash function to shorten file name to avoid `file name too long` error. */ -}} +{{ $offlineSearchIndex = $offlineSearchIndex | fingerprint "md5" -}} +{{ end -}} +{{ $offlineSearchLink := $offlineSearchIndex.RelPermalink -}} + + +{{ end -}} diff --git a/daprdocs/layouts/shortcodes/dapr-latest-version.html b/daprdocs/layouts/shortcodes/dapr-latest-version.html index 05d1076caa8..9b4bf780551 100644 --- a/daprdocs/layouts/shortcodes/dapr-latest-version.html +++ b/daprdocs/layouts/shortcodes/dapr-latest-version.html @@ -1 +1 @@ -{{- if .Get "short" }}1.10{{ else if .Get "long" }}1.10.7{{ else if .Get "cli" }}1.10.0{{ else }}1.10.7{{ end -}} +{{- if .Get "short" }}1.11{{ else if .Get "long" }}1.11.3{{ else if .Get "cli" }}1.11.0{{ else }}1.11.3{{ end -}} diff --git a/daprdocs/layouts/shortcodes/table.html b/daprdocs/layouts/shortcodes/table.html new file mode 100644 index 00000000000..7ba0498ba7b --- /dev/null +++ b/daprdocs/layouts/shortcodes/table.html @@ -0,0 +1,6 @@ +{{ $htmlTable := .Inner | markdownify }} +{{ $class := .Get 0 | default "" }} +{{ $old := "<table>" }} +{{ $new := printf "<table class=\"%s\">
" $class }} +{{ $htmlTable := replace $htmlTable $old $new }} +{{ $htmlTable | safeHTML }} \ No newline at end of file diff --git a/daprdocs/static/docs/Dapr-june-2023-fuzzing-audit-report.pdf b/daprdocs/static/docs/Dapr-june-2023-fuzzing-audit-report.pdf new file mode 100644 index 00000000000..3a43a7dfd09 Binary files /dev/null and b/daprdocs/static/docs/Dapr-june-2023-fuzzing-audit-report.pdf differ diff --git a/daprdocs/static/docs/Dapr-september-2023-security-audit-report.pdf b/daprdocs/static/docs/Dapr-september-2023-security-audit-report.pdf new file mode 100644 index 00000000000..ebe454f62ac Binary files /dev/null and b/daprdocs/static/docs/Dapr-september-2023-security-audit-report.pdf differ diff --git a/daprdocs/static/docs/open-telemetry-collector/open-telemetry-collector-appinsights.yaml b/daprdocs/static/docs/open-telemetry-collector/open-telemetry-collector-appinsights.yaml index 81979ba9b4b..4a1e7aae393 100644 --- a/daprdocs/static/docs/open-telemetry-collector/open-telemetry-collector-appinsights.yaml +++ b/daprdocs/static/docs/open-telemetry-collector/open-telemetry-collector-appinsights.yaml @@ -71,7 +71,7 @@ spec: spec: containers: - name: otel-collector - image: otel/opentelemetry-collector-contrib:0.50.0 + image: otel/opentelemetry-collector-contrib:0.77.0 command: - "/otelcol-contrib" - "--config=/conf/otel-collector-config.yaml" diff --git a/daprdocs/static/images/binding-overview.png b/daprdocs/static/images/binding-overview.png new file mode 100644 index 00000000000..fd8c08d8e24 Binary files /dev/null and b/daprdocs/static/images/binding-overview.png differ diff --git a/daprdocs/static/images/building-block-service-invocation-example.png b/daprdocs/static/images/building-block-service-invocation-example.png index 286d11eb746..7913ebc9788 100644 Binary files a/daprdocs/static/images/building-block-service-invocation-example.png and b/daprdocs/static/images/building-block-service-invocation-example.png differ diff --git a/daprdocs/static/images/building_blocks.png b/daprdocs/static/images/building_blocks.png index 0a33365675a..a6205cae5a1 100644 Binary files a/daprdocs/static/images/building_blocks.png and b/daprdocs/static/images/building_blocks.png differ diff --git a/daprdocs/static/images/open-telemetry-collector.png b/daprdocs/static/images/open-telemetry-collector.png index d2af81efe12..e47a4972688 100644 Binary files a/daprdocs/static/images/open-telemetry-collector.png and b/daprdocs/static/images/open-telemetry-collector.png differ diff --git a/daprdocs/static/images/overview.png b/daprdocs/static/images/overview.png index 3aeb7f11d22..7888f6a5e2e 100644 Binary files a/daprdocs/static/images/overview.png and b/daprdocs/static/images/overview.png differ diff --git a/daprdocs/static/images/workflow-overview/workflow-monitor-pattern.png b/daprdocs/static/images/workflow-overview/workflow-monitor-pattern.png index bf4ba8d0a2c..e61dc30aa21 100644 Binary files a/daprdocs/static/images/workflow-overview/workflow-monitor-pattern.png and b/daprdocs/static/images/workflow-overview/workflow-monitor-pattern.png differ diff --git a/daprdocs/static/images/workflow-quickstart-overview.png b/daprdocs/static/images/workflow-quickstart-overview.png index c526a0486ad..d616f210622 100644 Binary files a/daprdocs/static/images/workflow-quickstart-overview.png and b/daprdocs/static/images/workflow-quickstart-overview.png differ diff --git a/daprdocs/static/js/copy-code-button.js b/daprdocs/static/js/copy-code-button.js index 579d2514824..ca5d0e26e90 100644 --- 
a/daprdocs/static/js/copy-code-button.js +++ b/daprdocs/static/js/copy-code-button.js @@ -1,49 +1,35 @@ -function addCopyButtons(clipboard) { - document.querySelectorAll('pre > code').forEach(function(codeBlock) { - var button = document.createElement('button'); - button.className = 'copy-code-button'; - button.type = 'button'; - button.innerText = 'Copy'; +const highlightClass = document.querySelectorAll('.highlight'); - button.addEventListener('click', function() { - clipboard.writeText(codeBlock.textContent).then( - function() { - button.blur(); +highlightClass.forEach(element => { + const copyIcon = document.createElement('i'); + copyIcon.classList.add('fas', 'fa-copy', 'copy-icon'); + copyIcon.style.color = 'white'; + copyIcon.style.display = 'none'; + element.appendChild(copyIcon); - button.innerText = 'Copied!'; - setTimeout(function() { - button.innerText = 'Copy'; - }, 2000); - }, - function(error) { - button.innerText = 'Error'; - console.error(error); - } - ); - }); + element.addEventListener('mouseenter', () => { + copyIcon.style.display = 'inline'; + }); - var pre = codeBlock.parentNode; - if (pre.parentNode.classList.contains('highlight')) { - var highlight = pre.parentNode; - highlight.parentNode.insertBefore(button, highlight); - } else { - pre.parentNode.insertBefore(button, pre); - } - }); -} + element.addEventListener('mouseleave', () => { + copyIcon.style.display = 'none'; + copyIcon.classList.replace('fa-check', 'fa-copy'); + }); -if (navigator && navigator.clipboard) { - addCopyButtons(navigator.clipboard); -} else { - var script = document.createElement('script'); - script.src = - 'https://cdnjs.cloudflare.com/ajax/libs/clipboard-polyfill/2.7.0/clipboard-polyfill.promise.js'; - script.integrity = 'sha256-waClS2re9NUbXRsryKoof+F9qc1gjjIhc2eT7ZbIv94='; - script.crossOrigin = 'anonymous'; + copyIcon.addEventListener('click', async () => { + const selection = window.getSelection(); + const range = document.createRange(); + range.selectNodeContents(element); + selection.removeAllRanges(); + selection.addRange(range); - script.onload = function() { - addCopyButtons(clipboard); - }; - - document.body.appendChild(script); -} + try { + await navigator.clipboard.writeText(selection.toString()); + console.log('Text copied to clipboard'); + copyIcon.classList.replace('fa-copy', 'fa-check'); + selection.removeAllRanges(); + } catch (error) { + console.error('Failed to copy: ', error); + } + }); +}); diff --git a/daprdocs/static/presentations/Dapr-Diagrams.pptx.zip b/daprdocs/static/presentations/Dapr-Diagrams.pptx.zip index ac7ffa00eee..6c49403c113 100644 Binary files a/daprdocs/static/presentations/Dapr-Diagrams.pptx.zip and b/daprdocs/static/presentations/Dapr-Diagrams.pptx.zip differ diff --git a/sdkdocs/dotnet b/sdkdocs/dotnet index edb09a08b7a..2449bcd6691 160000 --- a/sdkdocs/dotnet +++ b/sdkdocs/dotnet @@ -1 +1 @@ -Subproject commit edb09a08b7a2ca63983f5237b307c40cae86d3bb +Subproject commit 2449bcd6691eb49825e0e8e9dff50bd50fd41c2e diff --git a/sdkdocs/go b/sdkdocs/go index effc2f0d3c9..ad25580bcfb 160000 --- a/sdkdocs/go +++ b/sdkdocs/go @@ -1 +1 @@ -Subproject commit effc2f0d3c92ad76e11958e427c8d3b0900e1932 +Subproject commit ad25580bcfb638d56237faec0543565b4d0e134f diff --git a/sdkdocs/java b/sdkdocs/java index d1c61cae40e..9dc842faba3 160000 --- a/sdkdocs/java +++ b/sdkdocs/java @@ -1 +1 @@ -Subproject commit d1c61cae40e7c5d933d92705198506d947960aaa +Subproject commit 9dc842faba3486e518babc29f7fbbca79248bfab diff --git a/sdkdocs/js b/sdkdocs/js index 
1e3b6eb859b..7686ab039bc 160000 --- a/sdkdocs/js +++ b/sdkdocs/js @@ -1 +1 @@ -Subproject commit 1e3b6eb859be175e12808c0ff345f40398f209d6 +Subproject commit 7686ab039bcc30f375f922960020d403dd2d3867 diff --git a/sdkdocs/python b/sdkdocs/python index 5051a9d5d92..64e834b0a06 160000 --- a/sdkdocs/python +++ b/sdkdocs/python @@ -1 +1 @@ -Subproject commit 5051a9d5d92003924322a8ddbdf230fb8a872dd7 +Subproject commit 64e834b0a06f5b218efc941b8caf3683968b7208