diff --git a/.github/workflows/website-root.yml b/.github/workflows/website-root.yml
index 1f8e503e4c2..17989accb7d 100644
--- a/.github/workflows/website-root.yml
+++ b/.github/workflows/website-root.yml
@@ -4,11 +4,11 @@ on:
workflow_dispatch:
push:
branches:
- - v1.11
+ - v1.12
pull_request:
types: [opened, synchronize, reopened, closed]
branches:
- - v1.11
+ - v1.12
concurrency:
# Cancel the previously triggered build for only PR build.
diff --git a/README.md b/README.md
index 11ec2756e4d..a189c74f09e 100644
--- a/README.md
+++ b/README.md
@@ -14,8 +14,8 @@ The following branches are currently maintained:
| Branch | Website | Description |
| ------------------------------------------------------------ | -------------------------- | ------------------------------------------------------------------------------------------------ |
-| [v1.11](https://github.com/dapr/docs) (primary) | https://docs.dapr.io | Latest Dapr release documentation. Typo fixes, clarifications, and most documentation goes here. |
-| [v1.12](https://github.com/dapr/docs/tree/v1.12) (pre-release) | https://v1-12.docs.dapr.io/ | Pre-release documentation. Doc updates that are only applicable to v1.12+ go here. |
+| [v1.12](https://github.com/dapr/docs) (primary) | https://docs.dapr.io | Latest Dapr release documentation. Typo fixes, clarifications, and most documentation goes here. |
+| [v1.13](https://github.com/dapr/docs/tree/v1.13) (pre-release) | https://v1-13.docs.dapr.io/ | Pre-release documentation. Doc updates that are only applicable to v1.13+ go here. |
For more information visit the [Dapr branch structure](https://docs.dapr.io/contributing/docs-contrib/contributing-docs/#branch-guidance) document.
diff --git a/daprdocs/config.toml b/daprdocs/config.toml
index d28410be666..5b500e5e12f 100644
--- a/daprdocs/config.toml
+++ b/daprdocs/config.toml
@@ -1,5 +1,5 @@
# Site Configuration
-baseURL = "https://v1-12.docs.dapr.io"
+baseURL = "https://docs.dapr.io"
title = "Dapr Docs"
theme = "docsy"
disableFastRender = true
@@ -168,20 +168,23 @@ offlineSearch = false
github_repo = "https://github.com/dapr/docs"
github_project_repo = "https://github.com/dapr/dapr"
github_subdir = "daprdocs"
-github_branch = "v1.11"
+github_branch = "v1.12"
# Versioning
-version_menu = "v1.11 (latest)"
-version = "v1.11"
+version_menu = "v1.12 (latest)"
+version = "v1.12"
archived_version = false
url_latest_version = "https://docs.dapr.io"
[[params.versions]]
- version = "v1.12 (preview)"
- url = "#"
+ version = "v1.13 (preview)"
+ url = "https://v1-13.docs.dapr.io"
[[params.versions]]
- version = "v1.11 (latest)"
+ version = "v1.12 (latest)"
url = "https://docs.dapr.io"
+[[params.versions]]
+ version = "v1.11"
+ url = "https://v1-11.docs.dapr.io"
[[params.versions]]
version = "v1.10"
url = "https://v1-10.docs.dapr.io"
diff --git a/daprdocs/content/en/concepts/building-blocks-concept.md b/daprdocs/content/en/concepts/building-blocks-concept.md
index 4719626f3c6..1841dd58468 100644
--- a/daprdocs/content/en/concepts/building-blocks-concept.md
+++ b/daprdocs/content/en/concepts/building-blocks-concept.md
@@ -28,5 +28,5 @@ Dapr provides the following building blocks:
| [**Secrets**]({{< ref "secrets-overview.md" >}}) | `/v1.0/secrets` | Dapr provides a secrets building block API and integrates with secret stores such as public cloud stores, local stores and Kubernetes to store the secrets. Services can call the secrets API to retrieve secrets, for example to get a connection string to a database.
| [**Configuration**]({{< ref "configuration-api-overview.md" >}}) | `/v1.0/configuration` | The Configuration API enables you to retrieve and subscribe to application configuration items for supported configuration stores. This enables an application to retrieve specific configuration information, for example, at start up or when configuration changes are made in the store.
| [**Distributed lock**]({{< ref "distributed-lock-api-overview.md" >}}) | `/v1.0-alpha1/lock` | The distributed lock API enables you to take a lock on a resource so that multiple instances of an application can access the resource without conflicts and provide consistency guarantees.
-| [**Workflows**]({{< ref "workflow-overview.md" >}}) | `/v1.0-alpha1/workflow` | The Workflow API enables you to define long running, persistent processes or data flows that span multiple microservices using Dapr workflows or workflow components. The Workflow API can be combined with other Dapr API building blocks. For example, a workflow can call another service with service invocation or retrieve secrets, providing flexibility and portability.
+| [**Workflows**]({{< ref "workflow-overview.md" >}}) | `/v1.0-beta1/workflow` | The Workflow API enables you to define long running, persistent processes or data flows that span multiple microservices using Dapr workflows or workflow components. The Workflow API can be combined with other Dapr API building blocks. For example, a workflow can call another service with service invocation or retrieve secrets, providing flexibility and portability.
| [**Cryptography**]({{< ref "cryptography-overview.md" >}}) | `/v1.0-alpha1/crypto` | The Cryptography API enables you to perform cryptographic operations, such as encrypting and decrypting messages, without exposing keys to your application.
\ No newline at end of file
diff --git a/daprdocs/content/en/concepts/dapr-services/placement.md b/daprdocs/content/en/concepts/dapr-services/placement.md
index 5cb1d999542..7db47a37491 100644
--- a/daprdocs/content/en/concepts/dapr-services/placement.md
+++ b/daprdocs/content/en/concepts/dapr-services/placement.md
@@ -17,7 +17,7 @@ The placement service is deployed as part of `dapr init -k`, or via the Dapr Hel
## Placement tables
-There is an HTTP API `/placement/state` for placement service that exposes placement table information. The API is exposed on the sidecar on the same port as the healthz. This is an unauthenticated endpoint, and is disabled by default. You need to set `DAPR_PLACEMENT_METADATA_ENABLED` environment or `metadata-enabled` command line args to true to enable it. If you are using helm you just need to set `dapr_placement.metadataEnabled` to true.
+There is an [HTTP API `/placement/state` for placement service]({{< ref placement_api.md >}}) that exposes placement table information. The API is exposed on the sidecar on the same port as the healthz. This is an unauthenticated endpoint, and is disabled by default. You need to set the `DAPR_PLACEMENT_METADATA_ENABLED` environment variable or the `metadata-enabled` command line argument to `true` to enable it. If you are using Helm, you just need to set `dapr_placement.metadataEnabled` to `true`.
### Usecase:
The placement table API can be used for retrieving the current placement table, which contains all the actors registered. This can be helpful for debugging and allows tools to extract and present information about actors.
@@ -83,3 +83,7 @@ updatedAt | timestamp | Timestamp of the actor registered/updated.
"tableVersion": 1
}
```
+
+## Related links
+
+[Learn more about the Placement API.]({{< ref placement_api.md >}})
\ No newline at end of file
diff --git a/daprdocs/content/en/contributing/daprbot.md b/daprdocs/content/en/contributing/daprbot.md
index 2be452643df..64a50a664a3 100644
--- a/daprdocs/content/en/contributing/daprbot.md
+++ b/daprdocs/content/en/contributing/daprbot.md
@@ -6,24 +6,24 @@ weight: 15
description: "List of Dapr bot capabilities."
---
-Dapr bot is a GitHub script that helps with common tasks in the Dapr organization. It is set up individually for each repository ([example](https://github.com/dapr/dapr/blob/master/.github/workflows/dapr-bot.yml)) and can be configured to run on specific events. This reference covers the Dapr bot capabilities from the `dapr` and `components-contrib` repositories only.
+Dapr bot is triggered by a list of commands that helps with common tasks in the Dapr organization. It is set up individually for each repository ([example](https://github.com/dapr/dapr/blob/master/.github/workflows/dapr-bot.yml)) and can be configured to run on specific events. Below is a list of commands and the list of repositories they are implemented on.
## Command reference
-| Command | Target | Description | Who can use | Repository |
-|---------|--------|-------------|-------------|------------|
-| `/assign` | Issue | Assigns an issue to a user or group of users | Anyone | `dapr`, `components-contrib` |
-| `/ok-to-test` | Pull request | `dapr`: trigger end to end tests `components-contrib`: trigger conformance and certification tests | Users listed in the [bot](https://github.com/dapr/dapr/blob/master/.github/scripts/dapr_bot.js) | `dapr`, `components-contrib` |
-| `/ok-to-perf` | Pull request | Trigger performance tests. | Users listed in the [bot](https://github.com/dapr/dapr/blob/master/.github/scripts/dapr_bot.js) | `dapr` |
-| `/make-me-laugh` | Issue or pull request | Posts a random joke | Users listed in the [bot](https://github.com/dapr/dapr/blob/master/.github/scripts/dapr_bot.js) | `dapr`, `components-contrib` |
+| Command | Target | Description | Who can use | Repository |
+| ---------------- | --------------------- | -------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- | -------------------------------------- |
+| `/assign` | Issue | Assigns an issue to a user or group of users | Anyone | `dapr`, `components-contrib`, `go-sdk` |
+| `/ok-to-test` | Pull request | `dapr`: trigger end to end tests `components-contrib`: trigger conformance and certification tests | Users listed in the [bot](https://github.com/dapr/dapr/blob/master/.github/scripts/dapr_bot.js) | `dapr`, `components-contrib` |
+| `/ok-to-perf` | Pull request | Trigger performance tests. | Users listed in the [bot](https://github.com/dapr/dapr/blob/master/.github/scripts/dapr_bot.js) | `dapr` |
+| `/make-me-laugh` | Issue or pull request | Posts a random joke | Users listed in the [bot](https://github.com/dapr/dapr/blob/master/.github/scripts/dapr_bot.js) | `dapr`, `components-contrib` |
## Label reference
You can query issues created by the Dapr bot by using the `created-by/dapr-bot` label ([query](https://github.com/search?q=org%3Adapr%20is%3Aissue%20label%3Acreated-by%2Fdapr-bot%20&type=issues)).
-| Label | Target | What does it do? | Repository |
-|-------|--------|------------------|------------|
-| `docs-needed` | Issue | Creates a new issue in `dapr/docs` to track doc work | `dapr` |
-| `sdk-needed` | Issue | Creates new issues across the SDK repos to track SDK work | `dapr` |
-| `documentation required` | Issue or pull request | Creates a new issue in `dapr/docs` to track doc work | `components-contrib` |
-| `new component` | Issue or pull request | Creates a new issue in `dapr/dapr` to register the new component | `components-contrib` |
+| Label | Target | What does it do? | Repository |
+| ------------------------ | --------------------- | ---------------------------------------------------------------- | -------------------- |
+| `docs-needed` | Issue | Creates a new issue in `dapr/docs` to track doc work | `dapr` |
+| `sdk-needed` | Issue | Creates new issues across the SDK repos to track SDK work | `dapr` |
+| `documentation required` | Issue or pull request | Creates a new issue in `dapr/docs` to track doc work | `components-contrib` |
+| `new component` | Issue or pull request | Creates a new issue in `dapr/dapr` to register the new component | `components-contrib` |
diff --git a/daprdocs/content/en/developing-applications/building-blocks/pubsub/howto-publish-subscribe.md b/daprdocs/content/en/developing-applications/building-blocks/pubsub/howto-publish-subscribe.md
index c3ceb433307..ad0bce8f934 100644
--- a/daprdocs/content/en/developing-applications/building-blocks/pubsub/howto-publish-subscribe.md
+++ b/daprdocs/content/en/developing-applications/building-blocks/pubsub/howto-publish-subscribe.md
@@ -219,7 +219,7 @@ namespace CheckoutService.controller
Navigate to the directory containing the above code, then run the following command to launch both a Dapr sidecar and the subscriber application:
```bash
-dapr run --app-id checkout --app-port 6002 --dapr-http-port 3602 --dapr-grpc-port 60002 --app-ssl dotnet run
+dapr run --app-id checkout --app-port 6002 --dapr-http-port 3602 --dapr-grpc-port 60002 --app-protocol https dotnet run
```
{{% /codetab %}}
@@ -465,7 +465,7 @@ namespace EventService
Navigate to the directory containing the above code, then run the following command to launch both a Dapr sidecar and the publisher application:
```bash
-dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-grpc-port 60001 --app-ssl dotnet run
+dapr run --app-id orderprocessing --app-port 6001 --dapr-http-port 3601 --dapr-grpc-port 60001 --app-protocol https dotnet run
```
{{% /codetab %}}
diff --git a/daprdocs/content/en/developing-applications/building-blocks/pubsub/pubsub-bulk.md b/daprdocs/content/en/developing-applications/building-blocks/pubsub/pubsub-bulk.md
index b6b592a0612..3961d37570d 100644
--- a/daprdocs/content/en/developing-applications/building-blocks/pubsub/pubsub-bulk.md
+++ b/daprdocs/content/en/developing-applications/building-blocks/pubsub/pubsub-bulk.md
@@ -302,10 +302,10 @@ In the example above, `bulkSubscribe` is _optional_. If you use `bulkSubscribe`,
- `enabled` is mandatory and enables or disables bulk subscriptions on this topic
- You can optionally configure the max number of messages (`maxMessagesCount`) delivered in a bulk message.
Default value of `maxMessagesCount` for components not supporting bulk subscribe is 100 i.e. for default bulk events between App and Dapr. Please refer [How components handle publishing and subscribing to bulk messages]({{< ref pubsub-bulk >}}).
-If a component supports bulk subscribe, then default value for this parameter can be found in that component doc. Please refer [Supported components]({{< ref pubsub-bulk >}}).
+If a component supports bulk subscribe, then default value for this parameter can be found in that component doc.
- You can optionally provide the max duration to wait (`maxAwaitDurationMs`) before a bulk message is sent to the app.
Default value of `maxAwaitDurationMs` for components not supporting bulk subscribe is 1000 i.e. for default bulk events between App and Dapr. Please refer [How components handle publishing and subscribing to bulk messages]({{< ref pubsub-bulk >}}).
-If a component supports bulk subscribe, then default value for this parameter can be found in that component doc. Please refer [Supported components]({{< ref pubsub-bulk >}}).
+If a component supports bulk subscribe, then default value for this parameter can be found in that component doc.
The application receives an `EntryId` associated with each entry (individual message) in the bulk message. This `EntryId` must be used by the app to communicate the status of that particular entry. If the app fails to notify on an `EntryId` status, it's considered a `RETRY`.
@@ -473,9 +473,41 @@ public class BulkMessageController : ControllerBase
{{< /tabs >}}
## How components handle publishing and subscribing to bulk messages
-Some pub/sub brokers support sending and receiving multiple messages in a single request. When a component supports bulk publish or subscribe operations, Dapr runtime uses them to further optimize the communication between the Dapr sidecar and the underlying pub/sub broker.
-
-For components that do not have bulk publish or subscribe support, Dapr runtime uses the regular publish and subscribe APIs to send and receive messages one by one. This is still more efficient than directly using the regular publish or subscribe APIs, because applications can still send/receive multiple messages in a single request to/from Dapr.
+For event publish/subscribe, two kinds of network transfers are involved.
+1. From/To *App* To/From *Dapr*.
+1. From/To *Dapr* To/From *Pubsub Broker*.
+
+These are the opportunities where optimization is possible. When optimized, bulk requests are made, which reduce the number of overall calls and thus increase throughput and provide better latency.
+
+On enabling Bulk Publish and/or Bulk Subscribe, the communication between the App and Dapr sidecar (Point 1 above) is optimized for **all components**.
+
+Optimization from Dapr sidecar to the pub/sub broker would depend on a number of factors, for example:
+- If the broker inherently supports Bulk pub/sub
+- If the Dapr component is updated to support the use of bulk APIs provided by the broker.
+
+Currently, the following components are updated to support this level of optimization:
+<table>
+  <tr>
+    <th>Component</th>
+    <th>Bulk Publish</th>
+    <th>Bulk Subscribe</th>
+  </tr>
+  <tr>
+    <td>Kafka</td>
+    <td>Yes</td>
+    <td>Yes</td>
+  </tr>
+  <tr>
+    <td>Azure Servicebus</td>
+    <td>Yes</td>
+    <td>Yes</td>
+  </tr>
+  <tr>
+    <td>Azure Eventhubs</td>
+    <td>Yes</td>
+    <td>Yes</td>
+  </tr>
+</table>
## Demos
diff --git a/daprdocs/content/en/developing-applications/building-blocks/pubsub/pubsub-overview.md b/daprdocs/content/en/developing-applications/building-blocks/pubsub/pubsub-overview.md
index 41c9ac23b6c..ed6b72cc38d 100644
--- a/daprdocs/content/en/developing-applications/building-blocks/pubsub/pubsub-overview.md
+++ b/daprdocs/content/en/developing-applications/building-blocks/pubsub/pubsub-overview.md
@@ -96,6 +96,10 @@ For more information on message routing, read [Dapr pub/sub API reference]({{< r
Sometimes, messages can't be processed because of a variety of possible issues, such as erroneous conditions within the producer or consumer application or an unexpected state change that causes an issue with your application code. Dapr allows developers to set dead letter topics to deal with messages that cannot be delivered to an application. This feature is available on all pub/sub components and prevents consumer applications from endlessly retrying a failed message. For more information, read about [dead letter topics]({{< ref "pubsub-deadletter.md">}})
+### Enabling the outbox pattern
+
+Dapr enables developers to use the outbox pattern for achieving a single transaction across a transactional state store and any message broker. For more information, read [How to enable transactional outbox messaging]({{< ref howto-outbox.md >}}).
+
### Namespace consumer groups
Dapr solves multi-tenancy at-scale with [namespaces for consumer groups]({{< ref howto-namespace >}}). Simply include the `"{namespace}"` value in your component metadata for consumer groups to allow multiple namespaces with applications of the same `app-id` to publish and subscribe to the same message broker.
diff --git a/daprdocs/content/en/developing-applications/building-blocks/service-invocation/howto-invoke-non-dapr-endpoints.md b/daprdocs/content/en/developing-applications/building-blocks/service-invocation/howto-invoke-non-dapr-endpoints.md
index 4c5e0224dde..ec48330e35c 100644
--- a/daprdocs/content/en/developing-applications/building-blocks/service-invocation/howto-invoke-non-dapr-endpoints.md
+++ b/daprdocs/content/en/developing-applications/building-blocks/service-invocation/howto-invoke-non-dapr-endpoints.md
@@ -79,6 +79,52 @@ localhost:3500/v1.0/invoke//method/
curl http://localhost:3602/v1.0/invoke/orderprocessor/method/checkout
```
+## TLS authentication
+
+Using the [HTTPEndpoint resource]({{< ref httpendpoints-schema.md >}}) allows you to use any combination of a root certificate, client certificate and private key according to the authentication requirements of the remote endpoint.
+
+### Example using root certificate
+
+```yaml
+apiVersion: dapr.io/v1alpha1
+kind: HTTPEndpoint
+metadata:
+ name: "external-http-endpoint-tls"
+spec:
+ baseUrl: https://service-invocation-external:443
+ headers:
+ - name: "Accept-Language"
+ value: "en-US"
+ clientTLS:
+ rootCA:
+ secretKeyRef:
+ name: dapr-tls-client
+ key: ca.crt
+```
+
+### Example using client certificate and private key
+
+```yaml
+apiVersion: dapr.io/v1alpha1
+kind: HTTPEndpoint
+metadata:
+ name: "external-http-endpoint-tls"
+spec:
+ baseUrl: https://service-invocation-external:443
+ headers:
+ - name: "Accept-Language"
+ value: "en-US"
+ clientTLS:
+ certificate:
+ secretKeyRef:
+ name: dapr-tls-client
+ key: tls.crt
+ privateKey:
+ secretKeyRef:
+ name: dapr-tls-key
+ key: tls.key
+```
+
## Related Links
- [HTTPEndpoint reference]({{< ref httpendpoints-schema.md >}})
diff --git a/daprdocs/content/en/developing-applications/building-blocks/service-invocation/service-invocation-overview.md b/daprdocs/content/en/developing-applications/building-blocks/service-invocation/service-invocation-overview.md
index edd542ef985..42d6b304d2f 100644
--- a/daprdocs/content/en/developing-applications/building-blocks/service-invocation/service-invocation-overview.md
+++ b/daprdocs/content/en/developing-applications/building-blocks/service-invocation/service-invocation-overview.md
@@ -94,6 +94,21 @@ The diagram below shows an example of how this works. If you have 1 instance of
Dapr can run on a variety of [hosting platforms]({{< ref hosting >}}). To enable service discovery and service invocation, Dapr uses pluggable [name resolution components]({{< ref supported-name-resolution >}}). For example, the Kubernetes name resolution component uses the Kubernetes DNS service to resolve the location of other applications running in the cluster. Self-hosted machines can use the mDNS name resolution component. The Consul name resolution component can be used in any hosting environment, including Kubernetes or self-hosted.
+### Streaming for HTTP service invocation
+
+You can handle data as a stream in HTTP service invocation. This can offer improvements in performance and memory utilization when using Dapr to invoke another service using HTTP with large request or response bodies.
+
+The diagram below demonstrates the six steps of data flow.
+
+
+
+1. Request: "App A" to "Dapr sidecar A"
+1. Request: "Dapr sidecar A" to "Dapr sidecar B"
+1. Request: "Dapr sidecar B" to "App B"
+1. Response: "App B" to "Dapr sidecar B"
+1. Response: "Dapr sidecar B" to "Dapr sidecar A"
+1. Response: "Dapr sidecar A" to "App A"
+
## Example Architecture
Following the above call sequence, suppose you have the applications as described in the [Hello World tutorial](https://github.com/dapr/quickstarts/blob/master/tutorials/hello-world/README.md), where a python app invokes a node.js app. In such a scenario, the python app would be "Service A" , and a Node.js app would be "Service B".
diff --git a/daprdocs/content/en/developing-applications/building-blocks/state-management/howto-outbox.md b/daprdocs/content/en/developing-applications/building-blocks/state-management/howto-outbox.md
new file mode 100644
index 00000000000..c53930f2acd
--- /dev/null
+++ b/daprdocs/content/en/developing-applications/building-blocks/state-management/howto-outbox.md
@@ -0,0 +1,112 @@
+---
+type: docs
+title: "How-To: Enable the transactional outbox pattern"
+linkTitle: "How-To: Enable the transactional outbox pattern"
+weight: 400
+description: "Commit a single transaction across a state store and pub/sub message broker"
+---
+
+The transactional outbox pattern is a well known design pattern for sending notifications regarding changes in an application's state. The transactional outbox pattern uses a single transaction that spans across the database and the message broker delivering the notification.
+
+Developers are faced with many difficult technical challenges when trying to implement this pattern on their own, which often involves writing error-prone central coordination managers that, at most, support a combination of one or two databases and message brokers.
+
+For example, you can use the outbox pattern to:
+1. Write a new user record to an account database.
+1. Send a notification message that the account was successfully created.
+
+With Dapr's outbox support, you can notify subscribers when an application's state is created or updated when calling Dapr's [transactions API]({{< ref "state_api.md#state-transactions" >}}).
+
+The diagram below is an overview of how the outbox feature works:
+
+1) Service A saves/updates state to the state store using a transaction.
+2) A message is written to the broker under the same transaction. When the message is successfully delivered to the message broker, the transaction completes, ensuring the state and message are transacted together.
+3) The message broker delivers the message topic to any subscribers - in this case, Service B.
+
+
+
+## Requirements
+
+The outbox feature can be used with any [transactional state store]({{< ref supported-state-stores >}}) supported by Dapr. All [pub/sub brokers]({{< ref supported-pubsub >}}) are supported with the outbox feature.
+
+{{% alert title="Note" color="primary" %}}
+Message brokers that work with the competing consumer pattern (for example, [Apache Kafka]({{< ref setup-apache-kafka >}})) are encouraged to reduce the chances of duplicate events.
+{{% /alert %}}
+
+## Usage
+
+To enable the outbox feature, add the following required and optional fields on a state store component:
+
+```yaml
+apiVersion: dapr.io/v1alpha1
+kind: Component
+metadata:
+ name: mysql-outbox
+spec:
+ type: state.mysql
+ version: v1
+ metadata:
+ - name: connectionString
+ value: ""
+ - name: outboxPublishPubsub # Required
+ value: "mypubsub"
+ - name: outboxPublishTopic # Required
+ value: "newOrder"
+ - name: outboxPubsub # Optional
+ value: "myOutboxPubsub"
+ - name: outboxDiscardWhenMissingState #Optional. Defaults to false
+ value: false
+```
+
+### Metadata fields
+
+| Name | Required | Default Value | Description |
+| --------------------|-------------|---------------|------------------------------------------------------- |
+| outboxPublishPubsub | Yes | N/A | Sets the name of the pub/sub component to deliver the notifications when publishing state changes
+| outboxPublishTopic | Yes | N/A | Sets the topic that receives the state changes on the pub/sub configured with `outboxPublishPubsub`. The message body will be a state transaction item for an `insert` or `update` operation
+| outboxPubsub | No | `outboxPublishPubsub` | Sets the pub/sub component used by Dapr to coordinate the state and pub/sub transactions. If not set, the pub/sub component configured with `outboxPublishPubsub` is used. This is useful if you want to separate the pub/sub component used to send the notification state changes from the one used to coordinate the transaction
+| outboxDiscardWhenMissingState | No | `false` | By setting `outboxDiscardWhenMissingState` to `true`, Dapr discards the transaction if it cannot find the state in the database and does not retry. This setting can be useful if the state store data has been deleted for any reason before Dapr was able to deliver the message and you would like Dapr to drop the items from the pub/sub and stop retrying to fetch the state
+
+### Combining outbox and non-outbox messages on the same state store
+
+If you want to use the same state store for sending both outbox and non-outbox messages, simply define two state store components that connect to the same state store, where one has the outbox feature and the other does not.
+
+#### MySQL state store without outbox
+
+```yaml
+apiVersion: dapr.io/v1alpha1
+kind: Component
+metadata:
+ name: mysql
+spec:
+ type: state.mysql
+ version: v1
+ metadata:
+ - name: connectionString
+ value: ""
+```
+
+#### MySQL state store with outbox
+
+```yaml
+apiVersion: dapr.io/v1alpha1
+kind: Component
+metadata:
+ name: mysql-outbox
+spec:
+ type: state.mysql
+ version: v1
+ metadata:
+ - name: connectionString
+ value: ""
+ - name: outboxPublishPubsub # Required
+ value: "mypubsub"
+ - name: outboxPublishTopic # Required
+ value: "newOrder"
+```
+
+## Demo
+
+Watch [this video for an overview of the outbox pattern](https://youtu.be/rTovKpG0rhY?t=1338):
+
+
+
diff --git a/daprdocs/content/en/developing-applications/building-blocks/state-management/state-management-overview.md b/daprdocs/content/en/developing-applications/building-blocks/state-management/state-management-overview.md
index afc6bd5f1e4..89c31dc5e1b 100644
--- a/daprdocs/content/en/developing-applications/building-blocks/state-management/state-management-overview.md
+++ b/daprdocs/content/en/developing-applications/building-blocks/state-management/state-management-overview.md
@@ -116,6 +116,10 @@ Dapr enables states to be:
For more details read [How-To: Share state between applications]({{< ref howto-share-state.md >}}),
+### Enabling the outbox pattern
+
+Dapr enables developers to use the outbox pattern for achieving a single transaction across a transactional state store and any message broker. For more information, read [How to enable transactional outbox messaging]({{< ref howto-outbox.md >}}).
+
### Querying state
There are two ways to query the state:
diff --git a/daprdocs/content/en/developing-applications/building-blocks/workflow/howto-author-workflow.md b/daprdocs/content/en/developing-applications/building-blocks/workflow/howto-author-workflow.md
index 6caa7f3ffc7..98f0df760e9 100644
--- a/daprdocs/content/en/developing-applications/building-blocks/workflow/howto-author-workflow.md
+++ b/daprdocs/content/en/developing-applications/building-blocks/workflow/howto-author-workflow.md
@@ -6,6 +6,10 @@ weight: 5000
description: "Learn how to develop and author workflows"
---
+{{% alert title="Note" color="primary" %}}
+Dapr Workflow is currently in beta. [See known limitations for {{% dapr-latest-version cli="true" %}}]({{< ref "workflow-overview.md#limitations" >}}).
+{{% /alert %}}
+
This article provides a high-level overview of how to author workflows that are executed by the Dapr Workflow engine.
{{% alert title="Note" color="primary" %}}
@@ -30,7 +34,25 @@ The Dapr sidecar doesn’t load any workflow definitions. Rather, the sidecar si
[Workflow activities]({{< ref "workflow-features-concepts.md#workflow-activites" >}}) are the basic unit of work in a workflow and are the tasks that get orchestrated in the business process.
-{{< tabs ".NET" Python >}}
+{{< tabs Python ".NET" Java >}}
+
+{{% codetab %}}
+
+
+
+Define the workflow activities you'd like your workflow to perform. Activities are a function definition and can take inputs and outputs. The following example creates a counter (activity) called `hello_act` that notifies users of the current counter value. `hello_act` is a function derived from a class called `WorkflowActivityContext`.
+
+```python
+def hello_act(ctx: WorkflowActivityContext, input):
+ global counter
+ counter += input
+ print(f'New counter value is: {counter}!', flush=True)
+```
+
+[See the `hello_act` workflow activity in context.](https://github.com/dapr/python-sdk/blob/master/examples/demo_workflow/app.py#LL40C1-L43C59)
+
+
+{{% /codetab %}}
{{% codetab %}}
@@ -102,29 +124,76 @@ public class ProcessPaymentActivity : WorkflowActivity
{{% codetab %}}
-
+
-Define the workflow activities you'd like your workflow to perform. Activities are a function definition and can take inputs and outputs. The following example creates a counter (activity) called `hello_act` that notifies users of the current counter value. `hello_act` is a function derived from a class called `WorkflowActivityContext`.
+Define the workflow activities you'd like your workflow to perform. Activities are wrapped in the public `DemoWorkflowActivity` class, which implements the workflow activities.
-```python
-def hello_act(ctx: WorkflowActivityContext, input):
- global counter
- counter += input
- print(f'New counter value is: {counter}!', flush=True)
-```
+```java
+@JsonAutoDetect(fieldVisibility = JsonAutoDetect.Visibility.ANY)
+public class DemoWorkflowActivity implements WorkflowActivity {
-[See the `hello_act` workflow activity in context.](https://github.com/dapr/python-sdk/blob/master/examples/demo_workflow/app.py#LL40C1-L43C59)
+ @Override
+ public DemoActivityOutput run(WorkflowActivityContext ctx) {
+ Logger logger = LoggerFactory.getLogger(DemoWorkflowActivity.class);
+ logger.info("Starting Activity: " + ctx.getName());
+ var message = ctx.getInput(DemoActivityInput.class).getMessage();
+ var newMessage = message + " World!, from Activity";
+ logger.info("Message Received from input: " + message);
+ logger.info("Sending message to output: " + newMessage);
+
+ logger.info("Sleeping for 5 seconds to simulate long running operation...");
+
+ try {
+ TimeUnit.SECONDS.sleep(5);
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+
+
+ logger.info("Activity finished");
+
+ var output = new DemoActivityOutput(message, newMessage);
+ logger.info("Activity returned: " + output);
+
+ return output;
+ }
+}
+```
+
+[See the Java SDK workflow activity example in context.](https://github.com/dapr/java-sdk/blob/master/examples/src/main/java/io/dapr/examples/workflows/DemoWorkflowActivity.java)
{{% /codetab %}}
+
{{< /tabs >}}
## Write the workflow
Next, register and call the activities in a workflow.
-{{< tabs ".NET" Python >}}
+{{< tabs Python ".NET" Java >}}
+
+{{% codetab %}}
+
+
+
+The `hello_world_wf` function is derived from a class called `DaprWorkflowContext` with input and output parameter types. It also includes a `yield` statement that does the heavy lifting of the workflow and calls the workflow activities.
+
+```python
+def hello_world_wf(ctx: DaprWorkflowContext, input):
+ print(f'{input}')
+ yield ctx.call_activity(hello_act, input=1)
+ yield ctx.call_activity(hello_act, input=10)
+ yield ctx.wait_for_external_event("event1")
+ yield ctx.call_activity(hello_act, input=100)
+ yield ctx.call_activity(hello_act, input=1000)
+```
+
+[See the `hello_world_wf` workflow in context.](https://github.com/dapr/python-sdk/blob/master/examples/demo_workflow/app.py#LL32C1-L38C51)
+
+
+{{% /codetab %}}
{{% codetab %}}
@@ -171,103 +240,42 @@ The `OrderProcessingWorkflow` class is derived from a base class called `Workflo
{{% codetab %}}
-
+
-The `hello_world_wf` function is derived from a class called `DaprWorkflowContext` with input and output parameter types. It also includes a `yield` statement that does the heavy lifting of the workflow and calls the workflow activities.
-
-```python
-def hello_world_wf(ctx: DaprWorkflowContext, input):
- print(f'{input}')
- yield ctx.call_activity(hello_act, input=1)
- yield ctx.call_activity(hello_act, input=10)
- yield ctx.wait_for_external_event("event1")
- yield ctx.call_activity(hello_act, input=100)
- yield ctx.call_activity(hello_act, input=1000)
-```
-
-[See the `hello_world_wf` workflow in context.](https://github.com/dapr/python-sdk/blob/master/examples/demo_workflow/app.py#LL32C1-L38C51)
-
-
-{{% /codetab %}}
-
-{{< /tabs >}}
-
-## Write the application
-
-Finally, compose the application using the workflow.
-
-{{< tabs ".NET" Python >}}
-
-{{% codetab %}}
+Next, register the workflow with the `WorkflowRuntimeBuilder` and start the workflow runtime.
-
+```java
+public class DemoWorkflowWorker {
-[In the following `Program.cs` example](https://github.com/dapr/dotnet-sdk/blob/master/examples/Workflow/WorkflowConsoleApp/Program.cs), for a basic ASP.NET order processing application using the .NET SDK, your project code would include:
+ public static void main(String[] args) throws Exception {
-- A NuGet package called `Dapr.Workflow` to receive the .NET SDK capabilities
-- A builder with an extension method called `AddDaprWorkflow`
- - This will allow you to register workflows and workflow activities (tasks that workflows can schedule)
-- HTTP API calls
- - One for submitting a new order
- - One for checking the status of an existing order
+ // Register the Workflow with the builder.
+ WorkflowRuntimeBuilder builder = new WorkflowRuntimeBuilder().registerWorkflow(DemoWorkflow.class);
+ builder.registerActivity(DemoWorkflowActivity.class);
-```csharp
-using Dapr.Workflow;
-//...
+ // Build and then start the workflow runtime pulling and executing tasks
+ try (WorkflowRuntime runtime = builder.build()) {
+ System.out.println("Start workflow runtime");
+ runtime.start();
+ }
-// Dapr Workflows are registered as part of the service configuration
-builder.Services.AddDaprWorkflow(options =>
-{
- // Note that it's also possible to register a lambda function as the workflow
- // or activity implementation instead of a class.
- options.RegisterWorkflow();
+ System.exit(0);
+ }
+}
+```
- // These are the activities that get invoked by the workflow(s).
- options.RegisterActivity();
- options.RegisterActivity();
- options.RegisterActivity();
-});
+[See the Java SDK workflow in context.](https://github.com/dapr/java-sdk/blob/master/examples/src/main/java/io/dapr/examples/workflows/DemoWorkflowWorker.java)
-WebApplication app = builder.Build();
-// POST starts new order workflow instance
-app.MapPost("/orders", async (WorkflowEngineClient client, [FromBody] OrderPayload orderInfo) =>
-{
- if (orderInfo?.Name == null)
- {
- return Results.BadRequest(new
- {
- message = "Order data was missing from the request",
- example = new OrderPayload("Paperclips", 99.95),
- });
- }
-
-//...
-});
-
-// GET fetches state for order workflow to report status
-app.MapGet("/orders/{orderId}", async (string orderId, WorkflowEngineClient client) =>
-{
- WorkflowState state = await client.GetWorkflowStateAsync(orderId, true);
- if (!state.Exists)
- {
- return Results.NotFound($"No order with ID = '{orderId}' was found.");
- }
+{{% /codetab %}}
- var httpResponsePayload = new
- {
- details = state.ReadInputAs(),
- status = state.RuntimeStatus.ToString(),
- result = state.ReadOutputAs(),
- };
+{{< /tabs >}}
-//...
-}).WithName("GetOrderInfoEndpoint");
+## Write the application
-app.Run();
-```
+Finally, compose the application using the workflow.
-{{% /codetab %}}
+{{< tabs Python ".NET" Java >}}
{{% codetab %}}
@@ -356,6 +364,124 @@ if __name__ == '__main__':
```
+{{% /codetab %}}
+
+{{% codetab %}}
+
+
+
+[In the following `Program.cs` example](https://github.com/dapr/dotnet-sdk/blob/master/examples/Workflow/WorkflowConsoleApp/Program.cs), for a basic ASP.NET order processing application using the .NET SDK, your project code would include:
+
+- A NuGet package called `Dapr.Workflow` to receive the .NET SDK capabilities
+- A builder with an extension method called `AddDaprWorkflow`
+ - This will allow you to register workflows and workflow activities (tasks that workflows can schedule)
+- HTTP API calls
+ - One for submitting a new order
+ - One for checking the status of an existing order
+
+```csharp
+using Dapr.Workflow;
+//...
+
+// Dapr Workflows are registered as part of the service configuration
+builder.Services.AddDaprWorkflow(options =>
+{
+ // Note that it's also possible to register a lambda function as the workflow
+ // or activity implementation instead of a class.
+ options.RegisterWorkflow<OrderProcessingWorkflow>();
+
+ // These are the activities that get invoked by the workflow(s).
+ options.RegisterActivity<NotifyActivity>();
+ options.RegisterActivity<ReserveInventoryActivity>();
+ options.RegisterActivity<ProcessPaymentActivity>();
+});
+
+WebApplication app = builder.Build();
+
+// POST starts new order workflow instance
+app.MapPost("/orders", async (DaprWorkflowClient client, [FromBody] OrderPayload orderInfo) =>
+{
+ if (orderInfo?.Name == null)
+ {
+ return Results.BadRequest(new
+ {
+ message = "Order data was missing from the request",
+ example = new OrderPayload("Paperclips", 99.95),
+ });
+ }
+
+//...
+});
+
+// GET fetches state for order workflow to report status
+app.MapGet("/orders/{orderId}", async (string orderId, DaprWorkflowClient client) =>
+{
+ WorkflowState state = await client.GetWorkflowStateAsync(orderId, true);
+ if (!state.Exists)
+ {
+ return Results.NotFound($"No order with ID = '{orderId}' was found.");
+ }
+
+ var httpResponsePayload = new
+ {
+ details = state.ReadInputAs<OrderPayload>(),
+ status = state.RuntimeStatus.ToString(),
+ result = state.ReadOutputAs<OrderResult>(),
+ };
+
+//...
+}).WithName("GetOrderInfoEndpoint");
+
+app.Run();
+```
+
+{{% /codetab %}}
+
+{{% codetab %}}
+
+
+
+[As in the following example](https://github.com/dapr/java-sdk/blob/master/examples/src/main/java/io/dapr/examples/workflows/DemoWorkflow.java), a hello-world application using the Java SDK and Dapr Workflow would include:
+
+- A Java package called `io.dapr.workflows.client` to receive the Java SDK client capabilities.
+- An import of `io.dapr.workflows.Workflow`
+- The `DemoWorkflow` class which extends `Workflow`
+- Creating the workflow with input and output.
+- API calls. In the example below, these calls start and call the workflow activities.
+
+```java
+package io.dapr.examples.workflows;
+
+import com.microsoft.durabletask.CompositeTaskFailedException;
+import com.microsoft.durabletask.Task;
+import com.microsoft.durabletask.TaskCanceledException;
+import io.dapr.workflows.Workflow;
+import io.dapr.workflows.WorkflowStub;
+
+import java.time.Duration;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * Implementation of the DemoWorkflow for the server side.
+ */
+public class DemoWorkflow extends Workflow {
+ @Override
+ public WorkflowStub create() {
+ return ctx -> {
+ ctx.getLogger().info("Starting Workflow: " + ctx.getName());
+ // ...
+ ctx.getLogger().info("Calling Activity...");
+ var input = new DemoActivityInput("Hello Activity!");
+ var output = ctx.callActivity(DemoWorkflowActivity.class.getName(), input, DemoActivityOutput.class).await();
+ // ...
+ };
+ }
+}
+```
+
+[See the full Java SDK workflow example in context.](https://github.com/dapr/java-sdk/blob/master/examples/src/main/java/io/dapr/examples/workflows/DemoWorkflow.java)
+
{{% /codetab %}}
@@ -377,5 +503,6 @@ Now that you've authored a workflow, learn how to manage it.
- [Workflow overview]({{< ref workflow-overview.md >}})
- [Workflow API reference]({{< ref workflow_api.md >}})
- Try out the full SDK examples:
- - [.NET example](https://github.com/dapr/dotnet-sdk/tree/master/examples/Workflow)
- [Python example](https://github.com/dapr/python-sdk/tree/master/examples/demo_workflow)
+ - [.NET example](https://github.com/dapr/dotnet-sdk/tree/master/examples/Workflow)
+ - [Java example](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/workflows)
diff --git a/daprdocs/content/en/developing-applications/building-blocks/workflow/howto-manage-workflow.md b/daprdocs/content/en/developing-applications/building-blocks/workflow/howto-manage-workflow.md
index 99cb87c9b58..fb7ad9d57c5 100644
--- a/daprdocs/content/en/developing-applications/building-blocks/workflow/howto-manage-workflow.md
+++ b/daprdocs/content/en/developing-applications/building-blocks/workflow/howto-manage-workflow.md
@@ -6,45 +6,13 @@ weight: 6000
description: Manage and run workflows
---
-Now that you've [authored the workflow and its activities in your application]({{< ref howto-author-workflow.md >}}), you can start, terminate, and get information about the workflow using HTTP API calls. For more information, read the [workflow API reference]({{< ref workflow_api.md >}}).
-
-{{< tabs ".NET" Python HTTP >}}
-
-
-{{% codetab %}}
-
-Manage your workflow within your code. In the `OrderProcessingWorkflow` example from the [Author a workflow]({{< ref "howto-author-workflow.md#write-the-application" >}}) guide, the workflow is registered in the code. You can now start, terminate, and get information about a running workflow:
-
-```csharp
-string orderId = "exampleOrderId";
-string workflowComponent = "dapr";
-string workflowName = "OrderProcessingWorkflow";
-OrderPayload input = new OrderPayload("Paperclips", 99.95);
-Dictionary workflowOptions; // This is an optional parameter
-
-// Start the workflow. This returns back a "StartWorkflowResponse" which contains the instance ID for the particular workflow instance.
-StartWorkflowResponse startResponse = await daprClient.StartWorkflowAsync(orderId, workflowComponent, workflowName, input, workflowOptions);
-
-// Get information on the workflow. This response contains information such as the status of the workflow, when it started, and more!
-GetWorkflowResponse getResponse = await daprClient.GetWorkflowAsync(orderId, workflowComponent, workflowName);
-
-// Terminate the workflow
-await daprClient.TerminateWorkflowAsync(orderId, workflowComponent);
-
-// Raise an event (an incoming purchase order) that your workflow will wait for. This returns the item waiting to be purchased.
-await daprClient.RaiseWorkflowEventAsync(orderId, workflowComponent, workflowName, input);
-
-// Pause
-await daprClient.PauseWorkflowAsync(orderId, workflowComponent);
-
-// Resume
-await daprClient.ResumeWorkflowAsync(orderId, workflowComponent);
+{{% alert title="Note" color="primary" %}}
+Dapr Workflow is currently in beta. [See known limitations for {{% dapr-latest-version cli="true" %}}]({{< ref "workflow-overview.md#limitations" >}}).
+{{% /alert %}}
-// Purge
-await daprClient.PurgeWorkflowAsync(orderId, workflowComponent);
-```
+Now that you've [authored the workflow and its activities in your application]({{< ref howto-author-workflow.md >}}), you can start, terminate, and get information about the workflow using HTTP API calls. For more information, read the [workflow API reference]({{< ref workflow_api.md >}}).
-{{% /codetab %}}
+{{< tabs Python ".NET" Java HTTP >}}
{{% codetab %}}
@@ -95,6 +63,107 @@ d.terminate_workflow(instance_id=instanceId, workflow_component=workflowComponen
{{% /codetab %}}
+
+{{% codetab %}}
+
+Manage your workflow within your code. In the `OrderProcessingWorkflow` example from the [Author a workflow]({{< ref "howto-author-workflow.md#write-the-application" >}}) guide, the workflow is registered in the code. You can now start, terminate, and get information about a running workflow:
+
+```csharp
+string orderId = "exampleOrderId";
+string workflowComponent = "dapr";
+string workflowName = "OrderProcessingWorkflow";
+OrderPayload input = new OrderPayload("Paperclips", 99.95);
+Dictionary<string, string> workflowOptions; // This is an optional parameter
+
+// Start the workflow. This returns back a "StartWorkflowResponse" which contains the instance ID for the particular workflow instance.
+StartWorkflowResponse startResponse = await daprClient.StartWorkflowAsync(orderId, workflowComponent, workflowName, input, workflowOptions);
+
+// Get information on the workflow. This response contains information such as the status of the workflow, when it started, and more!
+GetWorkflowResponse getResponse = await daprClient.GetWorkflowAsync(orderId, workflowComponent, workflowName);
+
+// Terminate the workflow
+await daprClient.TerminateWorkflowAsync(orderId, workflowComponent);
+
+// Raise an event (an incoming purchase order) that your workflow will wait for. This returns the item waiting to be purchased.
+await daprClient.RaiseWorkflowEventAsync(orderId, workflowComponent, workflowName, input);
+
+// Pause
+await daprClient.PauseWorkflowAsync(orderId, workflowComponent);
+
+// Resume
+await daprClient.ResumeWorkflowAsync(orderId, workflowComponent);
+
+// Purge the workflow, removing all inbox and history information from associated instance
+await daprClient.PurgeWorkflowAsync(orderId, workflowComponent);
+```
+
+{{% /codetab %}}
+
+
+{{% codetab %}}
+
+Manage your workflow within your code. [In the workflow example from the Java SDK](https://github.com/dapr/java-sdk/blob/master/examples/src/main/java/io/dapr/examples/workflows/DemoWorkflowClient.java), the workflow is registered in the code using the following APIs:
+
+- **scheduleNewWorkflow**: Starts a new workflow instance
+- **getInstanceState**: Get information on the status of the workflow
+- **waitForInstanceStart**: Waits for the workflow instance to start running
+- **raiseEvent**: Raises events/tasks for the running workflow instance
+- **waitForInstanceCompletion**: Waits for the workflow to complete its tasks
+- **purgeInstance**: Removes all metadata related to a specific workflow instance
+- **terminateWorkflow**: Terminates the workflow
+
+```java
+package io.dapr.examples.workflows;
+
+import io.dapr.workflows.client.DaprWorkflowClient;
+import io.dapr.workflows.client.WorkflowInstanceStatus;
+
+// ...
+public class DemoWorkflowClient {
+
+ // ...
+ public static void main(String[] args) throws InterruptedException {
+ DaprWorkflowClient client = new DaprWorkflowClient();
+
+ try (client) {
+ // Start a workflow
+ String instanceId = client.scheduleNewWorkflow(DemoWorkflow.class, "input data");
+
+ // Get status information on the workflow
+ WorkflowInstanceStatus workflowMetadata = client.getInstanceState(instanceId, true);
+
+ // Wait for the workflow instance to start
+ try {
+ WorkflowInstanceStatus waitForInstanceStartResult =
+ client.waitForInstanceStart(instanceId, Duration.ofSeconds(60), true);
+ }
+
+ // Raise an event for the workflow; you can raise several events in parallel
+ client.raiseEvent(instanceId, "TestEvent", "TestEventPayload");
+ client.raiseEvent(instanceId, "event1", "TestEvent 1 Payload");
+ client.raiseEvent(instanceId, "event2", "TestEvent 2 Payload");
+ client.raiseEvent(instanceId, "event3", "TestEvent 3 Payload");
+
+ // Wait for workflow to complete running through tasks
+ try {
+ WorkflowInstanceStatus waitForInstanceCompletionResult =
+ client.waitForInstanceCompletion(instanceId, Duration.ofSeconds(60), true);
+ }
+
+ // Purge the workflow instance, removing all metadata associated with it
+ boolean purgeResult = client.purgeInstance(instanceId);
+
+ // Terminate the workflow instance
+ client.terminateWorkflow(instanceId, null);
+
+ System.exit(0);
+ }
+}
+```
+
+{{% /codetab %}}
+
{{% codetab %}}
@@ -106,7 +175,7 @@ Manage your workflow using HTTP calls. The example below plugs in the properties
To start your workflow with an ID `12345678`, run:
```http
-POST http://localhost:3500/v1.0-alpha1/workflows/dapr/OrderProcessingWorkflow/start?instanceID=12345678
+POST http://localhost:3500/v1.0-beta1/workflows/dapr/OrderProcessingWorkflow/start?instanceID=12345678
```
Note that workflow instance IDs can only contain alphanumeric characters, underscores, and dashes.
@@ -116,7 +185,7 @@ Note that workflow instance IDs can only contain alphanumeric characters, unders
To terminate your workflow with an ID `12345678`, run:
```http
-POST http://localhost:3500/v1.0-alpha1/workflows/dapr/12345678/terminate
+POST http://localhost:3500/v1.0-beta1/workflows/dapr/12345678/terminate
```
### Raise an event
@@ -124,7 +193,7 @@ POST http://localhost:3500/v1.0-alpha1/workflows/dapr/12345678/terminate
For workflow components that support subscribing to external events, such as the Dapr Workflow engine, you can use the following "raise event" API to deliver a named event to a specific workflow instance.
```http
-POST http://localhost:3500/v1.0-alpha1/workflows///raiseEvent/
+POST http://localhost:3500/v1.0-beta1/workflows/<workflowComponentName>/<instanceID>/raiseEvent/<eventName>
```
> An `eventName` can be any function.
@@ -134,13 +203,13 @@ POST http://localhost:3500/v1.0-alpha1/workflows//}}).
@@ -172,6 +241,8 @@ Learn more about these HTTP calls in the [workflow API reference guide]({{< ref
## Next steps
- [Try out the Workflow quickstart]({{< ref workflow-quickstart.md >}})
- Try out the full SDK examples:
- - [.NET example](https://github.com/dapr/dotnet-sdk/tree/master/examples/Workflow)
- [Python example](https://github.com/dapr/python-sdk/blob/master/examples/demo_workflow/app.py)
+ - [.NET example](https://github.com/dapr/dotnet-sdk/tree/master/examples/Workflow)
+ - [Java example](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/workflows)
+
- [Workflow API reference]({{< ref workflow_api.md >}})
diff --git a/daprdocs/content/en/developing-applications/building-blocks/workflow/workflow-architecture.md b/daprdocs/content/en/developing-applications/building-blocks/workflow/workflow-architecture.md
index da8bf0a44e1..18ec9110b30 100644
--- a/daprdocs/content/en/developing-applications/building-blocks/workflow/workflow-architecture.md
+++ b/daprdocs/content/en/developing-applications/building-blocks/workflow/workflow-architecture.md
@@ -6,6 +6,10 @@ weight: 4000
description: "The Dapr Workflow engine architecture"
---
+{{% alert title="Note" color="primary" %}}
+Dapr Workflow is currently in beta. [See known limitations for {{% dapr-latest-version cli="true" %}}]({{< ref "workflow-overview.md#limitations" >}}).
+{{% /alert %}}
+
[Dapr Workflows]({{< ref "workflow-overview.md" >}}) allow developers to define workflows using ordinary code in a variety of programming languages. The workflow engine runs inside of the Dapr sidecar and orchestrates workflow code deployed as part of your application. This article describes:
- The architecture of the Dapr Workflow engine
@@ -189,4 +193,7 @@ See the [Reminder usage and execution guarantees section]({{< ref "workflow-arch
- [Workflow overview]({{< ref workflow-overview.md >}})
- [Workflow API reference]({{< ref workflow_api.md >}})
- [Try out the Workflow quickstart]({{< ref workflow-quickstart.md >}})
-- [Try out the .NET example](https://github.com/dapr/dotnet-sdk/tree/master/examples/Workflow)
+- Try out the following examples:
+ - [Python](https://github.com/dapr/python-sdk/tree/master/examples/demo_workflow)
+ - [.NET](https://github.com/dapr/dotnet-sdk/tree/master/examples/Workflow)
+ - [Java](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/workflows)
\ No newline at end of file
diff --git a/daprdocs/content/en/developing-applications/building-blocks/workflow/workflow-features-concepts.md b/daprdocs/content/en/developing-applications/building-blocks/workflow/workflow-features-concepts.md
index c8e3de13187..ce39d4bac96 100644
--- a/daprdocs/content/en/developing-applications/building-blocks/workflow/workflow-features-concepts.md
+++ b/daprdocs/content/en/developing-applications/building-blocks/workflow/workflow-features-concepts.md
@@ -6,6 +6,10 @@ weight: 2000
description: "Learn more about the Dapr Workflow features and concepts"
---
+{{% alert title="Note" color="primary" %}}
+Dapr Workflow is currently in beta. [See known limitations for {{% dapr-latest-version cli="true" %}}]({{< ref "workflow-overview.md#limitations" >}}).
+{{% /alert %}}
+
Now that you've learned about the [workflow building block]({{< ref workflow-overview.md >}}) at a high level, let's deep dive into the features and concepts included with the Dapr Workflow engine and SDKs. Dapr Workflow exposes several core features and concepts which are common across all supported languages.
{{% alert title="Note" color="primary" %}}
@@ -158,7 +162,7 @@ APIs that generate random numbers, random UUIDs, or the current date are _non-de
For example, instead of this:
-{{< tabs ".NET" >}}
+{{< tabs ".NET" Java >}}
{{% codetab %}}
@@ -171,11 +175,22 @@ string randomString = GetRandomString();
{{% /codetab %}}
+{{% codetab %}}
+
+```java
+// DON'T DO THIS!
+Instant currentTime = Instant.now();
+UUID newIdentifier = UUID.randomUUID();
+String randomString = GetRandomString();
+```
+
+{{% /codetab %}}
+
{{< /tabs >}}
Do this:
-{{< tabs ".NET" >}}
+{{< tabs ".NET" Java >}}
{{% codetab %}}
@@ -188,6 +203,17 @@ string randomString = await context.CallActivityAsync("GetRandomString")
{{% /codetab %}}
+{{% codetab %}}
+
+```java
+// Do this!!
+Instant currentTime = context.getCurrentInstant();
+UUID newIdentifier = context.newUuid();
+String randomString = context.callActivity(GetRandomString.class.getName(), String.class).await();
+```
+
+{{% /codetab %}}
+
{{< /tabs >}}
@@ -198,20 +224,58 @@ Instead, workflows should interact with external state _indirectly_ using workfl
For example, instead of this:
+{{< tabs ".NET" Java >}}
+
+{{% codetab %}}
+
```csharp
// DON'T DO THIS!
string configuration = Environment.GetEnvironmentVariable("MY_CONFIGURATION")!;
string data = await new HttpClient().GetStringAsync("https://example.com/api/data");
```
+{{% /codetab %}}
+
+{{% codetab %}}
+
+```java
+// DON'T DO THIS!
+String configuration = System.getenv("MY_CONFIGURATION");
+
+HttpRequest request = HttpRequest.newBuilder().uri(new URI("https://postman-echo.com/post")).GET().build();
+HttpResponse<String> response = HttpClient.newBuilder().build().send(request, HttpResponse.BodyHandlers.ofString());
+```
+
+{{% /codetab %}}
+
+{{< /tabs >}}
Do this:
+{{< tabs ".NET" Java >}}
+
+{{% codetab %}}
+
```csharp
// Do this!!
string configuration = workflowInput.Configuration; // imaginary workflow input argument
string data = await context.CallActivityAsync("MakeHttpCall", "https://example.com/api/data");
```
+{{% /codetab %}}
+
+{{% codetab %}}
+
+```java
+// Do this!!
+String configuration = ctx.getInput(InputType.class).getConfiguration(); // imaginary workflow input argument
+String data = ctx.callActivity(MakeHttpCall.class, "https://example.com/api/data", String.class).await();
+```
+
+{{% /codetab %}}
+
+{{< /tabs >}}
+
+
#### Workflow functions must execute only on the workflow dispatch thread.
The implementation of each language SDK requires that all workflow function operations operate on the same thread (goroutine, etc.) that the function was scheduled on. Workflow functions must never:
- Schedule background threads, or
@@ -221,20 +285,58 @@ Failure to follow this rule could result in undefined behavior. Any background p
For example, instead of this:
+{{< tabs ".NET" Java >}}
+
+{{% codetab %}}
+
```csharp
// DON'T DO THIS!
Task t = Task.Run(() => context.CallActivityAsync("DoSomething"));
await context.CreateTimer(5000).ConfigureAwait(false);
```
+{{% /codetab %}}
+
+{{% codetab %}}
+
+```java
+// DON'T DO THIS!
+new Thread(() -> {
+ ctx.callActivity(DoSomethingActivity.class.getName()).await();
+}).start();
+ctx.createTimer(Duration.ofSeconds(5)).await();
+```
+
+{{% /codetab %}}
+
+{{< /tabs >}}
Do this:
+{{< tabs ".NET" Java >}}
+
+{{% codetab %}}
+
```csharp
// Do this!!
Task t = context.CallActivityAsync("DoSomething");
await context.CreateTimer(5000).ConfigureAwait(true);
```
+{{% /codetab %}}
+
+{{% codetab %}}
+
+```java
+// Do this!!
+ctx.callActivity(DoSomethingActivity.class.getName()).await();
+ctx.createTimer(Duration.ofSeconds(5)).await();
+```
+
+{{% /codetab %}}
+
+{{< /tabs >}}
+
+
### Updating workflow code
Make sure updates you make to the workflow code maintain its determinism. A couple examples of code updates that can break workflow determinism:
diff --git a/daprdocs/content/en/developing-applications/building-blocks/workflow/workflow-overview.md b/daprdocs/content/en/developing-applications/building-blocks/workflow/workflow-overview.md
index 9f70500c01f..923bc37947e 100644
--- a/daprdocs/content/en/developing-applications/building-blocks/workflow/workflow-overview.md
+++ b/daprdocs/content/en/developing-applications/building-blocks/workflow/workflow-overview.md
@@ -7,7 +7,7 @@ description: "Overview of Dapr Workflow"
---
{{% alert title="Note" color="primary" %}}
-Dapr Workflow is currently in alpha.
+Dapr Workflow is currently in beta. [See known limitations for {{% dapr-latest-version cli="true" %}}]({{< ref "#limitations" >}}).
{{% /alert %}}
Dapr workflow makes it easy for developers to write business logic and integrations in a reliable way. Since Dapr workflows are stateful, they support long-running and fault-tolerant applications, ideal for orchestrating microservices. Dapr workflow works seamlessly with other Dapr building blocks, such as service invocation, pub/sub, state management, and bindings.
@@ -83,9 +83,9 @@ You can use the following SDKs to author a workflow.
| Language stack | Package |
| - | - |
-| .NET | [Dapr.Workflow](https://www.nuget.org/profiles/dapr.io) |
| Python | [dapr-ext-workflow](https://github.com/dapr/python-sdk/tree/master/ext/dapr-ext-workflow) |
-
+| .NET | [Dapr.Workflow](https://www.nuget.org/profiles/dapr.io) |
+| Java | [io.dapr.workflows](https://dapr.github.io/java-sdk/io/dapr/workflows/package-summary.html) |
## Try out workflows
@@ -95,15 +95,23 @@ Want to put workflows to the test? Walk through the following quickstart and tut
| Quickstart/tutorial | Description |
| ------------------- | ----------- |
-| [Workflow quickstart]({{< ref workflow-quickstart.md >}}) | Run a .NET workflow application with four workflow activities to see Dapr Workflow in action |
-| [Workflow .NET SDK example](https://github.com/dapr/dotnet-sdk/tree/master/examples/Workflow) | Learn how to create a Dapr Workflow and invoke it using ASP.NET Core web APIs. |
+| [Workflow quickstart]({{< ref workflow-quickstart.md >}}) | Run a workflow application with four workflow activities to see Dapr Workflow in action |
| [Workflow Python SDK example](https://github.com/dapr/python-sdk/tree/master/examples/demo_workflow) | Learn how to create a Dapr Workflow and invoke it using the Python `DaprClient` package. |
+| [Workflow .NET SDK example](https://github.com/dapr/dotnet-sdk/tree/master/examples/Workflow) | Learn how to create a Dapr Workflow and invoke it using ASP.NET Core web APIs. |
+| [Workflow Java SDK example](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/workflows) | Learn how to create a Dapr Workflow and invoke it using the Java `io.dapr.workflows` package. |
### Start using workflows directly in your app
Want to skip the quickstarts? Not a problem. You can try out the workflow building block directly in your application. After [Dapr is installed]({{< ref install-dapr-cli.md >}}), you can begin using workflows, starting with [how to author a workflow]({{< ref howto-author-workflow.md >}}).
+## Limitations
+
+With Dapr Workflow in beta stage comes the following limitation(s):
+
+- **State stores:** For the {{% dapr-latest-version cli="true" %}} beta release of Dapr Workflow, you're not able to use NoSQL databases. Only SQL databases are supported in the latest release.
+- **Application instances:** For the {{% dapr-latest-version cli="true" %}} beta release of Dapr Workflow, only a maximum of 2 application instances is supported.
+
## Watch the demo
Watch [this video for an overview on Dapr Workflow](https://youtu.be/s1p9MNl4VGo?t=131):
@@ -120,3 +128,4 @@ Watch [this video for an overview on Dapr Workflow](https://youtu.be/s1p9MNl4VGo
- Try out the full SDK examples:
- [.NET example](https://github.com/dapr/dotnet-sdk/tree/master/examples/Workflow)
- [Python example](https://github.com/dapr/python-sdk/tree/master/examples/demo_workflow)
+ - [Java example](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/workflows)
diff --git a/daprdocs/content/en/developing-applications/building-blocks/workflow/workflow-patterns.md b/daprdocs/content/en/developing-applications/building-blocks/workflow/workflow-patterns.md
index 4ff10782be4..9d23a64062f 100644
--- a/daprdocs/content/en/developing-applications/building-blocks/workflow/workflow-patterns.md
+++ b/daprdocs/content/en/developing-applications/building-blocks/workflow/workflow-patterns.md
@@ -25,42 +25,7 @@ While the pattern is simple, there are many complexities hidden in the implement
Dapr Workflow solves these complexities by allowing you to implement the task chaining pattern concisely as a simple function in the programming language of your choice, as shown in the following example.
-{{< tabs ".NET" Python >}}
-
-{{% codetab %}}
-
-
-```csharp
-// Expotential backoff retry policy that survives long outages
-var retryOptions = new WorkflowTaskOptions
-{
- RetryPolicy = new WorkflowRetryPolicy(
- firstRetryInterval: TimeSpan.FromMinutes(1),
- backoffCoefficient: 2.0,
- maxRetryInterval: TimeSpan.FromHours(1),
- maxNumberOfAttempts: 10),
-};
-
-try
-{
- var result1 = await context.CallActivityAsync("Step1", wfInput, retryOptions);
- var result2 = await context.CallActivityAsync("Step2", result1, retryOptions);
- var result3 = await context.CallActivityAsync("Step3", result2, retryOptions);
- return string.Join(", ", result4);
-}
-catch (TaskFailedException) // Task failures are surfaced as TaskFailedException
-{
- // Retries expired - apply custom compensation logic
- await context.CallActivityAsync("MyCompensation", options: retryOptions);
- throw;
-}
-```
-
-{{% alert title="Note" color="primary" %}}
-In the example above, `"Step1"`, `"Step2"`, `"Step3"`, and `"MyCompensation"` represent workflow activities, which are functions in your code that actually implement the steps of the workflow. For brevity, these activity implementations are left out of this example.
-{{% /alert %}}
-
-{{% /codetab %}}
+{{< tabs Python ".NET" Java >}}
{{% codetab %}}
@@ -103,9 +68,63 @@ def error_handler(ctx, error):
# Do some compensating work
```
-{{% alert title="Note" color="primary" %}}
-Workflow retry policies will be available in a future version of the Python SDK.
-{{% /alert %}}
+> **Note** Workflow retry policies will be available in a future version of the Python SDK.
+
+{{% /codetab %}}
+
+{{% codetab %}}
+
+
+```csharp
+// Exponential backoff retry policy that survives long outages
+var retryOptions = new WorkflowTaskOptions
+{
+ RetryPolicy = new WorkflowRetryPolicy(
+ firstRetryInterval: TimeSpan.FromMinutes(1),
+ backoffCoefficient: 2.0,
+ maxRetryInterval: TimeSpan.FromHours(1),
+ maxNumberOfAttempts: 10),
+};
+
+try
+{
+    var result1 = await context.CallActivityAsync<string>("Step1", wfInput, retryOptions);
+    var result2 = await context.CallActivityAsync<string>("Step2", result1, retryOptions);
+    var result3 = await context.CallActivityAsync<string>("Step3", result2, retryOptions);
+    return string.Join(", ", result3);
+}
+catch (TaskFailedException) // Task failures are surfaced as TaskFailedException
+{
+ // Retries expired - apply custom compensation logic
+ await context.CallActivityAsync("MyCompensation", options: retryOptions);
+ throw;
+}
+```
+
+> **Note** In the example above, `"Step1"`, `"Step2"`, `"Step3"`, and `"MyCompensation"` represent workflow activities, which are functions in your code that actually implement the steps of the workflow. For brevity, these activity implementations are left out of this example.
+
+{{% /codetab %}}
+
+{{% codetab %}}
+
+
+```java
+public static void main(String[] args) throws InterruptedException {
+ DaprWorkflowClient client = new DaprWorkflowClient();
+
+ try (client) {
+ client.raiseEvent(instanceId, "TestEvent", "TestEventPayload");
+
+ System.out.println(separatorStr);
+ System.out.println("** Registering parallel Events to be captured by allOf(t1,t2,t3) **");
+ client.raiseEvent(instanceId, "event1", "TestEvent 1 Payload");
+ client.raiseEvent(instanceId, "event2", "TestEvent 2 Payload");
+ client.raiseEvent(instanceId, "event3", "TestEvent 3 Payload");
+ System.out.printf("Events raised for workflow with instanceId: %s\n", instanceId);
+
+ }
+}
+```
{{% /codetab %}}
@@ -135,32 +154,7 @@ In addition to the challenges mentioned in [the previous pattern]({{< ref "workf
Dapr Workflows provides a way to express the fan-out/fan-in pattern as a simple function, as shown in the following example:
-{{< tabs ".NET" Python >}}
-
-{{% codetab %}}
-
-
-```csharp
-// Get a list of N work items to process in parallel.
-object[] workBatch = await context.CallActivityAsync("GetWorkBatch", null);
-
-// Schedule the parallel tasks, but don't wait for them to complete yet.
-var parallelTasks = new List>(workBatch.Length);
-for (int i = 0; i < workBatch.Length; i++)
-{
- Task task = context.CallActivityAsync("ProcessWorkItem", workBatch[i]);
- parallelTasks.Add(task);
-}
-
-// Everything is scheduled. Wait here until all parallel tasks have completed.
-await Task.WhenAll(parallelTasks);
-
-// Aggregate all N outputs and publish the result.
-int sum = parallelTasks.Sum(t => t.Result);
-await context.CallActivityAsync("PostResults", sum);
-```
-
-{{% /codetab %}}
+{{< tabs Python ".NET" Java >}}
{{% codetab %}}
@@ -202,6 +196,80 @@ def process_results(ctx, final_result: int):
{{% /codetab %}}
+{{% codetab %}}
+
+
+```csharp
+// Get a list of N work items to process in parallel.
+object[] workBatch = await context.CallActivityAsync<object[]>("GetWorkBatch", null);
+
+// Schedule the parallel tasks, but don't wait for them to complete yet.
+var parallelTasks = new List<Task<int>>(workBatch.Length);
+for (int i = 0; i < workBatch.Length; i++)
+{
+    Task<int> task = context.CallActivityAsync<int>("ProcessWorkItem", workBatch[i]);
+    parallelTasks.Add(task);
+}
+
+// Everything is scheduled. Wait here until all parallel tasks have completed.
+await Task.WhenAll(parallelTasks);
+
+// Aggregate all N outputs and publish the result.
+int sum = parallelTasks.Sum(t => t.Result);
+await context.CallActivityAsync("PostResults", sum);
+```
+
+{{% /codetab %}}
+
+{{% codetab %}}
+
+
+```java
+public static void main(String[] args) throws InterruptedException {
+ DaprWorkflowClient client = new DaprWorkflowClient();
+
+ try (client) {
+
+ System.out.println(separatorStr);
+ System.out.println("**SendExternalMessage**");
+ client.raiseEvent(instanceId, "TestEvent", "TestEventPayload");
+
+ // Get events to process in parallel
+ System.out.println(separatorStr);
+ System.out.println("** Registering parallel Events to be captured by allOf(t1,t2,t3) **");
+ client.raiseEvent(instanceId, "event1", "TestEvent 1 Payload");
+ client.raiseEvent(instanceId, "event2", "TestEvent 2 Payload");
+ client.raiseEvent(instanceId, "event3", "TestEvent 3 Payload");
+ System.out.printf("Events raised for workflow with instanceId: %s\n", instanceId);
+
+ // Register the raised events to be captured
+ System.out.println(separatorStr);
+ System.out.println("** Registering Event to be captured by anyOf(t1,t2,t3) **");
+ client.raiseEvent(instanceId, "e2", "event 2 Payload");
+ System.out.printf("Event raised for workflow with instanceId: %s\n", instanceId);
+
+ // Wait for all tasks to complete and aggregate results
+ System.out.println(separatorStr);
+ System.out.println("**WaitForInstanceCompletion**");
+ try {
+ WorkflowInstanceStatus waitForInstanceCompletionResult =
+ client.waitForInstanceCompletion(instanceId, Duration.ofSeconds(60), true);
+ System.out.printf("Result: %s%n", waitForInstanceCompletionResult);
+ } catch (TimeoutException ex) {
+ System.out.printf("waitForInstanceCompletion has an exception:%s%n", ex);
+ }
+
+ System.out.println(separatorStr);
+ System.out.println("**purgeInstance**");
+ boolean purgeResult = client.purgeInstance(instanceId);
+ System.out.printf("purgeResult: %s%n", purgeResult);
+
+ }
+}
+```
+
+{{% /codetab %}}
+
{{< /tabs >}}
The key takeaways from this example are:
@@ -233,7 +301,7 @@ The Dapr workflow HTTP API supports the asynchronous request-reply pattern out-o
The following `curl` commands illustrate how the workflow APIs support this pattern.
```bash
-curl -X POST http://localhost:3500/v1.0-alpha1/workflows/dapr/OrderProcessingWorkflow/start?instanceID=12345678 -d '{"Name":"Paperclips","Quantity":1,"TotalCost":9.95}'
+curl -X POST http://localhost:3500/v1.0-beta1/workflows/dapr/OrderProcessingWorkflow/start?instanceID=12345678 -d '{"Name":"Paperclips","Quantity":1,"TotalCost":9.95}'
```
The previous command will result in the following response JSON:
@@ -245,7 +313,7 @@ The previous command will result in the following response JSON:
The HTTP client can then construct the status query URL using the workflow instance ID and poll it repeatedly until it sees the "COMPLETE", "FAILURE", or "TERMINATED" status in the payload.
```bash
-curl http://localhost:3500/v1.0-alpha1/workflows/dapr/12345678
+curl http://localhost:3500/v1.0-beta1/workflows/dapr/12345678
```
The following is an example of what an in-progress workflow status might look like.
@@ -302,7 +370,54 @@ Depending on the business needs, there may be a single monitor or there may be m
Dapr Workflow supports this pattern natively by allowing you to implement _eternal workflows_. Rather than writing infinite while-loops ([which is an anti-pattern]({{< ref "workflow-features-concepts.md#infinite-loops-and-eternal-workflows" >}})), Dapr Workflow exposes a _continue-as-new_ API that workflow authors can use to restart a workflow function from the beginning with a new input.
-{{< tabs ".NET" Python >}}
+{{< tabs Python ".NET" Java >}}
+
+{{% codetab %}}
+
+
+```python
+from dataclasses import dataclass
+from datetime import timedelta
+import random
+import dapr.ext.workflow as wf
+
+
+@dataclass
+class JobStatus:
+ job_id: str
+ is_healthy: bool
+
+
+def status_monitor_workflow(ctx: wf.DaprWorkflowContext, job: JobStatus):
+ # poll a status endpoint associated with this job
+ status = yield ctx.call_activity(check_status, input=job)
+ if not ctx.is_replaying:
+ print(f"Job '{job.job_id}' is {status}.")
+
+ if status == "healthy":
+ job.is_healthy = True
+ next_sleep_interval = 60 # check less frequently when healthy
+ else:
+ if job.is_healthy:
+ job.is_healthy = False
+ ctx.call_activity(send_alert, input=f"Job '{job.job_id}' is unhealthy!")
+ next_sleep_interval = 5 # check more frequently when unhealthy
+
+ yield ctx.create_timer(fire_at=ctx.current_utc_datetime + timedelta(seconds=next_sleep_interval))
+
+ # restart from the beginning with a new JobStatus input
+ ctx.continue_as_new(job)
+
+
+def check_status(ctx, _) -> str:
+ return random.choice(["healthy", "unhealthy"])
+
+
+def send_alert(ctx, message: str):
+ print(f'*** Alert: {message}')
+```
+
+{{% /codetab %}}
{{% codetab %}}
@@ -346,48 +461,44 @@ public override async Task RunAsync(WorkflowContext context, MyEntitySta
{{% /codetab %}}
{{% codetab %}}
-
-
-```python
-from dataclasses import dataclass
-from datetime import timedelta
-import random
-import dapr.ext.workflow as wf
-
-
-@dataclass
-class JobStatus:
- job_id: str
- is_healthy: bool
-
+
-def status_monitor_workflow(ctx: wf.DaprWorkflowContext, job: JobStatus):
- # poll a status endpoint associated with this job
- status = yield ctx.call_activity(check_status, input=job)
- if not ctx.is_replaying:
- print(f"Job '{job.job_id}' is {status}.")
-
- if status == "healthy":
- job.is_healthy = True
- next_sleep_interval = 60 # check less frequently when healthy
- else:
- if job.is_healthy:
- job.is_healthy = False
- ctx.call_activity(send_alert, input=f"Job '{job.job_id}' is unhealthy!")
- next_sleep_interval = 5 # check more frequently when unhealthy
+```java
+public class MonitorWorkflow extends Workflow {
- yield ctx.create_timer(fire_at=ctx.current_utc_datetime + timedelta(seconds=next_sleep_interval))
+ @Override
+ public WorkflowStub create() {
+ return ctx -> {
- # restart from the beginning with a new JobStatus input
- ctx.continue_as_new(job)
+ Duration nextSleepInterval;
+ var status = ctx.callActivity(DemoWorkflowStatusActivity.class.getName(), DemoStatusActivityOutput.class).await();
+ var isHealthy = status.getIsHealthy();
-def check_status(ctx, _) -> str:
- return random.choice(["healthy", "unhealthy"])
+ if (isHealthy) {
+ // Check less frequently when in a healthy state
+ nextSleepInterval = Duration.ofMinutes(60);
+ } else {
+ ctx.callActivity(DemoWorkflowAlertActivity.class.getName()).await();
-def send_alert(ctx, message: str):
- print(f'*** Alert: {message}')
+ // Check more frequently when in an unhealthy state
+ nextSleepInterval = Duration.ofMinutes(5);
+ }
+
+ // Put the workflow to sleep until the determined time
+ // Note: ctx.createTimer() method is not supported in the Java SDK yet
+ try {
+ TimeUnit.SECONDS.sleep(nextSleepInterval.getSeconds());
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+
+ // Restart from the beginning with the updated state
+ ctx.continueAsNew();
+ }
+ }
+}
```
{{% /codetab %}}
@@ -420,53 +531,7 @@ The following diagram illustrates this flow.
The following example code shows how this pattern can be implemented using Dapr Workflow.
-{{< tabs ".NET" Python >}}
-
-{{% codetab %}}
-
-
-```csharp
-public override async Task RunAsync(WorkflowContext context, OrderPayload order)
-{
- // ...(other steps)...
-
- // Require orders over a certain threshold to be approved
- if (order.TotalCost > OrderApprovalThreshold)
- {
- try
- {
- // Request human approval for this order
- await context.CallActivityAsync(nameof(RequestApprovalActivity), order);
-
- // Pause and wait for a human to approve the order
- ApprovalResult approvalResult = await context.WaitForExternalEventAsync(
- eventName: "ManagerApproval",
- timeout: TimeSpan.FromDays(3));
- if (approvalResult == ApprovalResult.Rejected)
- {
- // The order was rejected, end the workflow here
- return new OrderResult(Processed: false);
- }
- }
- catch (TaskCanceledException)
- {
- // An approval timeout results in automatic order cancellation
- return new OrderResult(Processed: false);
- }
- }
-
- // ...(other steps)...
-
- // End the workflow with a success result
- return new OrderResult(Processed: true);
-}
-```
-
-{{% alert title="Note" color="primary" %}}
-In the example above, `RequestApprovalActivity` is the name of a workflow activity to invoke and `ApprovalResult` is an enumeration defined by the workflow app. For brevity, these definitions were left out of the example code.
-{{% /alert %}}
-
-{{% /codetab %}}
+{{< tabs Python ".NET" Java >}}
{{% codetab %}}
@@ -527,26 +592,101 @@ def place_order(_, order: Order) -> None:
{{% /codetab %}}
-{{< /tabs >}}
+{{% codetab %}}
+
-The code that delivers the event to resume the workflow execution is external to the workflow. Workflow events can be delivered to a waiting workflow instance using the [raise event]({{< ref "howto-manage-workflow.md#raise-an-event" >}}) workflow management API, as shown in the following example:
+```csharp
+public override async Task<OrderResult> RunAsync(WorkflowContext context, OrderPayload order)
+{
+ // ...(other steps)...
+
+ // Require orders over a certain threshold to be approved
+ if (order.TotalCost > OrderApprovalThreshold)
+ {
+ try
+ {
+ // Request human approval for this order
+ await context.CallActivityAsync(nameof(RequestApprovalActivity), order);
+
+ // Pause and wait for a human to approve the order
+            ApprovalResult approvalResult = await context.WaitForExternalEventAsync<ApprovalResult>(
+                eventName: "ManagerApproval",
+                timeout: TimeSpan.FromDays(3));
+ if (approvalResult == ApprovalResult.Rejected)
+ {
+ // The order was rejected, end the workflow here
+ return new OrderResult(Processed: false);
+ }
+ }
+ catch (TaskCanceledException)
+ {
+ // An approval timeout results in automatic order cancellation
+ return new OrderResult(Processed: false);
+ }
+ }
-{{< tabs ".NET" Python >}}
+ // ...(other steps)...
+
+ // End the workflow with a success result
+ return new OrderResult(Processed: true);
+}
+```
+
+> **Note** In the example above, `RequestApprovalActivity` is the name of a workflow activity to invoke and `ApprovalResult` is an enumeration defined by the workflow app. For brevity, these definitions were left out of the example code.
+
+{{% /codetab %}}
{{% codetab %}}
-
+
+
+```java
+public static void main(String[] args) throws InterruptedException {
+ DaprWorkflowClient client = new DaprWorkflowClient();
+
+ try (client) {
+ String eventInstanceId = client.scheduleNewWorkflow(DemoWorkflow.class);
+ System.out.printf("Started new workflow instance with random ID: %s%n", eventInstanceId);
+ client.raiseEvent(eventInstanceId, "TestException", null);
+ System.out.printf("Event raised for workflow with instanceId: %s\n", eventInstanceId);
+
+ System.out.println(separatorStr);
+ String instanceToTerminateId = "terminateMe";
+ client.scheduleNewWorkflow(DemoWorkflow.class, null, instanceToTerminateId);
+ System.out.printf("Started new workflow instance with specified ID: %s%n", instanceToTerminateId);
+
+ TimeUnit.SECONDS.sleep(5);
+ System.out.println("Terminate this workflow instance manually before the timeout is reached");
+ client.terminateWorkflow(instanceToTerminateId, null);
+ System.out.println(separatorStr);
+
+ String restartingInstanceId = "restarting";
+ client.scheduleNewWorkflow(DemoWorkflow.class, null, restartingInstanceId);
+ System.out.printf("Started new workflow instance with ID: %s%n", restartingInstanceId);
+ System.out.println("Sleeping 30 seconds to restart the workflow");
+ TimeUnit.SECONDS.sleep(30);
+
+ System.out.println("**SendExternalMessage: RestartEvent**");
+ client.raiseEvent(restartingInstanceId, "RestartEvent", "RestartEventPayload");
+
+ System.out.println("Sleeping 30 seconds to terminate the eternal workflow");
+ TimeUnit.SECONDS.sleep(30);
+ client.terminateWorkflow(restartingInstanceId, null);
+ }
-```csharp
-// Raise the workflow event to the waiting workflow
-await daprClient.RaiseWorkflowEventAsync(
- instanceId: orderId,
- workflowComponent: "dapr",
- eventName: "ManagerApproval",
- eventData: ApprovalResult.Approved);
+ System.out.println("Exiting DemoWorkflowClient.");
+ System.exit(0);
+
+}
```
{{% /codetab %}}
+{{< /tabs >}}
+
+The code that delivers the event to resume the workflow execution is external to the workflow. Workflow events can be delivered to a waiting workflow instance using the [raise event]({{< ref "howto-manage-workflow.md#raise-an-event" >}}) workflow management API, as shown in the following example:
+
+{{< tabs Python ".NET" Java >}}
+
{{% codetab %}}
@@ -564,6 +704,30 @@ with DaprClient() as d:
{{% /codetab %}}
+{{% codetab %}}
+
+
+```csharp
+// Raise the workflow event to the waiting workflow
+await daprClient.RaiseWorkflowEventAsync(
+ instanceId: orderId,
+ workflowComponent: "dapr",
+ eventName: "ManagerApproval",
+ eventData: ApprovalResult.Approved);
+```
+
+{{% /codetab %}}
+
+{{% codetab %}}
+
+
+```java
+System.out.println("**SendExternalMessage: RestartEvent**");
+client.raiseEvent(restartingInstanceId, "RestartEvent", "RestartEventPayload");
+```
+
+{{% /codetab %}}
+
{{< /tabs >}}
External events don't have to be directly triggered by humans. They can also be triggered by other systems. For example, a workflow may need to pause and wait for a payment to be received. In this case, a payment system might publish an event to a pub/sub topic on receipt of a payment, and a listener on that topic can raise an event to the workflow using the raise event workflow API.
@@ -577,4 +741,7 @@ External events don't have to be directly triggered by humans. They can also be
- [Try out Dapr Workflows using the quickstart]({{< ref workflow-quickstart.md >}})
- [Workflow overview]({{< ref workflow-overview.md >}})
- [Workflow API reference]({{< ref workflow_api.md >}})
-- [Try out the .NET example](https://github.com/dapr/dotnet-sdk/tree/master/examples/Workflow)
+- Try out the following examples:
+ - [Python](https://github.com/dapr/python-sdk/tree/master/examples/demo_workflow)
+ - [.NET](https://github.com/dapr/dotnet-sdk/tree/master/examples/Workflow)
+ - [Java](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/workflows)
\ No newline at end of file
diff --git a/daprdocs/content/en/developing-applications/develop-components/pluggable-components/develop-pluggable.md b/daprdocs/content/en/developing-applications/develop-components/pluggable-components/develop-pluggable.md
index d25ff95d7a1..5868eabffd0 100644
--- a/daprdocs/content/en/developing-applications/develop-components/pluggable-components/develop-pluggable.md
+++ b/daprdocs/content/en/developing-applications/develop-components/pluggable-components/develop-pluggable.md
@@ -14,19 +14,21 @@ In order to implement a pluggable component, you need to implement a gRPC servic
### Find the proto definition file
-Proto definitions are provided for each supported service interface (state store, pub/sub, bindings).
+Proto definitions are provided for each supported service interface (state store, pub/sub, bindings, secret stores).
Currently, the following component APIs are supported:
- State stores
- Pub/sub
- Bindings
+- Secret stores
| Component | Type | gRPC definition | Built-in Reference Implementation | Docs |
| :---------: | :--------: | :--------------: | :----------------------------------------------------------------------------: | --------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| State Store | `state` | [state.proto] | [Redis](https://github.com/dapr/components-contrib/tree/master/state/redis) | [concept]({{< ref "state-management-overview" >}}), [howto]({{< ref "howto-get-save-state" >}}), [api spec]({{< ref "state_api" >}}) |
-| Pub/sub | `pubsub` | [pubsub.proto] | [Redis](https://github.com/dapr/components-contrib/tree/master/pubsub/redis) | [concept]({{< ref "pubsub-overview" >}}), [howto]({{< ref "howto-publish-subscribe" >}}), [api spec]({{< ref "pubsub_api" >}}) |
-| Bindings | `bindings` | [bindings.proto] | [Kafka](https://github.com/dapr/components-contrib/tree/master/bindings/kafka) | [concept]({{< ref "bindings-overview" >}}), [input howto]({{< ref "howto-triggers" >}}), [output howto]({{< ref "howto-bindings" >}}), [api spec]({{< ref "bindings_api" >}}) |
+| State Store | `state` | [state.proto](https://github.com/dapr/dapr/blob/master/dapr/proto/components/v1/state.proto) | [Redis](https://github.com/dapr/components-contrib/tree/master/state/redis) | [concept]({{< ref "state-management-overview" >}}), [howto]({{< ref "howto-get-save-state" >}}), [api spec]({{< ref "state_api" >}}) |
+| Pub/sub | `pubsub` | [pubsub.proto](https://github.com/dapr/dapr/blob/master/dapr/proto/components/v1/pubsub.proto) | [Redis](https://github.com/dapr/components-contrib/tree/master/pubsub/redis) | [concept]({{< ref "pubsub-overview" >}}), [howto]({{< ref "howto-publish-subscribe" >}}), [api spec]({{< ref "pubsub_api" >}}) |
+| Bindings | `bindings` | [bindings.proto](https://github.com/dapr/dapr/blob/master/dapr/proto/components/v1/bindings.proto) | [Kafka](https://github.com/dapr/components-contrib/tree/master/bindings/kafka) | [concept]({{< ref "bindings-overview" >}}), [input howto]({{< ref "howto-triggers" >}}), [output howto]({{< ref "howto-bindings" >}}), [api spec]({{< ref "bindings_api" >}}) |
+| Secret Store | `secretstores` | [secretstore.proto](https://github.com/dapr/dapr/blob/master/dapr/proto/components/v1/secretstore.proto) | [Hashicorp/Vault](https://github.com/dapr/components-contrib/blob/master/secretstores/hashicorp/vault/vault.go) | [concept]({{< ref "secrets-overview" >}}), [howto-secrets]({{< ref "howto-secrets" >}}), [api spec]({{< ref "secrets_api" >}}) |
Below is a snippet of the gRPC service definition for pluggable component state stores ([state.proto]):
@@ -95,11 +97,15 @@ Provide a concrete implementation of the desired service. Each component has a g
- **Pub/sub**
- Pluggable pub/sub components only have a single core service interface defined ([pubsub.proto]). They have no optional service interfaces.
+ Pluggable pub/sub components only have a single core service interface defined [pubsub.proto](https://github.com/dapr/dapr/blob/master/dapr/proto/components/v1/pubsub.proto). They have no optional service interfaces.
- **Bindings**
- Pluggable input and output bindings have a single core service definition on [bindings.proto]. They have no optional service interfaces.
+ Pluggable input and output bindings have a single core service definition on [bindings.proto](https://github.com/dapr/dapr/blob/master/dapr/proto/components/v1/bindings.proto). They have no optional service interfaces.
+
+- **Secret Store**
+
+ Pluggable secret stores have a single core service definition on [secretstore.proto](https://github.com/dapr/dapr/blob/master/dapr/proto/components/v1/secretstore.proto). They have no optional service interfaces.
After generating the above state store example's service scaffolding code using gRPC and protocol buffers tools, you can define concrete implementations for the 9 methods defined under `service StateStore`, along with code to initialize and communicate with your dependencies.
diff --git a/daprdocs/content/en/developing-applications/develop-components/pluggable-components/pluggable-components-overview.md b/daprdocs/content/en/developing-applications/develop-components/pluggable-components/pluggable-components-overview.md
index 96b1260cc41..bdee6b540d6 100644
--- a/daprdocs/content/en/developing-applications/develop-components/pluggable-components/pluggable-components-overview.md
+++ b/daprdocs/content/en/developing-applications/develop-components/pluggable-components/pluggable-components-overview.md
@@ -63,7 +63,3 @@ In contrast, pluggable components require additional steps before they can commu
- [Implement a pluggable component]({{< ref develop-pluggable.md >}})
- [Pluggable component registration]({{< ref "pluggable-components-registration" >}})
-
-[state.proto]: https://github.com/dapr/dapr/blob/master/dapr/proto/components/v1/state.proto
-[pubsub.proto]: https://github.com/dapr/dapr/blob/master/dapr/proto/components/v1/pubsub.proto
-[bindings.proto]: https://github.com/dapr/dapr/blob/master/dapr/proto/components/v1/bindings.proto
diff --git a/daprdocs/content/en/developing-applications/integrations/Azure/azure-functions.md b/daprdocs/content/en/developing-applications/integrations/Azure/azure-functions.md
index 7fe9dd013ad..0de36d3eb4e 100644
--- a/daprdocs/content/en/developing-applications/integrations/Azure/azure-functions.md
+++ b/daprdocs/content/en/developing-applications/integrations/Azure/azure-functions.md
@@ -7,12 +7,13 @@ weight: 3000
---
{{% alert title="Note" color="primary" %}}
-The Dapr Functions extension is currently in preview.
+The Dapr extension for Azure Functions is currently in preview.
{{% /alert %}}
+Dapr integrates with the [Azure Functions runtime](https://learn.microsoft.com/azure/azure-functions/functions-overview) via an extension that lets a function seamlessly interact with Dapr.
+- **Azure Functions** provides an event-driven programming model.
+- **Dapr** provides cloud-native building blocks.
-Dapr integrates with the [Azure Functions runtime](https://learn.microsoft.com/azure/azure-functions/functions-overview) via an extension that lets a function seamlessly interact with Dapr. Azure Functions provides an event-driven programming model and Dapr provides cloud-native building blocks. The extension combines the two for serverless and event-driven apps.
+The extension combines the two for serverless and event-driven apps.
-Try out the [Dapr Functions extension](https://github.com/dapr/azure-functions-extension) samples.
-
-{{< button text="Learn more about the Dapr Function extension in preview" link="https://cloudblogs.microsoft.com/opensource/2020/07/01/announcing-azure-functions-extension-for-dapr/" >}}
+{{< button text="Try out the Dapr extension for Azure Functions" link="https://learn.microsoft.com/azure/azure-functions/functions-bindings-dapr" >}}
diff --git a/daprdocs/content/en/developing-applications/local-development/multi-app-dapr-run/multi-app-overview.md b/daprdocs/content/en/developing-applications/local-development/multi-app-dapr-run/multi-app-overview.md
index 5fac41131e7..4e48d8a09e4 100644
--- a/daprdocs/content/en/developing-applications/local-development/multi-app-dapr-run/multi-app-overview.md
+++ b/daprdocs/content/en/developing-applications/local-development/multi-app-dapr-run/multi-app-overview.md
@@ -7,23 +7,24 @@ description: Run multiple applications with one CLI command
---
{{% alert title="Note" color="primary" %}}
- Multi-App Run is currently a preview feature only supported in Linux/MacOS.
+ Multi-App Run for **Kubernetes** is currently a preview feature.
{{% /alert %}}
-Let's say you want to run several applications locally to test them together, similar to a production scenario. With a local Kubernetes cluster, you'd be able to do this with helm/deployment YAML files. You'd also have to build them as containers and set up Kubernetes, which can add some complexity.
+Let's say you want to run several applications locally to test them together, similar to a production scenario. Multi-App Run allows you to start and stop a set of applications simultaneously, either:
+- Locally/self-hosted with processes, or
+- By building container images and deploying to a Kubernetes cluster
+ - You can use a local Kubernetes cluster (KiND) or one deployed to a Cloud (AKS, EKS, and GKE).
-Instead, you simply want to run them as local executables in self-hosted mode. However, self-hosted mode requires you to:
+The Multi-App Run template file describes how to start multiple applications as if you had run many separate CLI `run` commands. By default, this template file is called `dapr.yaml`.
-- Run multiple `dapr run` commands
-- Keep track of all ports opened (you cannot have duplicate ports for different applications).
-- Remember the resources folders and configuration files that each application refers to.
-- Recall all of the additional flags you used to tweak the `dapr run` command behavior (`--app-health-check-path`, `--dapr-grpc-port`, `--unix-domain-socket`, etc.)
+{{< tabs Self-hosted Kubernetes>}}
-With Multi-App Run, you can start multiple applications in self-hosted mode using a single `dapr run -f` command using a template file. The template file describes how to start multiple applications as if you had run many separate CLI `run`commands. By default, this template file is called `dapr.yaml`.
+{{% codetab %}}
+
## Multi-App Run template file
-When you execute `dapr run -f .`, it uses the multi-app template file (named `dapr.yaml`) present in the current directory to run all the applications.
+When you execute `dapr run -f .`, it starts the multi-app template file (named `dapr.yaml`) present in the current directory to run all the applications.
You can name template file with preferred name other than the default. For example `dapr run -f ./.yaml`.
@@ -40,7 +41,7 @@ apps:
- appID: emit-metrics
appDirPath: ../apps/emit-metrics/
daprHTTPPort: 3511
- env:
+ env:
DAPR_HOST_ADD: localhost
command: ["go","run", "app.go"]
```
@@ -49,7 +50,7 @@ For a more in-depth example and explanation of the template properties, see [Mul
## Locations for resources and configuration files
-You have options on where to place your applications' resources and configuration files when using Multi-App Run.
+You have options on where to place your applications' resources and configuration files when using Multi-App Run.
### Point to one file location (with convention)
@@ -71,9 +72,9 @@ The run template provides two log destination fields for each application and it
1. `appLogDestination` : This field configures the log destination for the application. The possible values are `console`, `file` and `fileAndConsole`. The default value is `fileAndConsole` where application logs are written to both console and to a file by default.
-2. `daprdLogDestination` : This field configures the log destination for the `daprd` process. The possible values are `console`, `file` and `fileAndConsole`. The default value is `file` where the `daprd` logs are written to a file by default.
+1. `daprdLogDestination` : This field configures the log destination for the `daprd` process. The possible values are `console`, `file` and `fileAndConsole`. The default value is `file` where the `daprd` logs are written to a file by default.
-#### Log file format
+### Log file format
Logs for application and `daprd` are captured in separate files. These log files are created automatically under `.dapr/logs` directory under each application directory (`appDirPath` in the template). These log file names follow the pattern seen below:
@@ -82,14 +83,90 @@ Logs for application and `daprd` are captured in separate files. These log files
Even if you've decided to rename your resources folder to something other than `.dapr`, the log files are written only to the `.dapr/logs` folder (created in the application directory).
-
## Watch the demo
Watch [this video for an overview on Multi-App Run](https://youtu.be/s1p9MNl4VGo?t=2456):
+{{% /codetab %}}
+
+{{% codetab %}}
+
+
+## Multi-App Run template file
+
+When you execute `dapr run -k -f .` or `dapr run -k -f dapr.yaml`, the applications defined in the `dapr.yaml` Multi-App Run template file start in the default Kubernetes namespace.
+
+> **Note:** Currently, the Multi-App Run template can only start applications in the default Kubernetes namespace.
+
+The necessary default service and deployment definitions for Kubernetes are generated within the `.dapr/deploy` folder for each app in the `dapr.yaml` template.
+
+If the `createService` field is set to `true` in the `dapr.yaml` template for an app, then the `service.yaml` file is generated in the `.dapr/deploy` folder of the app.
+
+Otherwise, only the `deployment.yaml` file is generated for each app that has the `containerImage` field set.
+
+The files `service.yaml` and `deployment.yaml` are used to deploy the applications in `default` namespace in Kubernetes. This feature is specifically targeted only for running multiple apps in a dev/test environment in Kubernetes.
+
+You can name the template file with any preferred name other than the default. For example:
+
+```bash
+dapr run -k -f ./.yaml
+```
+
+The following example includes some of the template properties you can customize for your applications. In the example, you can simultaneously launch 2 applications with app IDs of `nodeapp` and `pythonapp`.
+
+```yaml
+version: 1
+common:
+apps:
+ - appID: nodeapp
+ appDirPath: ./nodeapp/
+ appPort: 3000
+ containerImage: ghcr.io/dapr/samples/hello-k8s-node:latest
+ createService: true
+ env:
+ APP_PORT: 3000
+ - appID: pythonapp
+ appDirPath: ./pythonapp/
+ containerImage: ghcr.io/dapr/samples/hello-k8s-python:latest
+```
+
+> **Note:**
+> - If the `containerImage` field is not specified, `dapr run -k -f` produces an error.
+> - The `createService` field defines a basic service in Kubernetes (ClusterIP or LoadBalancer) that targets the `--app-port` specified in the template. If `createService` isn't specified, the application is not accessible from outside the cluster.
+
+For a more in-depth example and explanation of the template properties, see [Multi-app template]({{< ref multi-app-template.md >}}).
+
+## Logs
+
+The run template provides two log destination fields for each application and its associated daprd process:
+
+1. `appLogDestination` : This field configures the log destination for the application. The possible values are `console`, `file` and `fileAndConsole`. The default value is `fileAndConsole` where application logs are written to both console and to a file by default.
+
+2. `daprdLogDestination` : This field configures the log destination for the `daprd` process. The possible values are `console`, `file` and `fileAndConsole`. The default value is `file` where the `daprd` logs are written to a file by default.
+
+### Log file format
+
+Logs for application and `daprd` are captured in separate files. These log files are created automatically under `.dapr/logs` directory under each application directory (`appDirPath` in the template). These log file names follow the pattern seen below:
+
+- `_app_.log` (file name format for `app` log)
+- `_daprd_.log` (file name format for `daprd` log)
+
+Even if you've decided to rename your resources folder to something other than `.dapr`, the log files are written only to the `.dapr/logs` folder (created in the application directory).
+
+## Watch the demo
+
+Watch [this video for an overview on Multi-App Run in Kubernetes](https://youtu.be/nWatANwaAik?si=O8XR-TUaiY0gclgO&t=1024):
+
+
+
+{{% /codetab %}}
+
+{{< /tabs >}}
+
## Next steps
- [Learn the Multi-App Run template file structure and its properties]({{< ref multi-app-template.md >}})
-- [Try out the Multi-App Run template with the Service Invocation quickstart]({{< ref serviceinvocation-quickstart.md >}})
\ No newline at end of file
+- [Try out the self-hosted Multi-App Run template with the Service Invocation quickstart]({{< ref serviceinvocation-quickstart.md >}})
+- [Try out the Kubernetes Multi-App Run template with the `hello-kubernetes` tutorial](https://github.com/dapr/quickstarts/tree/master/tutorials/hello-kubernetes)
\ No newline at end of file
diff --git a/daprdocs/content/en/developing-applications/local-development/multi-app-dapr-run/multi-app-template.md b/daprdocs/content/en/developing-applications/local-development/multi-app-dapr-run/multi-app-template.md
index 8bca3008036..350ef0f4219 100644
--- a/daprdocs/content/en/developing-applications/local-development/multi-app-dapr-run/multi-app-template.md
+++ b/daprdocs/content/en/developing-applications/local-development/multi-app-dapr-run/multi-app-template.md
@@ -7,7 +7,7 @@ description: Unpack the Multi-App Run template file and its properties
---
{{% alert title="Note" color="primary" %}}
- Multi-App Run is currently a preview feature only supported in Linux/MacOS.
+ Multi-App Run for **Kubernetes** is currently a preview feature.
{{% /alert %}}
The Multi-App Run template file is a YAML file that you can use to run multiple applications at once. In this guide, you'll learn how to:
@@ -26,20 +26,53 @@ When you provide a directory path, the CLI will try to locate the Multi-App Run
Execute the following CLI command to read the Multi-App Run template file, named `dapr.yaml` by default:
+{{< tabs Self-hosted Kubernetes>}}
+
+{{% codetab %}}
+
+
```cmd
# the template file needs to be called `dapr.yaml` by default if a directory path is given
dapr run -f
```
+{{% /codetab %}}
+
+{{% codetab %}}
+
+
+```cmd
+dapr run -f -k
+```
+{{% /codetab %}}
+
+{{< /tabs >}}
### Execute by providing a file path
If the Multi-App Run template file is named something other than `dapr.yaml`, then you can provide the relative or absolute file path to the command:
+{{< tabs Self-hosted Kubernetes>}}
+
+{{% codetab %}}
+
+
```cmd
dapr run -f ./path/to/.yaml
```
+{{% /codetab %}}
+
+{{% codetab %}}
+
+
+```cmd
+dapr run -f -k ./path/to/.yaml
+```
+{{% /codetab %}}
+
+{{< /tabs >}}
+
## View the started applications
Once the multi-app template is running, you can view the started applications with the following command:
@@ -52,6 +85,11 @@ dapr list
Stop the multi-app run template anytime with either of the following commands:
+{{< tabs Self-hosted Kubernetes>}}
+
+{{% codetab %}}
+
+
```cmd
# the template file needs to be called `dapr.yaml` by default if a directory path is given
@@ -63,10 +101,36 @@ or:
dapr stop -f ./path/to/.yaml
```
+{{% /codetab %}}
+
+{{% codetab %}}
+
+
+```cmd
+# the template file needs to be called `dapr.yaml` by default if a directory path is given
+
+dapr stop -f -k
+```
+or:
+
+```cmd
+dapr stop -f -k ./path/to/.yaml
+```
+
+{{% /codetab %}}
+
+{{< /tabs >}}
+
+
## Template file structure
The Multi-App Run template file can include the following properties. Below is an example template showing two applications that are configured with some of the properties.
+{{< tabs Self-hosted Kubernetes>}}
+
+{{% codetab %}}
+
+
```yaml
version: 1
common: # optional section for variables shared across apps
@@ -92,23 +156,65 @@ apps:
appPort: 3000
unixDomainSocket: "/tmp/test-socket"
env:
- - DEBUG: false
+ DEBUG: false
command: ["./backend"]
```
-{{% alert title="Important" color="warning" %}}
The following rules apply for all the paths present in the template file:
- If the path is absolute, it is used as is.
- - All relative paths under comman section should be provided relative to the template file path.
+ - All relative paths under command section should be provided relative to the template file path.
- `appDirPath` under apps section should be provided relative to the template file path.
- - All relative paths under app section should be provided relative to the appDirPath.
+ - All relative paths under app section should be provided relative to the `appDirPath`.
-{{% /alert %}}
+{{% /codetab %}}
+
+{{% codetab %}}
+
+
+```yaml
+version: 1
+common: # optional section for variables shared across apps
+ env: # any environment variable shared across apps
+ DEBUG: true
+apps:
+ - appID: webapp # optional
+ appDirPath: .dapr/webapp/ # REQUIRED
+ appChannelAddress: 127.0.0.1 # network address where the app listens on. (optional) can be left to default value by convention.
+ appProtocol: http
+ appPort: 8080
+ appHealthCheckPath: "/healthz"
+ appLogDestination: file # (optional), can be file, console or fileAndConsole. default is fileAndConsole.
+ daprdLogDestination: file # (optional), can be file, console or fileAndConsole. default is file.
+ containerImage: ghcr.io/dapr/samples/hello-k8s-node:latest # (optional) URI of the container image to be used when deploying to Kubernetes dev/test environment.
+ createService: true # (optional) Create a Kubernetes service for the application when deploying to dev/test environment.
+ - appID: backend # optional
+ appDirPath: .dapr/backend/ # REQUIRED
+ appProtocol: grpc
+ appPort: 3000
+ unixDomainSocket: "/tmp/test-socket"
+ env:
+ DEBUG: false
+```
+
+The following rules apply for all the paths present in the template file:
+ - If the path is absolute, it is used as is.
+ - `appDirPath` under apps section should be provided relative to the template file path.
+ - All relative paths under app section should be provided relative to the `appDirPath`.
+
+{{% /codetab %}}
+
+{{< /tabs >}}
## Template properties
+{{< tabs Self-hosted Kubernetes>}}
+
+{{% codetab %}}
+
+
The properties for the Multi-App Run template align with the `dapr run` CLI flags, [listed in the CLI reference documentation]({{< ref "dapr-run.md#flags" >}}).
+{{< table "table table-white table-striped table-bordered" >}}
| Properties | Required | Details | Example |
|--------------------------|:--------:|--------|---------|
@@ -146,8 +252,66 @@ The properties for the Multi-App Run template align with the `dapr run` CLI flag
| `appLogDestination` | N | Log destination for outputting app logs; Its value can be file, console or fileAndConsole. Default is fileAndConsole | `file`, `console`, `fileAndConsole` |
| `daprdLogDestination` | N | Log destination for outputting daprd logs; Its value can be file, console or fileAndConsole. Default is file | `file`, `console`, `fileAndConsole` |
+{{< /table >}}
+
## Next steps
Watch [this video for an overview on Multi-App Run](https://youtu.be/s1p9MNl4VGo?t=2456):
+{{% /codetab %}}
+
+{{% codetab %}}
+
+
+The properties for the Multi-App Run template align with the `dapr run -k` CLI flags, [listed in the CLI reference documentation]({{< ref "dapr-run.md#flags" >}}).
+
+{{< table "table table-white table-striped table-bordered" >}}
+
+| Properties | Required | Details | Example |
+|--------------------------|:--------:|--------|---------|
+| `appDirPath` | Y | Path to your application code | `./webapp/`, `./backend/` |
+| `appID` | N | Application's app ID. If not provided, will be derived from `appDirPath` | `webapp`, `backend` |
+| `appChannelAddress` | N | The network address the application listens on. Can be left to the default value by convention. | `127.0.0.1`, `localhost` |
+| `appProtocol` | N | The protocol Dapr uses to talk to the application. | `http`, `grpc` |
+| `appPort` | N | The port your application is listening on | `8080`, `3000` |
+| `daprHTTPPort` | N | Dapr HTTP port | |
+| `daprGRPCPort` | N | Dapr GRPC port | |
+| `daprInternalGRPCPort` | N | gRPC port for the Dapr Internal API to listen on; used when parsing the value from a local DNS component | |
+| `metricsPort` | N | The port that Dapr sends its metrics information to | |
+| `unixDomainSocket` | N | Path to a unix domain socket dir mount. If specified, communication with the Dapr sidecar uses unix domain sockets for lower latency and greater throughput when compared to using TCP ports. Not available on Windows. | `/tmp/test-socket` |
+| `profilePort` | N | The port for the profile server to listen on | |
+| `enableProfiling` | N | Enable profiling via an HTTP endpoint | |
+| `apiListenAddresses` | N | Dapr API listen addresses | |
+| `logLevel` | N | The log verbosity. | |
+| `appMaxConcurrency` | N | The concurrency level of the application; default is unlimited | |
+| `placementHostAddress` | N | | |
+| `appSSL` | N | Enable https when Dapr invokes the application | |
+| `daprHTTPMaxRequestSize` | N | Max size of the request body in MB. | |
+| `daprHTTPReadBufferSize` | N | Max size of the HTTP read buffer in KB. This also limits the maximum size of HTTP headers. The default is 4 KB | |
+| `enableAppHealthCheck` | N | Enable the app health check on the application | `true`, `false` |
+| `appHealthCheckPath` | N | Path to the health check file | `/healthz` |
+| `appHealthProbeInterval` | N | Interval to probe for the health of the app in seconds | |
+| `appHealthProbeTimeout` | N | Timeout for app health probes in milliseconds | |
+| `appHealthThreshold` | N | Number of consecutive failures for the app to be considered unhealthy | |
+| `enableApiLogging` | N | Enable the logging of all API calls from application to Dapr | |
+| `env` | N | Map to environment variable; environment variables applied per application will overwrite environment variables shared across applications | `DEBUG`, `DAPR_HOST_ADD` |
+| `appLogDestination` | N | Log destination for outputting app logs; Its value can be file, console or fileAndConsole. Default is fileAndConsole | `file`, `console`, `fileAndConsole` |
+| `daprdLogDestination` | N | Log destination for outputting daprd logs; Its value can be file, console or fileAndConsole. Default is file | `file`, `console`, `fileAndConsole` |
+| `containerImage` | N | URI of the container image to be used when deploying to Kubernetes dev/test environment. | `ghcr.io/dapr/samples/hello-k8s-python:latest` |
+| `createService` | N | Create a Kubernetes service for the application when deploying to dev/test environment. | `true`, `false` |
+
+{{< /table >}}
+
+## Next steps
+
+Watch [this video for an overview on Multi-App Run in Kubernetes](https://youtu.be/nWatANwaAik?si=O8XR-TUaiY0gclgO&t=1024):
+
+
+
+{{% /codetab %}}
+
+{{< /tabs >}}
+
+
diff --git a/daprdocs/content/en/getting-started/quickstarts/bindings-quickstart.md b/daprdocs/content/en/getting-started/quickstarts/bindings-quickstart.md
index b818c434836..cbfa248c94b 100644
--- a/daprdocs/content/en/getting-started/quickstarts/bindings-quickstart.md
+++ b/daprdocs/content/en/getting-started/quickstarts/bindings-quickstart.md
@@ -11,7 +11,7 @@ Let's take a look at Dapr's [Bindings building block]({{< ref bindings >}}). Usi
- Trigger your app with events coming in from external systems.
- Interface with external systems.
-In this Quickstart, you will schedule a batch script to run every 10 seconds using an input [Cron]({{< ref cron.md >}}) binding. The script processes a JSON file and outputs data to a SQL database using the [PostgreSQL]({{< ref postgresql.md >}}) Dapr binding.
+In this Quickstart, you schedule a batch script to run every 10 seconds using an input [Cron]({{< ref cron.md >}}) binding. The script processes a JSON file and outputs data to a SQL database using the [PostgreSQL]({{< ref postgresql.md >}}) Dapr binding.
diff --git a/daprdocs/content/en/getting-started/quickstarts/pubsub-quickstart.md b/daprdocs/content/en/getting-started/quickstarts/pubsub-quickstart.md
index cb6096c519b..61306891165 100644
--- a/daprdocs/content/en/getting-started/quickstarts/pubsub-quickstart.md
+++ b/daprdocs/content/en/getting-started/quickstarts/pubsub-quickstart.md
@@ -14,6 +14,757 @@ Let's take a look at Dapr's [Publish and Subscribe (Pub/sub) building block]({{<
+You can try out this pub/sub quickstart by either:
+
+- [Running all applications in this sample simultaneously with the Multi-App Run template file]({{< ref "#run-using-multi-app-run" >}}), or
+- [Running one application at a time]({{< ref "#run-one-application-at-a-time" >}})
+
+## Run using Multi-App Run
+
+Select your preferred language-specific Dapr SDK before proceeding with the Quickstart.
+
+{{< tabs "Python" "JavaScript" ".NET" "Java" "Go" >}}
+
+{{% codetab %}}
+
+### Step 1: Pre-requisites
+
+For this example, you will need:
+
+- [Dapr CLI and initialized environment](https://docs.dapr.io/getting-started).
+- [Python 3.7+ installed](https://www.python.org/downloads/).
+
+- [Docker Desktop](https://www.docker.com/products/docker-desktop)
+
+
+### Step 2: Set up the environment
+
+Clone the [sample provided in the Quickstarts repo](https://github.com/dapr/quickstarts/tree/master/pub_sub).
+
+```bash
+git clone https://github.com/dapr/quickstarts.git
+```
+
+From the root of the Quickstarts directory, navigate into the pub/sub directory:
+
+```bash
+cd pub_sub/python/sdk
+```
+
+### Step 3: Run the publisher and subscriber
+
+With the following command, simultaneously run the following services alongside their own Dapr sidecars:
+- The `order-processor` subscriber
+- The `checkout` publisher
+
+```bash
+dapr run -f .
+```
+
+**Expected output**
+
+```
+== APP - checkout-sdk == Published data: Order { OrderId = 1 }
+== APP - order-processor == Subscriber received : Order { OrderId = 1 }
+== APP - checkout-sdk == Published data: Order { OrderId = 2 }
+== APP - order-processor == Subscriber received : Order { OrderId = 2 }
+== APP - checkout-sdk == Published data: Order { OrderId = 3 }
+== APP - order-processor == Subscriber received : Order { OrderId = 3 }
+== APP - checkout-sdk == Published data: Order { OrderId = 4 }
+== APP - order-processor == Subscriber received : Order { OrderId = 4 }
+== APP - checkout-sdk == Published data: Order { OrderId = 5 }
+== APP - order-processor == Subscriber received : Order { OrderId = 5 }
+== APP - checkout-sdk == Published data: Order { OrderId = 6 }
+== APP - order-processor == Subscriber received : Order { OrderId = 6 }
+== APP - checkout-sdk == Published data: Order { OrderId = 7 }
+== APP - order-processor == Subscriber received : Order { OrderId = 7 }
+== APP - checkout-sdk == Published data: Order { OrderId = 8 }
+== APP - order-processor == Subscriber received : Order { OrderId = 8 }
+== APP - checkout-sdk == Published data: Order { OrderId = 9 }
+== APP - order-processor == Subscriber received : Order { OrderId = 9 }
+== APP - checkout-sdk == Published data: Order { OrderId = 10 }
+== APP - order-processor == Subscriber received : Order { OrderId = 10 }
+Exited App successfully
+```
+
+### What happened?
+
+When you ran `dapr init` during Dapr install, the following YAML files were generated in the `.dapr/components` directory:
+- [`dapr.yaml` Multi-App Run template file]({{< ref "#dapryaml-multi-app-run-template-file" >}})
+- [`pubsub.yaml` component file]({{< ref "#pubsubyaml-component-file" >}})
+
+Running `dapr run -f .` in this Quickstart started both the [subscriber]({{< ref "#order-processor-subscriber" >}}) and [publisher]({{< ref "#checkout-publisher" >}}) applications.
+
+##### `dapr.yaml` Multi-App Run template file
+
+Running the [Multi-App Run template file]({{< ref multi-app-dapr-run >}}) with `dapr run -f .` starts all applications in your project. In this Quickstart, the `dapr.yaml` file contains the following:
+
+```yml
+version: 1
+common:
+ resourcesPath: ../../components/
+apps:
+ - appID: order-processor-sdk
+ appDirPath: ./order-processor/
+ appPort: 6001
+ command: ["uvicorn", "app:app"]
+ - appID: checkout-sdk
+ appDirPath: ./checkout/
+ command: ["python3", "app.py"]
+```
+
+##### `pubsub.yaml` component file
+
+With the `pubsub.yaml` component, you can easily swap out underlying components without application code changes.
+
+The Redis `pubsub.yaml` file included for this Quickstart contains the following:
+
+```yaml
+apiVersion: dapr.io/v1alpha1
+kind: Component
+metadata:
+ name: orderpubsub
+spec:
+ type: pubsub.redis
+ version: v1
+ metadata:
+ - name: redisHost
+ value: localhost:6379
+ - name: redisPassword
+ value: ""
+```
+
+In the component YAML file:
+
+- `metadata/name` is how your application talks to the component.
+- `spec/metadata` defines the connection to the instance of the component.
+- `scopes` specifies which applications can use the component.
+
+##### `order-processor` subscriber
+
+In the `order-processor` subscriber, you subscribe to the Redis instance called `orderpubsub` [(as defined in the `pubsub.yaml` component)]({{< ref "#pubsubyaml-component-file" >}}) and topic `orders`. This enables your app code to talk to the Redis component instance through the Dapr sidecar.
+
+```py
+# Register Dapr pub/sub subscriptions
+@app.route('/dapr/subscribe', methods=['GET'])
+def subscribe():
+ subscriptions = [{
+ 'pubsubname': 'orderpubsub',
+ 'topic': 'orders',
+ 'route': 'orders'
+ }]
+ print('Dapr pub/sub is subscribed to: ' + json.dumps(subscriptions))
+ return jsonify(subscriptions)
+
+
+# Dapr subscription in /dapr/subscribe sets up this route
+@app.route('/orders', methods=['POST'])
+def orders_subscriber():
+ event = from_http(request.headers, request.get_data())
+ print('Subscriber received : ' + event.data['orderid'], flush=True)
+ return json.dumps({'success': True}), 200, {
+ 'ContentType': 'application/json'}
+
+
+app.run(port=5001)
+```
+
+##### `checkout` publisher
+
+In the `checkout` publisher, you publish the orderId message to the Redis instance called `orderpubsub` [(as defined in the `pubsub.yaml` component)]({{< ref "#pubsubyaml-component-file" >}}) and topic `orders`. As soon as the service starts, it publishes in a loop:
+
+```python
+with DaprClient() as client:
+ # Publish an event/message using Dapr PubSub
+ result = client.publish_event(
+ pubsub_name='orderpubsub',
+ topic_name='orders',
+ data=json.dumps(order),
+ data_content_type='application/json',
+ )
+```
+
+{{% /codetab %}}
+
+
+{{% codetab %}}
+
+### Step 1: Pre-requisites
+
+For this example, you will need:
+
+- [Dapr CLI and initialized environment](https://docs.dapr.io/getting-started).
+- [Latest Node.js installed](https://nodejs.org/download/).
+
+- [Docker Desktop](https://www.docker.com/products/docker-desktop)
+
+
+### Step 2: Set up the environment
+
+Clone the [sample provided in the Quickstarts repo](https://github.com/dapr/quickstarts/tree/master/pub_sub).
+
+```bash
+git clone https://github.com/dapr/quickstarts.git
+```
+
+From the root of the Quickstarts directory, navigate into the pub/sub directory:
+
+```bash
+cd pub_sub/javascript/sdk
+```
+
+### Step 3: Run the publisher and subscriber
+
+With the following command, simultaneously run the following services alongside their own Dapr sidecars:
+- The `order-processor` subscriber
+- The `checkout` publisher
+
+```bash
+dapr run -f .
+```
+
+**Expected output**
+
+```
+== APP - checkout-sdk == Published data: Order { OrderId = 1 }
+== APP - order-processor == Subscriber received : Order { OrderId = 1 }
+== APP - checkout-sdk == Published data: Order { OrderId = 2 }
+== APP - order-processor == Subscriber received : Order { OrderId = 2 }
+== APP - checkout-sdk == Published data: Order { OrderId = 3 }
+== APP - order-processor == Subscriber received : Order { OrderId = 3 }
+== APP - checkout-sdk == Published data: Order { OrderId = 4 }
+== APP - order-processor == Subscriber received : Order { OrderId = 4 }
+== APP - checkout-sdk == Published data: Order { OrderId = 5 }
+== APP - order-processor == Subscriber received : Order { OrderId = 5 }
+== APP - checkout-sdk == Published data: Order { OrderId = 6 }
+== APP - order-processor == Subscriber received : Order { OrderId = 6 }
+== APP - checkout-sdk == Published data: Order { OrderId = 7 }
+== APP - order-processor == Subscriber received : Order { OrderId = 7 }
+== APP - checkout-sdk == Published data: Order { OrderId = 8 }
+== APP - order-processor == Subscriber received : Order { OrderId = 8 }
+== APP - checkout-sdk == Published data: Order { OrderId = 9 }
+== APP - order-processor == Subscriber received : Order { OrderId = 9 }
+== APP - checkout-sdk == Published data: Order { OrderId = 10 }
+== APP - order-processor == Subscriber received : Order { OrderId = 10 }
+Exited App successfully
+```
+
+### What happened?
+
+When you ran `dapr init` during Dapr install, the following YAML files were generated in the `.dapr/components` directory:
+- [`dapr.yaml` Multi-App Run template file]({{< ref "#dapryaml-multi-app-run-template-file" >}})
+- [`pubsub.yaml` component file]({{< ref "#pubsubyaml-component-file" >}})
+
+Running `dapr run -f .` in this Quickstart started both the [subscriber]({{< ref "#order-processor-subscriber" >}}) and [publisher]({{< ref "#checkout-publisher" >}}) applications.
+
+##### `dapr.yaml` Multi-App Run template file
+
+Running the [Multi-App Run template file]({{< ref multi-app-dapr-run >}}) with `dapr run -f .` starts all applications in your project. In this Quickstart, the `dapr.yaml` file contains the following:
+
+```yml
+version: 1
+common:
+ resourcesPath: ../../components/
+apps:
+ - appID: order-processor
+ appDirPath: ./order-processor/
+ appPort: 5002
+ command: ["npm", "run", "start"]
+ - appID: checkout-sdk
+ appDirPath: ./checkout/
+ command: ["npm", "run", "start"]
+```
+
+##### `pubsub.yaml` component file
+
+With the `pubsub.yaml` component, you can easily swap out underlying components without application code changes.
+
+The Redis `pubsub.yaml` file included for this Quickstart contains the following:
+
+```yaml
+apiVersion: dapr.io/v1alpha1
+kind: Component
+metadata:
+ name: orderpubsub
+spec:
+ type: pubsub.redis
+ version: v1
+ metadata:
+ - name: redisHost
+ value: localhost:6379
+ - name: redisPassword
+ value: ""
+```
+
+In the component YAML file:
+
+- `metadata/name` is how your application talks to the component.
+- `spec/metadata` defines the connection to the instance of the component.
+- `scopes` specifies which applications can use the component.
+
+##### `order-processor` subscriber
+
+In the `order-processor` subscriber, you subscribe to the Redis instance called `orderpubsub` [(as defined in the `pubsub.yaml` component)]({{< ref "#pubsubyaml-component-file" >}}) and topic `orders`. This enables your app code to talk to the Redis component instance through the Dapr sidecar.
+
+```js
+server.pubsub.subscribe("orderpubsub", "orders", (data) => console.log("Subscriber received: " + JSON.stringify(data)));
+```
+
+##### `checkout` publisher
+
+In the `checkout` publisher service, you publish the orderId message to the Redis instance called `orderpubsub` [(as defined in the `pubsub.yaml` component)]({{< ref "#pubsubyaml-component-file" >}}) and topic `orders`. As soon as the service starts, it publishes in a loop:
+
+```js
+const client = new DaprClient();
+
+await client.pubsub.publish(PUBSUB_NAME, PUBSUB_TOPIC, order);
+console.log("Published data: " + JSON.stringify(order));
+```
+
+{{% /codetab %}}
+
+
+{{% codetab %}}
+
+### Step 1: Pre-requisites
+
+For this example, you will need:
+
+- [Dapr CLI and initialized environment](https://docs.dapr.io/getting-started).
+- [.NET SDK or .NET 6 SDK installed](https://dotnet.microsoft.com/download).
+
+- [Docker Desktop](https://www.docker.com/products/docker-desktop)
+
+
+### Step 2: Set up the environment
+
+Clone the [sample provided in the Quickstarts repo](https://github.com/dapr/quickstarts/tree/master/pub_sub).
+
+```bash
+git clone https://github.com/dapr/quickstarts.git
+```
+
+From the root of the Quickstarts directory, navigate into the pub/sub directory:
+
+```bash
+cd pub_sub/csharp/sdk
+```
+
+### Step 3: Run the publisher and subscriber
+
+With the following command, simultaneously run the following services alongside their own Dapr sidecars:
+- The `order-processor` subscriber
+- The `checkout` publisher
+
+```bash
+dapr run -f .
+```
+
+**Expected output**
+
+```
+== APP - checkout-sdk == Published data: Order { OrderId = 1 }
+== APP - order-processor == Subscriber received : Order { OrderId = 1 }
+== APP - checkout-sdk == Published data: Order { OrderId = 2 }
+== APP - order-processor == Subscriber received : Order { OrderId = 2 }
+== APP - checkout-sdk == Published data: Order { OrderId = 3 }
+== APP - order-processor == Subscriber received : Order { OrderId = 3 }
+== APP - checkout-sdk == Published data: Order { OrderId = 4 }
+== APP - order-processor == Subscriber received : Order { OrderId = 4 }
+== APP - checkout-sdk == Published data: Order { OrderId = 5 }
+== APP - order-processor == Subscriber received : Order { OrderId = 5 }
+== APP - checkout-sdk == Published data: Order { OrderId = 6 }
+== APP - order-processor == Subscriber received : Order { OrderId = 6 }
+== APP - checkout-sdk == Published data: Order { OrderId = 7 }
+== APP - order-processor == Subscriber received : Order { OrderId = 7 }
+== APP - checkout-sdk == Published data: Order { OrderId = 8 }
+== APP - order-processor == Subscriber received : Order { OrderId = 8 }
+== APP - checkout-sdk == Published data: Order { OrderId = 9 }
+== APP - order-processor == Subscriber received : Order { OrderId = 9 }
+== APP - checkout-sdk == Published data: Order { OrderId = 10 }
+== APP - order-processor == Subscriber received : Order { OrderId = 10 }
+Exited App successfully
+```
+
+### What happened?
+
+When you ran `dapr init` during Dapr install, the following YAML files were generated in the `.dapr/components` directory:
+- [`dapr.yaml` Multi-App Run template file]({{< ref "#dapryaml-multi-app-run-template-file" >}})
+- [`pubsub.yaml` component file]({{< ref "#pubsubyaml-component-file" >}})
+
+Running `dapr run -f .` in this Quickstart started both the [subscriber]({{< ref "#order-processor-subscriber" >}}) and [publisher]({{< ref "#checkout-publisher" >}}) applications.
+
+##### `dapr.yaml` Multi-App Run template file
+
+Running the [Multi-App Run template file]({{< ref multi-app-dapr-run >}}) with `dapr run -f .` starts all applications in your project. In this Quickstart, the `dapr.yaml` file contains the following:
+
+```yml
+version: 1
+common:
+ resourcesPath: ../../components/
+apps:
+ - appID: order-processor
+ appDirPath: ./order-processor/
+ appPort: 7006
+ command: ["dotnet", "run"]
+ - appID: checkout-sdk
+ appDirPath: ./checkout/
+ command: ["dotnet", "run"]
+```
+
+##### `pubsub.yaml` component file
+
+With the `pubsub.yaml` component, you can easily swap out underlying components without application code changes.
+
+The Redis `pubsub.yaml` file included for this Quickstart contains the following:
+
+```yaml
+apiVersion: dapr.io/v1alpha1
+kind: Component
+metadata:
+ name: orderpubsub
+spec:
+ type: pubsub.redis
+ version: v1
+ metadata:
+ - name: redisHost
+ value: localhost:6379
+ - name: redisPassword
+ value: ""
+```
+
+In the component YAML file:
+
+- `metadata/name` is how your application talks to the component.
+- `spec/metadata` defines the connection to the instance of the component.
+- `scopes` specifies which applications can use the component.
+
+##### `order-processor` subscriber
+
+In the `order-processor` subscriber, you subscribe to the Redis instance called `orderpubsub` [(as defined in the `pubsub.yaml` component)]({{< ref "#pubsubyaml-component-file" >}}) and topic `orders`. This enables your app code to talk to the Redis component instance through the Dapr sidecar.
+
+```cs
+// Dapr subscription in [Topic] routes orders topic to this route
+app.MapPost("/orders", [Topic("orderpubsub", "orders")] (Order order) => {
+ Console.WriteLine("Subscriber received : " + order);
+ return Results.Ok(order);
+});
+
+public record Order([property: JsonPropertyName("orderId")] int OrderId);
+```
+
+##### `checkout` publisher
+
+In the `checkout` publisher, you publish the orderId message to the Redis instance called `orderpubsub` [(as defined in the `pubsub.yaml` component)]({{< ref "#pubsubyaml-component-file" >}}) and topic `orders`. As soon as the service starts, it publishes in a loop:
+
+```cs
+using var client = new DaprClientBuilder().Build();
+await client.PublishEventAsync("orderpubsub", "orders", order);
+Console.WriteLine("Published data: " + order);
+```
+
+{{% /codetab %}}
+
+
+{{% codetab %}}
+
+### Step 1: Pre-requisites
+
+For this example, you will need:
+
+- [Dapr CLI and initialized environment](https://docs.dapr.io/getting-started).
+- Java JDK 11 (or greater):
+ - [Oracle JDK](https://www.oracle.com/java/technologies/downloads), or
+ - OpenJDK
+- [Apache Maven](https://maven.apache.org/install.html), version 3.x.
+
+- [Docker Desktop](https://www.docker.com/products/docker-desktop)
+
+
+### Step 2: Set up the environment
+
+Clone the [sample provided in the Quickstarts repo](https://github.com/dapr/quickstarts/tree/master/pub_sub).
+
+```bash
+git clone https://github.com/dapr/quickstarts.git
+```
+
+From the root of the Quickstarts directory, navigate into the pub/sub directory:
+
+```bash
+cd pub_sub/java/sdk
+```
+
+### Step 3: Run the publisher and subscriber
+
+With the following command, simultaneously run the following services alongside their own Dapr sidecars:
+- The `order-processor` subscriber
+- The `checkout` publisher
+
+```bash
+dapr run -f .
+```
+
+**Expected output**
+
+```
+== APP - checkout-sdk == Published data: Order { OrderId = 1 }
+== APP - order-processor == Subscriber received : Order { OrderId = 1 }
+== APP - checkout-sdk == Published data: Order { OrderId = 2 }
+== APP - order-processor == Subscriber received : Order { OrderId = 2 }
+== APP - checkout-sdk == Published data: Order { OrderId = 3 }
+== APP - order-processor == Subscriber received : Order { OrderId = 3 }
+== APP - checkout-sdk == Published data: Order { OrderId = 4 }
+== APP - order-processor == Subscriber received : Order { OrderId = 4 }
+== APP - checkout-sdk == Published data: Order { OrderId = 5 }
+== APP - order-processor == Subscriber received : Order { OrderId = 5 }
+== APP - checkout-sdk == Published data: Order { OrderId = 6 }
+== APP - order-processor == Subscriber received : Order { OrderId = 6 }
+== APP - checkout-sdk == Published data: Order { OrderId = 7 }
+== APP - order-processor == Subscriber received : Order { OrderId = 7 }
+== APP - checkout-sdk == Published data: Order { OrderId = 8 }
+== APP - order-processor == Subscriber received : Order { OrderId = 8 }
+== APP - checkout-sdk == Published data: Order { OrderId = 9 }
+== APP - order-processor == Subscriber received : Order { OrderId = 9 }
+== APP - checkout-sdk == Published data: Order { OrderId = 10 }
+== APP - order-processor == Subscriber received : Order { OrderId = 10 }
+Exited App successfully
+```
+
+### What happened?
+
+When you ran `dapr init` during Dapr install, the following YAML files were generated in the `.dapr/components` directory:
+- [`dapr.yaml` Multi-App Run template file]({{< ref "#dapryaml-multi-app-run-template-file" >}})
+- [`pubsub.yaml` component file]({{< ref "#pubsubyaml-component-file" >}})
+
+Running `dapr run -f .` in this Quickstart started both the [subscriber]({{< ref "#order-processor-subscriber" >}}) and [publisher]({{< ref "#checkout-publisher" >}}) applications.
+
+##### `dapr.yaml` Multi-App Run template file
+
+Running the [Multi-App Run template file]({{< ref multi-app-dapr-run >}}) with `dapr run -f .` starts all applications in your project. In this Quickstart, the `dapr.yaml` file contains the following:
+
+```yml
+version: 1
+common:
+ resourcesPath: ../../components/
+apps:
+ - appID: order-processor-sdk
+ appDirPath: ./order-processor/target/
+ appPort: 8080
+ command: ["java", "-jar", "OrderProcessingService-0.0.1-SNAPSHOT.jar"]
+ - appID: checkout-sdk
+ appDirPath: ./checkout/target/
+ command: ["java", "-jar", "CheckoutService-0.0.1-SNAPSHOT.jar"]
+```
+
+##### `pubsub.yaml` component file
+
+With the `pubsub.yaml` component, you can easily swap out underlying components without application code changes.
+
+The Redis `pubsub.yaml` file included for this Quickstart contains the following:
+
+```yaml
+apiVersion: dapr.io/v1alpha1
+kind: Component
+metadata:
+ name: orderpubsub
+spec:
+ type: pubsub.redis
+ version: v1
+ metadata:
+ - name: redisHost
+ value: localhost:6379
+ - name: redisPassword
+ value: ""
+```
+
+In the component YAML file:
+
+- `metadata/name` is how your application talks to the component.
+- `spec/metadata` defines the connection to the instance of the component.
+- `scopes` specify which application can use the component.
+
+##### `order-processor` subscriber
+
+In the `order-processor` subscriber, you subscribe to the Redis instance called `orderpubsub` [(as defined in the `pubsub.yaml` component)]({{< ref "#pubsubyaml-component-file" >}}) and topic `orders`. This enables your app code to talk to the Redis component instance through the Dapr sidecar.
+
+```java
+@Topic(name = "orders", pubsubName = "orderpubsub")
+@PostMapping(path = "/orders", consumes = MediaType.ALL_VALUE)
+public Mono getCheckout(@RequestBody(required = false) CloudEvent cloudEvent) {
+ return Mono.fromSupplier(() -> {
+ try {
+ logger.info("Subscriber received: " + cloudEvent.getData().getOrderId());
+ return ResponseEntity.ok("SUCCESS");
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ });
+}
+```
+
+##### `checkout` publisher
+
+In the `checkout` publisher, you publish the orderId message to the Redis instance called `orderpubsub` [(as defined in the `pubsub.yaml` component)]({{< ref "#pubsubyaml-component-file" >}}) and topic `orders`. As soon as the service starts, it publishes in a loop:
+
+```java
+DaprClient client = new DaprClientBuilder().build();
+client.publishEvent(
+ PUBSUB_NAME,
+ TOPIC_NAME,
+ order).block();
+logger.info("Published data: " + order.getOrderId());
+```
+
+{{% /codetab %}}
+
+
+{{% codetab %}}
+
+### Step 1: Pre-requisites
+
+For this example, you will need:
+
+- [Dapr CLI and initialized environment](https://docs.dapr.io/getting-started).
+- [Latest version of Go](https://go.dev/dl/).
+
+- [Docker Desktop](https://www.docker.com/products/docker-desktop)
+
+
+### Step 2: Set up the environment
+
+Clone the [sample provided in the Quickstarts repo](https://github.com/dapr/quickstarts/tree/master/pub_sub).
+
+```bash
+git clone https://github.com/dapr/quickstarts.git
+```
+
+From the root of the Quickstarts directory, navigate into the pub/sub directory:
+
+```bash
+cd pub_sub/go/sdk
+```
+
+### Step 3: Run the publisher and subscriber
+
+With the following command, simultaneously run the following services alongside their own Dapr sidecars:
+- The `order-processor` subscriber
+- The `checkout` publisher
+
+```bash
+dapr run -f .
+```
+
+**Expected output**
+
+```
+== APP - checkout-sdk == Published data: Order { OrderId = 1 }
+== APP - order-processor == Subscriber received : Order { OrderId = 1 }
+== APP - checkout-sdk == Published data: Order { OrderId = 2 }
+== APP - order-processor == Subscriber received : Order { OrderId = 2 }
+== APP - checkout-sdk == Published data: Order { OrderId = 3 }
+== APP - order-processor == Subscriber received : Order { OrderId = 3 }
+== APP - checkout-sdk == Published data: Order { OrderId = 4 }
+== APP - order-processor == Subscriber received : Order { OrderId = 4 }
+== APP - checkout-sdk == Published data: Order { OrderId = 5 }
+== APP - order-processor == Subscriber received : Order { OrderId = 5 }
+== APP - checkout-sdk == Published data: Order { OrderId = 6 }
+== APP - order-processor == Subscriber received : Order { OrderId = 6 }
+== APP - checkout-sdk == Published data: Order { OrderId = 7 }
+== APP - order-processor == Subscriber received : Order { OrderId = 7 }
+== APP - checkout-sdk == Published data: Order { OrderId = 8 }
+== APP - order-processor == Subscriber received : Order { OrderId = 8 }
+== APP - checkout-sdk == Published data: Order { OrderId = 9 }
+== APP - order-processor == Subscriber received : Order { OrderId = 9 }
+== APP - checkout-sdk == Published data: Order { OrderId = 10 }
+== APP - order-processor == Subscriber received : Order { OrderId = 10 }
+Exited App successfully
+```
+
+### What happened?
+
+When you ran `dapr init` during Dapr install, the following YAML files were generated in the `.dapr/components` directory:
+- [`dapr.yaml` Multi-App Run template file]({{< ref "#dapryaml-multi-app-run-template-file" >}})
+- [`pubsub.yaml` component file]({{< ref "#pubsubyaml-component-file" >}})
+
+Running `dapr run -f .` in this Quickstart started both the [subscriber]({{< ref "#order-processor-subscriber" >}}) and [publisher]({{< ref "#checkout-publisher" >}}) applications.
+
+##### `dapr.yaml` Multi-App Run template file
+
+Running the [Multi-App Run template file]({{< ref multi-app-dapr-run >}}) with `dapr run -f .` starts all applications in your project. In this Quickstart, the `dapr.yaml` file contains the following:
+
+```yml
+version: 1
+common:
+ resourcesPath: ../../components/
+apps:
+ - appID: order-processor
+ appDirPath: ./order-processor/
+ appPort: 6005
+ command: ["go", "run", "."]
+ - appID: checkout-sdk
+ appDirPath: ./checkout/
+ command: ["go", "run", "."]
+```
+
+##### `pubsub.yaml` component file
+
+With the `pubsub.yaml` component, you can easily swap out underlying components without application code changes.
+
+The Redis `pubsub.yaml` file included for this Quickstart contains the following:
+
+```yaml
+apiVersion: dapr.io/v1alpha1
+kind: Component
+metadata:
+ name: orderpubsub
+spec:
+ type: pubsub.redis
+ version: v1
+ metadata:
+ - name: redisHost
+ value: localhost:6379
+ - name: redisPassword
+ value: ""
+```
+
+In the component YAML file:
+
+- `metadata/name` is how your application talks to the component.
+- `spec/metadata` defines the connection to the instance of the component.
+- `scopes` specify which application can use the component.
+
+##### `order-processor` subscriber
+
+In the `order-processor` subscriber, you subscribe to the Redis instance called `orderpubsub` [(as defined in the `pubsub.yaml` component)]({{< ref "#pubsubyaml-component-file" >}}) and topic `orders`. This enables your app code to talk to the Redis component instance through the Dapr sidecar.
+
+```go
+func eventHandler(ctx context.Context, e *common.TopicEvent) (retry bool, err error) {
+ fmt.Println("Subscriber received: ", e.Data)
+ return false, nil
+}
+```
+
+##### `checkout` publisher
+
+In the `checkout` publisher, you publish the orderId message to the Redis instance called `orderpubsub` [(as defined in the `pubsub.yaml` component)]({{< ref "#pubsubyaml-component-file" >}}) and topic `orders`. As soon as the service starts, it publishes in a loop:
+
+```go
+client, err := dapr.NewClient()
+
+if err := client.PublishEvent(ctx, PUBSUB_NAME, PUBSUB_TOPIC, []byte(order)); err != nil {
+ panic(err)
+}
+
+fmt.Println("Published data: ", order)
+```
+
+{{% /codetab %}}
+
+{{< /tabs >}}
+
+## Run one application at a time
+
Select your preferred language-specific Dapr SDK before proceeding with the Quickstart.
{{< tabs "Python" "JavaScript" ".NET" "Java" "Go" >}}
@@ -157,7 +908,7 @@ Subscriber output:
== APP == INFO:root:Subscriber received: {"orderId": 10}
```
-#### `pubsub.yaml` component file
+##### `pubsub.yaml` component file
When you run `dapr init`, Dapr creates a default Redis `pubsub.yaml` and runs a Redis container on your local machine, located:
@@ -315,7 +1066,7 @@ Subscriber output:
```
-#### `pubsub.yaml` component file
+##### `pubsub.yaml` component file
When you run `dapr init`, Dapr creates a default Redis `pubsub.yaml` and runs a Redis container on your local machine, located:
@@ -468,7 +1219,7 @@ Subscriber output:
== APP == Subscriber received: Order { OrderId = 10 }
```
-#### `pubsub.yaml` component file
+##### `pubsub.yaml` component file
When you run `dapr init`, Dapr creates a default Redis `pubsub.yaml` and runs a Redis container on your local machine, located:
@@ -630,7 +1381,7 @@ Subscriber output:
== APP == 2022-03-07 13:31:37.919 INFO 43512 --- [nio-8080-exec-2] c.s.c.OrderProcessingServiceController : Subscriber received: 10
```
-#### `pubsub.yaml` component file
+##### `pubsub.yaml` component file
When you run `dapr init`, Dapr creates a default Redis `pubsub.yaml` and runs a Redis container on your local machine, located:
@@ -788,7 +1539,7 @@ Subscriber output:
Note: the order in which they are received may vary.
-#### `pubsub.yaml` component file
+##### `pubsub.yaml` component file
When you run `dapr init`, Dapr creates a default Redis `pubsub.yaml` and runs a Redis container on your local machine, located:
diff --git a/daprdocs/content/en/getting-started/quickstarts/serviceinvocation-quickstart.md b/daprdocs/content/en/getting-started/quickstarts/serviceinvocation-quickstart.md
index fc61df703cf..c33c529f952 100644
--- a/daprdocs/content/en/getting-started/quickstarts/serviceinvocation-quickstart.md
+++ b/daprdocs/content/en/getting-started/quickstarts/serviceinvocation-quickstart.md
@@ -10,10 +10,678 @@ With [Dapr's Service Invocation building block](https://docs.dapr.io/developing-
-Dapr offers several methods for service invocation, which you can choose depending on your scenario. For this Quickstart, you'll enable the checkout service to invoke a method using HTTP proxy in the order-processor service.
+Dapr offers several methods for service invocation, which you can choose depending on your scenario. For this Quickstart, you'll enable the checkout service to invoke a method using HTTP proxy in the order-processor service and by either:
+- [Running all applications in this sample simultaneously with the Multi-App Run template file]({{< ref "#run-using-multi-app-run" >}}), or
+- [Running one application at a time]({{< ref "#run-one-application-at-a-time" >}})
Learn more about Dapr's methods for service invocation in the [overview article]({{< ref service-invocation-overview.md >}}).
+## Run using Multi-App Run
+
+Select your preferred language before proceeding with the Quickstart.
+
+{{< tabs "Python" "JavaScript" ".NET" "Java" "Go" >}}
+
+{{% codetab %}}
+
+### Step 1: Pre-requisites
+
+For this example, you will need:
+
+- [Dapr CLI and initialized environment](https://docs.dapr.io/getting-started).
+- [Python 3.7+ installed](https://www.python.org/downloads/).
+
+- [Docker Desktop](https://www.docker.com/products/docker-desktop)
+
+
+### Step 2: Set up the environment
+
+Clone the [sample provided in the Quickstarts repo](https://github.com/dapr/quickstarts/tree/master/service_invocation).
+
+```bash
+git clone https://github.com/dapr/quickstarts.git
+```
+
+From the root of the Quickstart clone directory, navigate to the quickstart directory.
+
+```bash
+cd service_invocation/python/http
+```
+
+### Step 3: Run the `order-processor` and `checkout` services
+
+With the following command, simultaneously run the following services alongside their own Dapr sidecars:
+- The `order-processor` service
+- The `checkout` service
+
+```bash
+dapr run -f .
+```
+
+**Expected output**
+
+```
+== APP - order-processor == Order received : Order { orderId = 1 }
+== APP - checkout == Order passed: Order { OrderId = 1 }
+== APP - order-processor == Order received : Order { orderId = 2 }
+== APP - checkout == Order passed: Order { OrderId = 2 }
+== APP - order-processor == Order received : Order { orderId = 3 }
+== APP - checkout == Order passed: Order { OrderId = 3 }
+== APP - order-processor == Order received : Order { orderId = 4 }
+== APP - checkout == Order passed: Order { OrderId = 4 }
+== APP - order-processor == Order received : Order { orderId = 5 }
+== APP - checkout == Order passed: Order { OrderId = 5 }
+== APP - order-processor == Order received : Order { orderId = 6 }
+== APP - checkout == Order passed: Order { OrderId = 6 }
+== APP - order-processor == Order received : Order { orderId = 7 }
+== APP - checkout == Order passed: Order { OrderId = 7 }
+== APP - order-processor == Order received : Order { orderId = 8 }
+== APP - checkout == Order passed: Order { OrderId = 8 }
+== APP - order-processor == Order received : Order { orderId = 9 }
+== APP - checkout == Order passed: Order { OrderId = 9 }
+== APP - order-processor == Order received : Order { orderId = 10 }
+== APP - checkout == Order passed: Order { OrderId = 10 }
+== APP - order-processor == Order received : Order { orderId = 11 }
+== APP - checkout == Order passed: Order { OrderId = 11 }
+== APP - order-processor == Order received : Order { orderId = 12 }
+== APP - checkout == Order passed: Order { OrderId = 12 }
+== APP - order-processor == Order received : Order { orderId = 13 }
+== APP - checkout == Order passed: Order { OrderId = 13 }
+== APP - order-processor == Order received : Order { orderId = 14 }
+== APP - checkout == Order passed: Order { OrderId = 14 }
+== APP - order-processor == Order received : Order { orderId = 15 }
+== APP - checkout == Order passed: Order { OrderId = 15 }
+== APP - order-processor == Order received : Order { orderId = 16 }
+== APP - checkout == Order passed: Order { OrderId = 16 }
+== APP - order-processor == Order received : Order { orderId = 17 }
+== APP - checkout == Order passed: Order { OrderId = 17 }
+== APP - order-processor == Order received : Order { orderId = 18 }
+== APP - checkout == Order passed: Order { OrderId = 18 }
+== APP - order-processor == Order received : Order { orderId = 19 }
+== APP - checkout == Order passed: Order { OrderId = 19 }
+== APP - order-processor == Order received : Order { orderId = 20 }
+== APP - checkout == Order passed: Order { OrderId = 20 }
+Exited App successfully
+```
+
+### What happened?
+
+Running `dapr run -f .` in this Quickstart started both the [`order-processor` service]({{< ref "#order-processor-service" >}}) and the [`checkout` service]({{< ref "#checkout-service" >}}) using the `dapr.yaml` Multi-App Run template file.
+
+##### `dapr.yaml` Multi-App Run template file
+
+Running the [Multi-App Run template file]({{< ref multi-app-dapr-run >}}) with `dapr run -f .` starts all applications in your project. In this Quickstart, the `dapr.yaml` file contains the following:
+
+```yml
+version: 1
+apps:
+ - appDirPath: ./order-processor/
+ appID: order-processor
+ appPort: 8001
+ command: ["python3", "app.py"]
+ - appID: checkout
+ appDirPath: ./checkout/
+ command: ["python3", "app.py"]
+```
+
+##### `order-processor` service
+
+The `order-processor` service receives the call from the `checkout` service:
+
+```py
+@app.route('/orders', methods=['POST'])
+def getOrder():
+ data = request.json
+ print('Order received : ' + json.dumps(data), flush=True)
+ return json.dumps({'success': True}), 200, {
+ 'ContentType': 'application/json'}
+
+
+app.run(port=8001)
+```
+
+##### `checkout` service
+
+In the `checkout` service, you'll notice there's no need to rewrite your app code to use Dapr's service invocation. You can enable service invocation by simply adding the `dapr-app-id` header, which specifies the ID of the target service.
+
+```python
+headers = {'dapr-app-id': 'order-processor'}
+
+result = requests.post(
+ url='%s/orders' % (base_url),
+ data=json.dumps(order),
+ headers=headers
+)
+```
+
+{{% /codetab %}}
+
+
+{{% codetab %}}
+
+### Step 1: Pre-requisites
+
+For this example, you will need:
+
+- [Dapr CLI and initialized environment](https://docs.dapr.io/getting-started).
+- [Latest Node.js installed](https://nodejs.org/).
+
+- [Docker Desktop](https://www.docker.com/products/docker-desktop)
+
+
+### Step 2: Set up the environment
+
+Clone the [sample provided in the Quickstarts repo](https://github.com/dapr/quickstarts/tree/master/service_invocation).
+
+```bash
+git clone https://github.com/dapr/quickstarts.git
+```
+
+From the root of the Quickstart clone directory, navigate to the quickstart directory.
+
+```bash
+cd service_invocation/javascript/http
+```
+
+### Step 3: Run the `order-processor` and `checkout` services
+
+With the following command, simultaneously run the following services alongside their own Dapr sidecars:
+- The `order-processor` service
+- The `checkout` service
+
+```bash
+dapr run -f .
+```
+
+**Expected output**
+
+```
+== APP - order-processor == Order received : Order { orderId = 1 }
+== APP - checkout == Order passed: Order { OrderId = 1 }
+== APP - order-processor == Order received : Order { orderId = 2 }
+== APP - checkout == Order passed: Order { OrderId = 2 }
+== APP - order-processor == Order received : Order { orderId = 3 }
+== APP - checkout == Order passed: Order { OrderId = 3 }
+== APP - order-processor == Order received : Order { orderId = 4 }
+== APP - checkout == Order passed: Order { OrderId = 4 }
+== APP - order-processor == Order received : Order { orderId = 5 }
+== APP - checkout == Order passed: Order { OrderId = 5 }
+== APP - order-processor == Order received : Order { orderId = 6 }
+== APP - checkout == Order passed: Order { OrderId = 6 }
+== APP - order-processor == Order received : Order { orderId = 7 }
+== APP - checkout == Order passed: Order { OrderId = 7 }
+== APP - order-processor == Order received : Order { orderId = 8 }
+== APP - checkout == Order passed: Order { OrderId = 8 }
+== APP - order-processor == Order received : Order { orderId = 9 }
+== APP - checkout == Order passed: Order { OrderId = 9 }
+== APP - order-processor == Order received : Order { orderId = 10 }
+== APP - checkout == Order passed: Order { OrderId = 10 }
+== APP - order-processor == Order received : Order { orderId = 11 }
+== APP - checkout == Order passed: Order { OrderId = 11 }
+== APP - order-processor == Order received : Order { orderId = 12 }
+== APP - checkout == Order passed: Order { OrderId = 12 }
+== APP - order-processor == Order received : Order { orderId = 13 }
+== APP - checkout == Order passed: Order { OrderId = 13 }
+== APP - order-processor == Order received : Order { orderId = 14 }
+== APP - checkout == Order passed: Order { OrderId = 14 }
+== APP - order-processor == Order received : Order { orderId = 15 }
+== APP - checkout == Order passed: Order { OrderId = 15 }
+== APP - order-processor == Order received : Order { orderId = 16 }
+== APP - checkout == Order passed: Order { OrderId = 16 }
+== APP - order-processor == Order received : Order { orderId = 17 }
+== APP - checkout == Order passed: Order { OrderId = 17 }
+== APP - order-processor == Order received : Order { orderId = 18 }
+== APP - checkout == Order passed: Order { OrderId = 18 }
+== APP - order-processor == Order received : Order { orderId = 19 }
+== APP - checkout == Order passed: Order { OrderId = 19 }
+== APP - order-processor == Order received : Order { orderId = 20 }
+== APP - checkout == Order passed: Order { OrderId = 20 }
+Exited App successfully
+```
+
+### What happened?
+
+Running `dapr run -f .` in this Quickstart started both the [`order-processor` service]({{< ref "#order-processor-service" >}}) and the [`checkout` service]({{< ref "#checkout-service" >}}) using the `dapr.yaml` Multi-App Run template file.
+
+##### `dapr.yaml` Multi-App Run template file
+
+Running the [Multi-App Run template file]({{< ref multi-app-dapr-run >}}) with `dapr run -f .` starts all applications in your project. In this Quickstart, the `dapr.yaml` file contains the following:
+
+```yml
+version: 1
+apps:
+ - appDirPath: ./order-processor/
+ appID: order-processor
+ appPort: 5001
+ command: ["npm", "start"]
+ - appID: checkout
+ appDirPath: ./checkout/
+ command: ["npm", "start"]
+```
+
+##### `order-processor` service
+
+The `order-processor` service receives the call from the `checkout` service:
+
+```javascript
+app.post('/orders', (req, res) => {
+ console.log("Order received:", req.body);
+ res.sendStatus(200);
+});
+```
+
+##### `checkout` service
+
+In the `checkout` service, you'll notice there's no need to rewrite your app code to use Dapr's service invocation. You can enable service invocation by simply adding the `dapr-app-id` header, which specifies the ID of the target service.
+
+```javascript
+let axiosConfig = {
+ headers: {
+ "dapr-app-id": "order-processor"
+ }
+};
+const res = await axios.post(`${DAPR_HOST}:${DAPR_HTTP_PORT}/orders`, order , axiosConfig);
+console.log("Order passed: " + res.config.data);
+```
+
+{{% /codetab %}}
+
+
+{{% codetab %}}
+
+### Step 1: Pre-requisites
+
+For this example, you will need:
+
+- [Dapr CLI and initialized environment](https://docs.dapr.io/getting-started).
+- [.NET SDK or .NET 7 SDK installed](https://dotnet.microsoft.com/download).
+
+- [Docker Desktop](https://www.docker.com/products/docker-desktop)
+
+
+### Step 2: Set up the environment
+
+Clone the [sample provided in the Quickstarts repo](https://github.com/dapr/quickstarts/tree/master/service_invocation).
+
+```bash
+git clone https://github.com/dapr/quickstarts.git
+```
+
+From the root of the Quickstart clone directory, navigate to the quickstart directory.
+
+```bash
+cd service_invocation/csharp/http
+```
+
+### Step 3: Run the `order-processor` and `checkout` services
+
+With the following command, simultaneously run the following services alongside their own Dapr sidecars:
+- The `order-processor` service
+- The `checkout` service
+
+```bash
+dapr run -f .
+```
+
+**Expected output**
+
+```
+== APP - order-processor == Order received : Order { orderId = 1 }
+== APP - checkout == Order passed: Order { OrderId = 1 }
+== APP - order-processor == Order received : Order { orderId = 2 }
+== APP - checkout == Order passed: Order { OrderId = 2 }
+== APP - order-processor == Order received : Order { orderId = 3 }
+== APP - checkout == Order passed: Order { OrderId = 3 }
+== APP - order-processor == Order received : Order { orderId = 4 }
+== APP - checkout == Order passed: Order { OrderId = 4 }
+== APP - order-processor == Order received : Order { orderId = 5 }
+== APP - checkout == Order passed: Order { OrderId = 5 }
+== APP - order-processor == Order received : Order { orderId = 6 }
+== APP - checkout == Order passed: Order { OrderId = 6 }
+== APP - order-processor == Order received : Order { orderId = 7 }
+== APP - checkout == Order passed: Order { OrderId = 7 }
+== APP - order-processor == Order received : Order { orderId = 8 }
+== APP - checkout == Order passed: Order { OrderId = 8 }
+== APP - order-processor == Order received : Order { orderId = 9 }
+== APP - checkout == Order passed: Order { OrderId = 9 }
+== APP - order-processor == Order received : Order { orderId = 10 }
+== APP - checkout == Order passed: Order { OrderId = 10 }
+== APP - order-processor == Order received : Order { orderId = 11 }
+== APP - checkout == Order passed: Order { OrderId = 11 }
+== APP - order-processor == Order received : Order { orderId = 12 }
+== APP - checkout == Order passed: Order { OrderId = 12 }
+== APP - order-processor == Order received : Order { orderId = 13 }
+== APP - checkout == Order passed: Order { OrderId = 13 }
+== APP - order-processor == Order received : Order { orderId = 14 }
+== APP - checkout == Order passed: Order { OrderId = 14 }
+== APP - order-processor == Order received : Order { orderId = 15 }
+== APP - checkout == Order passed: Order { OrderId = 15 }
+== APP - order-processor == Order received : Order { orderId = 16 }
+== APP - checkout == Order passed: Order { OrderId = 16 }
+== APP - order-processor == Order received : Order { orderId = 17 }
+== APP - checkout == Order passed: Order { OrderId = 17 }
+== APP - order-processor == Order received : Order { orderId = 18 }
+== APP - checkout == Order passed: Order { OrderId = 18 }
+== APP - order-processor == Order received : Order { orderId = 19 }
+== APP - checkout == Order passed: Order { OrderId = 19 }
+== APP - order-processor == Order received : Order { orderId = 20 }
+== APP - checkout == Order passed: Order { OrderId = 20 }
+Exited App successfully
+```
+
+### What happened?
+
+Running `dapr run -f .` in this Quickstart started both the [`order-processor` service]({{< ref "#order-processor-service" >}}) and the [`checkout` service]({{< ref "#checkout-service" >}}) using the `dapr.yaml` Multi-App Run template file.
+
+##### `dapr.yaml` Multi-App Run template file
+
+Running the [Multi-App Run template file]({{< ref multi-app-dapr-run >}}) with `dapr run -f .` starts all applications in your project. In this Quickstart, the `dapr.yaml` file contains the following:
+
+```yml
+version: 1
+apps:
+ - appDirPath: ./order-processor/
+ appID: order-processor
+ appPort: 7001
+ command: ["dotnet", "run"]
+ - appID: checkout
+ appDirPath: ./checkout/
+ command: ["dotnet", "run"]
+```
+
+##### `order-processor` service
+
+The `order-processor` service receives the call from the `checkout` service:
+
+```csharp
+app.MapPost("/orders", (Order order) =>
+{
+ Console.WriteLine("Order received : " + order);
+ return order.ToString();
+});
+```
+
+##### `checkout` service
+
+In the Program.cs file for the `checkout` service, you'll notice there's no need to rewrite your app code to use Dapr's service invocation. You can enable service invocation by simply adding the `dapr-app-id` header, which specifies the ID of the target service.
+
+```csharp
+var client = new HttpClient();
+client.DefaultRequestHeaders.Accept.Add(new System.Net.Http.Headers.MediaTypeWithQualityHeaderValue("application/json"));
+
+client.DefaultRequestHeaders.Add("dapr-app-id", "order-processor");
+
+var response = await client.PostAsync($"{baseURL}/orders", content);
+ Console.WriteLine("Order passed: " + order);
+```
+
+{{% /codetab %}}
+
+
+{{% codetab %}}
+
+### Step 1: Pre-requisites
+
+For this example, you will need:
+
+- [Dapr CLI and initialized environment](https://docs.dapr.io/getting-started).
+- Java JDK 11 (or greater):
+ - [Oracle JDK](https://www.oracle.com/java/technologies/downloads), or
+ - OpenJDK
+- [Apache Maven](https://maven.apache.org/install.html), version 3.x.
+
+- [Docker Desktop](https://www.docker.com/products/docker-desktop)
+
+
+### Step 2: Set up the environment
+
+Clone the [sample provided in the Quickstarts repo](https://github.com/dapr/quickstarts/tree/master/service_invocation).
+
+```bash
+git clone https://github.com/dapr/quickstarts.git
+```
+
+From the root of the Quickstart clone directory, navigate to the quickstart directory.
+
+```bash
+cd service_invocation/java/http
+```
+
+### Step 3: Run the `order-processor` and `checkout` services
+
+With the following command, simultaneously run the following services alongside their own Dapr sidecars:
+- The `order-processor` service
+- The `checkout` service
+
+```bash
+dapr run -f .
+```
+
+**Expected output**
+
+```
+== APP - order-processor == Order received : Order { orderId = 1 }
+== APP - checkout == Order passed: Order { OrderId = 1 }
+== APP - order-processor == Order received : Order { orderId = 2 }
+== APP - checkout == Order passed: Order { OrderId = 2 }
+== APP - order-processor == Order received : Order { orderId = 3 }
+== APP - checkout == Order passed: Order { OrderId = 3 }
+== APP - order-processor == Order received : Order { orderId = 4 }
+== APP - checkout == Order passed: Order { OrderId = 4 }
+== APP - order-processor == Order received : Order { orderId = 5 }
+== APP - checkout == Order passed: Order { OrderId = 5 }
+== APP - order-processor == Order received : Order { orderId = 6 }
+== APP - checkout == Order passed: Order { OrderId = 6 }
+== APP - order-processor == Order received : Order { orderId = 7 }
+== APP - checkout == Order passed: Order { OrderId = 7 }
+== APP - order-processor == Order received : Order { orderId = 8 }
+== APP - checkout == Order passed: Order { OrderId = 8 }
+== APP - order-processor == Order received : Order { orderId = 9 }
+== APP - checkout == Order passed: Order { OrderId = 9 }
+== APP - order-processor == Order received : Order { orderId = 10 }
+== APP - checkout == Order passed: Order { OrderId = 10 }
+== APP - order-processor == Order received : Order { orderId = 11 }
+== APP - checkout == Order passed: Order { OrderId = 11 }
+== APP - order-processor == Order received : Order { orderId = 12 }
+== APP - checkout == Order passed: Order { OrderId = 12 }
+== APP - order-processor == Order received : Order { orderId = 13 }
+== APP - checkout == Order passed: Order { OrderId = 13 }
+== APP - order-processor == Order received : Order { orderId = 14 }
+== APP - checkout == Order passed: Order { OrderId = 14 }
+== APP - order-processor == Order received : Order { orderId = 15 }
+== APP - checkout == Order passed: Order { OrderId = 15 }
+== APP - order-processor == Order received : Order { orderId = 16 }
+== APP - checkout == Order passed: Order { OrderId = 16 }
+== APP - order-processor == Order received : Order { orderId = 17 }
+== APP - checkout == Order passed: Order { OrderId = 17 }
+== APP - order-processor == Order received : Order { orderId = 18 }
+== APP - checkout == Order passed: Order { OrderId = 18 }
+== APP - order-processor == Order received : Order { orderId = 19 }
+== APP - checkout == Order passed: Order { OrderId = 19 }
+== APP - order-processor == Order received : Order { orderId = 20 }
+== APP - checkout == Order passed: Order { OrderId = 20 }
+Exited App successfully
+```
+
+### What happened?
+
+Running `dapr run -f .` in this Quickstart started both the [`order-processor`]({{< ref "#order-processor-service" >}}) and the [`checkout`]({{< ref "#checkout-service" >}}) applications using the `dapr.yaml` Multi-App Run template file.
+
+##### `dapr.yaml` Multi-App Run template file
+
+Running the [Multi-App Run template file]({{< ref multi-app-dapr-run >}}) with `dapr run -f .` starts all applications in your project. In this Quickstart, the `dapr.yaml` file contains the following:
+
+```yml
+version: 1
+apps:
+ - appDirPath: ./order-processor/
+ appID: order-processor
+ appPort: 9001
+ command: ["java", "-jar", "target/OrderProcessingService-0.0.1-SNAPSHOT.jar"]
+ - appID: checkout
+ appDirPath: ./checkout/
+ command: ["java", "-jar", "target/CheckoutService-0.0.1-SNAPSHOT.jar"]
+```
+
+##### `order-processor` service
+
+The `order-processor` service receives the call from the `checkout` service:
+
+```java
+public String processOrders(@RequestBody Order body) {
+ System.out.println("Order received: "+ body.getOrderId());
+ return "CID" + body.getOrderId();
+ }
+```
+
+##### `checkout` service
+
+In the `checkout` service, you'll notice there's no need to rewrite your app code to use Dapr's service invocation. You can enable service invocation by simply adding the `dapr-app-id` header, which specifies the ID of the target service.
+
+```java
+.header("Content-Type", "application/json")
+.header("dapr-app-id", "order-processor")
+
+HttpResponse response = httpClient.send(request, HttpResponse.BodyHandlers.ofString());
+System.out.println("Order passed: "+ orderId);
+```
+
+{{% /codetab %}}
+
+
+{{% codetab %}}
+
+### Step 1: Pre-requisites
+
+For this example, you will need:
+
+- [Dapr CLI and initialized environment](https://docs.dapr.io/getting-started).
+- [Latest version of Go](https://go.dev/dl/).
+
+- [Docker Desktop](https://www.docker.com/products/docker-desktop)
+
+
+### Step 2: Set up the environment
+
+Clone the [sample provided in the Quickstarts repo](https://github.com/dapr/quickstarts/tree/master/service_invocation).
+
+
+```bash
+git clone https://github.com/dapr/quickstarts.git
+```
+
+From the root of the Quickstart clone directory, navigate to the quickstart directory.
+
+```bash
+cd service_invocation/go/http
+```
+
+### Step 3: Run the `order-processor` and `checkout` services
+
+With the following command, simultaneously run the following services alongside their own Dapr sidecars:
+- The `order-processor` service
+- The `checkout` service
+
+```bash
+dapr run -f .
+```
+
+**Expected output**
+
+```
+== APP - order-processor == Order received : Order { orderId = 1 }
+== APP - checkout == Order passed: Order { OrderId = 1 }
+== APP - order-processor == Order received : Order { orderId = 2 }
+== APP - checkout == Order passed: Order { OrderId = 2 }
+== APP - order-processor == Order received : Order { orderId = 3 }
+== APP - checkout == Order passed: Order { OrderId = 3 }
+== APP - order-processor == Order received : Order { orderId = 4 }
+== APP - checkout == Order passed: Order { OrderId = 4 }
+== APP - order-processor == Order received : Order { orderId = 5 }
+== APP - checkout == Order passed: Order { OrderId = 5 }
+== APP - order-processor == Order received : Order { orderId = 6 }
+== APP - checkout == Order passed: Order { OrderId = 6 }
+== APP - order-processor == Order received : Order { orderId = 7 }
+== APP - checkout == Order passed: Order { OrderId = 7 }
+== APP - order-processor == Order received : Order { orderId = 8 }
+== APP - checkout == Order passed: Order { OrderId = 8 }
+== APP - order-processor == Order received : Order { orderId = 9 }
+== APP - checkout == Order passed: Order { OrderId = 9 }
+== APP - order-processor == Order received : Order { orderId = 10 }
+== APP - checkout == Order passed: Order { OrderId = 10 }
+== APP - order-processor == Order received : Order { orderId = 11 }
+== APP - checkout == Order passed: Order { OrderId = 11 }
+== APP - order-processor == Order received : Order { orderId = 12 }
+== APP - checkout == Order passed: Order { OrderId = 12 }
+== APP - order-processor == Order received : Order { orderId = 13 }
+== APP - checkout == Order passed: Order { OrderId = 13 }
+== APP - order-processor == Order received : Order { orderId = 14 }
+== APP - checkout == Order passed: Order { OrderId = 14 }
+== APP - order-processor == Order received : Order { orderId = 15 }
+== APP - checkout == Order passed: Order { OrderId = 15 }
+== APP - order-processor == Order received : Order { orderId = 16 }
+== APP - checkout == Order passed: Order { OrderId = 16 }
+== APP - order-processor == Order received : Order { orderId = 17 }
+== APP - checkout == Order passed: Order { OrderId = 17 }
+== APP - order-processor == Order received : Order { orderId = 18 }
+== APP - checkout == Order passed: Order { OrderId = 18 }
+== APP - order-processor == Order received : Order { orderId = 19 }
+== APP - checkout == Order passed: Order { OrderId = 19 }
+== APP - order-processor == Order received : Order { orderId = 20 }
+== APP - checkout == Order passed: Order { OrderId = 20 }
+Exited App successfully
+```
+
+### What happened?
+
+Running `dapr run -f .` in this Quickstart started both the [`order-processor`]({{< ref "#order-processor-service" >}}) and the [`checkout`]({{< ref "#checkout-service" >}}) applications using the `dapr.yaml` Multi-App Run template file.
+
+##### `dapr.yaml` Multi-App Run template file
+
+Running the [Multi-App Run template file]({{< ref multi-app-dapr-run >}}) with `dapr run -f .` starts all applications in your project. In this Quickstart, the `dapr.yaml` file contains the following:
+
+```yml
+version: 1
+apps:
+ - appDirPath: ./order-processor/
+ appID: order-processor
+ appPort: 6006
+ command: ["go", "run", "."]
+ - appID: checkout
+ appDirPath: ./checkout/
+ command: ["go", "run", "."]
+```
+
+##### `order-processor` service
+
+In the `order-processor` service, each order is received via an HTTP POST request and processed by the `getOrder` function.
+
+```go
+func getOrder(w http.ResponseWriter, r *http.Request) {
+ data, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ log.Fatal(err)
+ }
+ log.Printf("Order received : %s", string(data))
+}
+```
+
+##### `checkout` service
+
+In the `checkout` service, you'll notice there's no need to rewrite your app code to use Dapr's service invocation. You can enable service invocation by simply adding the `dapr-app-id` header, which specifies the ID of the target service.
+
+```go
+req.Header.Add("dapr-app-id", "order-processor")
+
+response, err := client.Do(req)
+```
+
+{{% /codetab %}}
+
+{{< /tabs >}}
+
+## Run one application at a time
+
Select your preferred language before proceeding with the Quickstart.
{{< tabs "Python" "JavaScript" ".NET" "Java" "Go" >}}
diff --git a/daprdocs/content/en/getting-started/quickstarts/statemanagement-quickstart.md b/daprdocs/content/en/getting-started/quickstarts/statemanagement-quickstart.md
index f73aa37f494..4d1224c4eb7 100644
--- a/daprdocs/content/en/getting-started/quickstarts/statemanagement-quickstart.md
+++ b/daprdocs/content/en/getting-started/quickstarts/statemanagement-quickstart.md
@@ -6,10 +6,652 @@ weight: 72
description: "Get started with Dapr's State Management building block"
---
-Let's take a look at Dapr's [State Management building block]({{< ref state-management >}}). In this Quickstart, you will save, get, and delete state using a Redis state store, but you can swap this out for any one of the [supported state stores]({{< ref supported-state-stores.md >}}).
+Let's take a look at Dapr's [State Management building block]({{< ref state-management >}}). In this Quickstart, you will save, get, and delete state using a Redis state store by either:
+- [Running all applications simultaneously with the Multi-App Run template file]({{< ref "#run-using-multi-app-run" >}}), or
+- [Running a single application at a time]({{< ref "#run-one-application-at-a-time" >}})
+
+While this sample uses Redis, you can swap it out for any one of the [supported state stores]({{< ref supported-state-stores.md >}}).
+
+## Run using Multi-App Run
+
+Select your preferred language-specific Dapr SDK before proceeding with the Quickstart.
+
+{{< tabs "Python" "JavaScript" ".NET" "Java" "Go" >}}
+
+{{% codetab %}}
+
+### Pre-requisites
+
+For this example, you will need:
+
+- [Dapr CLI and initialized environment](https://docs.dapr.io/getting-started).
+- [Python 3.7+ installed](https://www.python.org/downloads/).
+
+- [Docker Desktop](https://www.docker.com/products/docker-desktop)
+
+
+### Step 1: Set up the environment
+
+Clone the [sample provided in the Quickstarts repo](https://github.com/dapr/quickstarts/tree/master/state_management).
+
+```bash
+git clone https://github.com/dapr/quickstarts.git
+```
+
+### Step 2: Manipulate service state
+
+In a terminal window, navigate to the `order-processor` directory.
+
+```bash
+cd state_management/python/sdk/order-processor
+```
+
+Run the `order-processor` service alongside a Dapr sidecar using [Multi-App Run]({{< ref multi-app-dapr-run >}}).
+
+```bash
+dapr run -f .
+```
+
+The `order-processor` service writes, reads, and deletes an `orderId` key/value pair to the `statestore` instance [defined in the `statestore.yaml` component]({{< ref "#statestoreyaml-component-file" >}}). As soon as the service starts, it performs a loop.
+
+```python
+with DaprClient() as client:
+
+ # Save state into the state store
+ client.save_state(DAPR_STORE_NAME, orderId, str(order))
+ logging.info('Saving Order: %s', order)
+
+ # Get state from the state store
+ result = client.get_state(DAPR_STORE_NAME, orderId)
+ logging.info('Result after get: ' + str(result.data))
+
+ # Delete state from the state store
+ client.delete_state(store_name=DAPR_STORE_NAME, key=orderId)
+ logging.info('Deleting Order: %s', order)
+```
+
+### Step 3: View the order-processor outputs
+
+Notice, as specified in the code above, the code saves application state in the Dapr state store, reads it, then deletes it.
+
+Order-processor output:
+```
+== APP == INFO:root:Saving Order: {'orderId': '1'}
+== APP == INFO:root:Result after get: b"{'orderId': '1'}"
+== APP == INFO:root:Deleting Order: {'orderId': '1'}
+== APP == INFO:root:Saving Order: {'orderId': '2'}
+== APP == INFO:root:Result after get: b"{'orderId': '2'}"
+== APP == INFO:root:Deleting Order: {'orderId': '2'}
+== APP == INFO:root:Saving Order: {'orderId': '3'}
+== APP == INFO:root:Result after get: b"{'orderId': '3'}"
+== APP == INFO:root:Deleting Order: {'orderId': '3'}
+== APP == INFO:root:Saving Order: {'orderId': '4'}
+== APP == INFO:root:Result after get: b"{'orderId': '4'}"
+== APP == INFO:root:Deleting Order: {'orderId': '4'}
+```
+
+##### `dapr.yaml` Multi-App Run template file
+
+When you run `dapr init`, Dapr creates a default [Multi-App Run template file]({{< ref multi-app-dapr-run >}}) named `dapr.yaml`. Running `dapr run -f .` starts all applications in your project. In this sample, the `dapr.yaml` file contains the following:
+
+```yml
+version: 1
+common:
+ resourcesPath: ../../resources/
+apps:
+ - appID: order-processor
+ appDirPath: ./order-processor/
+ command: ["python3" , "app.py"]
+```
+
+##### `statestore.yaml` component file
+
+When you run `dapr init`, Dapr also creates a default Redis `statestore.yaml` and runs a Redis container on your local machine, located:
+
+- On Windows, under `%UserProfile%\.dapr\components\statestore.yaml`
+- On Linux/MacOS, under `~/.dapr/components/statestore.yaml`
+
+With the `statestore.yaml` component, you can easily swap out the [state store](/reference/components-reference/supported-state-stores/) without making code changes.
+
+The Redis `statestore.yaml` file included for this quickstart contains the following:
+
+```yaml
+apiVersion: dapr.io/v1alpha1
+kind: Component
+metadata:
+ name: statestore
+spec:
+ type: state.redis
+ version: v1
+ metadata:
+ - name: redisHost
+ value: localhost:6379
+ - name: redisPassword
+ value: ""
+ - name: actorStateStore
+ value: "true"
+```
+
+In the YAML file:
+
+- `metadata/name` is how your application talks to the component (called `DAPR_STORE_NAME` in the code sample).
+- `spec/metadata` defines the connection to the Redis instance used by the component.
+
+{{% /codetab %}}
+
+
+{{% codetab %}}
+
+### Pre-requisites
+
+For this example, you will need:
+
+- [Dapr CLI and initialized environment](https://docs.dapr.io/getting-started).
+- [Latest Node.js installed](https://nodejs.org/download/).
+
+- [Docker Desktop](https://www.docker.com/products/docker-desktop)
+
+
+### Step 1: Set up the environment
+
+Clone the [sample provided in the Quickstarts repo](https://github.com/dapr/quickstarts/tree/master/state_management).
+
+```bash
+git clone https://github.com/dapr/quickstarts.git
+```
+
+### Step 2: Manipulate service state
+
+In a terminal window, navigate to the `order-processor` directory.
+
+```bash
+cd state_management/javascript/sdk/order-processor
+```
+
+Run the `order-processor` service alongside a Dapr sidecar.
+
+```bash
+dapr run -f .
+```
+
+The `order-processor` service writes, reads, and deletes an `orderId` key/value pair to the `statestore` instance [defined in the `statestore.yaml` component]({{< ref "#statestoreyaml-component-file" >}}). As soon as the service starts, it performs a loop.
+
+```js
+const client = new DaprClient()
+
+// Save state into a state store
+await client.state.save(DAPR_STATE_STORE_NAME, order)
+console.log("Saving Order: ", order)
+
+// Get state from a state store
+const savedOrder = await client.state.get(DAPR_STATE_STORE_NAME, order.orderId)
+console.log("Getting Order: ", savedOrder)
+
+// Delete state from the state store
+await client.state.delete(DAPR_STATE_STORE_NAME, order.orderId)
+console.log("Deleting Order: ", order)
+```
+### Step 3: View the order-processor outputs
+
+Notice, as specified in the code above, the code saves application state in the Dapr state store, reads it, then deletes it.
+
+Order-processor output:
+```
+== APP == > order-processor@1.0.0 start
+== APP == > node index.js
+== APP == Saving Order: { orderId: 1 }
+== APP == Saving Order: { orderId: 2 }
+== APP == Saving Order: { orderId: 3 }
+== APP == Saving Order: { orderId: 4 }
+== APP == Saving Order: { orderId: 5 }
+== APP == Getting Order: { orderId: 1 }
+== APP == Deleting Order: { orderId: 1 }
+== APP == Getting Order: { orderId: 2 }
+== APP == Deleting Order: { orderId: 2 }
+== APP == Getting Order: { orderId: 3 }
+== APP == Deleting Order: { orderId: 3 }
+== APP == Getting Order: { orderId: 4 }
+== APP == Deleting Order: { orderId: 4 }
+== APP == Getting Order: { orderId: 5 }
+== APP == Deleting Order: { orderId: 5 }
+```
+
+##### `dapr.yaml` Multi-App Run template file
+
+When you run `dapr init`, Dapr creates a default Multi-App Run template file named `dapr.yaml`. Running `dapr run -f .` starts all applications in your project. In this sample, the `dapr.yaml` file contains the following:
+
+```yml
+version: 1
+common:
+ resourcesPath: ../../resources/
+apps:
+ - appID: order-processor
+ appDirPath: ./order-processor/
+ command: ["npm", "run", "start"]
+```
+
+##### `statestore.yaml` component file
+
+When you run `dapr init`, Dapr creates a default Redis `statestore.yaml` and runs a Redis container on your local machine, located:
+
+- On Windows, under `%UserProfile%\.dapr\components\statestore.yaml`
+- On Linux/MacOS, under `~/.dapr/components/statestore.yaml`
+
+With the `statestore.yaml` component, you can easily swap out the [state store](/reference/components-reference/supported-state-stores/) without making code changes.
+
+The Redis `statestore.yaml` file included for this quickstart contains the following:
+
+```yaml
+apiVersion: dapr.io/v1alpha1
+kind: Component
+metadata:
+ name: statestore
+spec:
+ type: state.redis
+ version: v1
+ metadata:
+ - name: redisHost
+ value: localhost:6379
+ - name: redisPassword
+ value: ""
+ - name: actorStateStore
+ value: "true"
+```
+
+In the YAML file:
+
+- `metadata/name` is how your application talks to the component (called `DAPR_STORE_NAME` in the code sample).
+- `spec/metadata` defines the connection to the Redis instance used by the component.
+
+{{% /codetab %}}
+
+
+{{% codetab %}}
+
+### Pre-requisites
+
+For this example, you will need:
+
+- [Dapr CLI and initialized environment](https://docs.dapr.io/getting-started).
+- [.NET SDK or .NET 6 SDK installed](https://dotnet.microsoft.com/download).
+
+- [Docker Desktop](https://www.docker.com/products/docker-desktop)
+
+
+### Step 1: Set up the environment
+
+Clone the [sample provided in the Quickstarts repo](https://github.com/dapr/quickstarts/tree/master/state_management).
+
+```bash
+git clone https://github.com/dapr/quickstarts.git
+```
+
+### Step 2: Manipulate service state
+
+In a terminal window, navigate to the `order-processor` directory.
+
+```bash
+cd state_management/csharp/sdk/order-processor
+```
+
+Run the `order-processor` service alongside a Dapr sidecar.
+
+```bash
+dapr run -f .
+```
+
+The `order-processor` service writes, reads, and deletes an `orderId` key/value pair to the `statestore` instance [defined in the `statestore.yaml` component]({{< ref "#statestoreyaml-component-file" >}}). As soon as the service starts, it performs a loop.
+
+```cs
+var client = new DaprClientBuilder().Build();
+
+// Save state into the state store
+await client.SaveStateAsync(DAPR_STORE_NAME, orderId.ToString(), order.ToString());
+Console.WriteLine("Saving Order: " + order);
+
+// Get state from the state store
+var result = await client.GetStateAsync(DAPR_STORE_NAME, orderId.ToString());
+Console.WriteLine("Getting Order: " + result);
+
+// Delete state from the state store
+await client.DeleteStateAsync(DAPR_STORE_NAME, orderId.ToString());
+Console.WriteLine("Deleting Order: " + order);
+```
+### Step 3: View the order-processor outputs
+
+Notice, as specified in the code above, the code saves application state in the Dapr state store, reads it, then deletes it.
+
+Order-processor output:
+```
+== APP == Saving Order: Order { orderId = 1 }
+== APP == Getting Order: Order { orderId = 1 }
+== APP == Deleting Order: Order { orderId = 1 }
+== APP == Saving Order: Order { orderId = 2 }
+== APP == Getting Order: Order { orderId = 2 }
+== APP == Deleting Order: Order { orderId = 2 }
+== APP == Saving Order: Order { orderId = 3 }
+== APP == Getting Order: Order { orderId = 3 }
+== APP == Deleting Order: Order { orderId = 3 }
+== APP == Saving Order: Order { orderId = 4 }
+== APP == Getting Order: Order { orderId = 4 }
+== APP == Deleting Order: Order { orderId = 4 }
+== APP == Saving Order: Order { orderId = 5 }
+== APP == Getting Order: Order { orderId = 5 }
+== APP == Deleting Order: Order { orderId = 5 }
+```
+
+##### `dapr.yaml` Multi-App Run template file
+
+When you run `dapr init`, Dapr creates a default Multi-App Run template file named `dapr.yaml`. Running `dapr run -f .` starts all applications in your project. In this sample, the `dapr.yaml` file contains the following:
+
+```yml
+version: 1
+common:
+ resourcesPath: ../../../resources/
+apps:
+ - appID: order-processor
+ appDirPath: ./order-processor/
+ command: ["dotnet", "run"]
+```
+
+##### `statestore.yaml` component file
+
+When you run `dapr init`, Dapr creates a default Redis `statestore.yaml` and runs a Redis container on your local machine, located:
+
+- On Windows, under `%UserProfile%\.dapr\components\statestore.yaml`
+- On Linux/MacOS, under `~/.dapr/components/statestore.yaml`
+
+With the `statestore.yaml` component, you can easily swap out the [state store](/reference/components-reference/supported-state-stores/) without making code changes.
+
+The Redis `statestore.yaml` file included for this quickstart contains the following:
+
+```yaml
+apiVersion: dapr.io/v1alpha1
+kind: Component
+metadata:
+ name: statestore
+spec:
+ type: state.redis
+ version: v1
+ metadata:
+ - name: redisHost
+ value: localhost:6379
+ - name: redisPassword
+ value: ""
+ - name: actorStateStore
+ value: "true"
+```
+
+In the YAML file:
+
+- `metadata/name` is how your application talks to the component (called `DAPR_STORE_NAME` in the code sample).
+- `spec/metadata` defines the connection to the Redis instance used by the component.
+
+{{% /codetab %}}
+
+
+{{% codetab %}}
+
+### Pre-requisites
+
+For this example, you will need:
+
+- [Dapr CLI and initialized environment](https://docs.dapr.io/getting-started).
+- Java JDK 11 (or greater):
+ - [Oracle JDK](https://www.oracle.com/java/technologies/downloads), or
+ - OpenJDK
+- [Apache Maven](https://maven.apache.org/install.html), version 3.x.
+
+- [Docker Desktop](https://www.docker.com/products/docker-desktop)
+
+
+### Step 1: Set up the environment
+
+Clone the [sample provided in the Quickstarts repo](https://github.com/dapr/quickstarts/tree/master/state_management).
+
+```bash
+git clone https://github.com/dapr/quickstarts.git
+```
+
+### Step 2: Manipulate service state
+
+In a terminal window, navigate to the `order-processor` directory.
+
+```bash
+cd state_management/java/sdk/order-processor
+```
+
+Run the `order-processor` service alongside a Dapr sidecar.
+
+```bash
+dapr run -f .
+```
+
+The `order-processor` service writes, reads, and deletes an `orderId` key/value pair to the `statestore` instance [defined in the `statestore.yaml` component]({{< ref "#statestoreyaml-component-file" >}}). As soon as the service starts, it performs a loop.
+
+```java
+try (DaprClient client = new DaprClientBuilder().build()) {
+ for (int i = 1; i <= 10; i++) {
+ int orderId = i;
+ Order order = new Order();
+ order.setOrderId(orderId);
+
+ // Save state into the state store
+ client.saveState(DAPR_STATE_STORE, String.valueOf(orderId), order).block();
+ LOGGER.info("Saving Order: " + order.getOrderId());
+
+ // Get state from the state store
+ State response = client.getState(DAPR_STATE_STORE, String.valueOf(orderId), Order.class).block();
+ LOGGER.info("Getting Order: " + response.getValue().getOrderId());
+
+ // Delete state from the state store
+ client.deleteState(DAPR_STATE_STORE, String.valueOf(orderId)).block();
+ LOGGER.info("Deleting Order: " + orderId);
+ TimeUnit.MILLISECONDS.sleep(1000);
+ }
+```
+### Step 3: View the order-processor outputs
+
+Notice, as specified in the code above, the code saves application state in the Dapr state store, reads it, then deletes it.
+
+Order-processor output:
+```
+== APP == INFO: Saving Order: 1
+== APP == INFO: Getting Order: 1
+== APP == INFO: Deleting Order: 1
+== APP == INFO: Saving Order: 2
+== APP == INFO: Getting Order: 2
+== APP == INFO: Deleting Order: 2
+== APP == INFO: Saving Order: 3
+== APP == INFO: Getting Order: 3
+== APP == INFO: Deleting Order: 3
+== APP == INFO: Saving Order: 4
+== APP == INFO: Getting Order: 4
+== APP == INFO: Deleting Order: 4
+
+##### `dapr.yaml` Multi-App Run template file
+
+When you run `dapr init`, Dapr creates a default Multi-App Run template file named `dapr.yaml`. Running `dapr run -f .` starts all applications in your project. In this sample, the `dapr.yaml` file contains the following:
+
+```yml
+version: 1
+common:
+ resourcesPath: ../../resources/
+apps:
+ - appID: order-processor
+ appDirPath: ./order-processor/
+ command: ["java", "-jar", "target/OrderProcessingService-0.0.1-SNAPSHOT.jar"]
+```
+
+##### `statestore.yaml` component file
+
+When you run `dapr init`, Dapr creates a default Redis `statestore.yaml` and runs a Redis container on your local machine, located:
+
+- On Windows, under `%UserProfile%\.dapr\components\statestore.yaml`
+- On Linux/MacOS, under `~/.dapr/components/statestore.yaml`
+
+With the `statestore.yaml` component, you can easily swap out the [state store](/reference/components-reference/supported-state-stores/) without making code changes.
+
+The Redis `statestore.yaml` file included for this Quickstart contains the following:
+
+```yaml
+apiVersion: dapr.io/v1alpha1
+kind: Component
+metadata:
+ name: statestore
+spec:
+ type: state.redis
+ version: v1
+ metadata:
+ - name: redisHost
+ value: localhost:6379
+ - name: redisPassword
+ value: ""
+ - name: actorStateStore
+ value: "true"
+```
+
+In the YAML file:
+
+- `metadata/name` is how your application talks to the component (called `DAPR_STORE_NAME` in the code sample).
+- `spec/metadata` defines the connection to the Redis instance used by the component.
+
+{{% /codetab %}}
+
+
+{{% codetab %}}
+
+### Pre-requisites
+
+For this example, you will need:
+
+- [Dapr CLI and initialized environment](https://docs.dapr.io/getting-started).
+- [Latest version of Go](https://go.dev/dl/).
+
+- [Docker Desktop](https://www.docker.com/products/docker-desktop)
+
+
+### Step 1: Set up the environment
+
+Clone the [sample provided in the Quickstarts repo](https://github.com/dapr/quickstarts/tree/master/state_management).
+
+```bash
+git clone https://github.com/dapr/quickstarts.git
+```
+
+### Step 2: Manipulate service state
+
+In a terminal window, navigate to the `order-processor` directory.
+
+```bash
+cd state_management/go/sdk/order-processor
+```
+
+Run the `order-processor` service alongside a Dapr sidecar.
+
+```bash
+dapr run -f .
+```
+
+The `order-processor` service writes, reads, and deletes an `orderId` key/value pair to the `statestore` instance [defined in the `statestore.yaml` component]({{< ref "#statestoreyaml-component-file" >}}). As soon as the service starts, it performs a loop.
+
+```go
+ client, err := dapr.NewClient()
+
+ // Save state into the state store
+ _ = client.SaveState(ctx, STATE_STORE_NAME, strconv.Itoa(orderId), []byte(order))
+ log.Print("Saving Order: " + string(order))
+
+ // Get state from the state store
+ result, _ := client.GetState(ctx, STATE_STORE_NAME, strconv.Itoa(orderId))
+ fmt.Println("Getting Order: " + string(result.Value))
+
+ // Delete state from the state store
+ _ = client.DeleteState(ctx, STATE_STORE_NAME, strconv.Itoa(orderId))
+ log.Print("Deleting Order: " + string(order))
+```
+
+### Step 3: View the order-processor outputs
+
+Notice, as specified in the code above, the code saves application state in the Dapr state store, reads it, then deletes it.
+
+Order-processor output:
+```
+== APP == dapr client initializing for: 127.0.0.1:53689
+== APP == 2022/04/01 09:16:03 Saving Order: {"orderId":1}
+== APP == Getting Order: {"orderId":1}
+== APP == 2022/04/01 09:16:03 Deleting Order: {"orderId":1}
+== APP == 2022/04/01 09:16:03 Saving Order: {"orderId":2}
+== APP == Getting Order: {"orderId":2}
+== APP == 2022/04/01 09:16:03 Deleting Order: {"orderId":2}
+== APP == 2022/04/01 09:16:03 Saving Order: {"orderId":3}
+== APP == Getting Order: {"orderId":3}
+== APP == 2022/04/01 09:16:03 Deleting Order: {"orderId":3}
+== APP == 2022/04/01 09:16:03 Saving Order: {"orderId":4}
+== APP == Getting Order: {"orderId":4}
+== APP == 2022/04/01 09:16:03 Deleting Order: {"orderId":4}
+== APP == 2022/04/01 09:16:03 Saving Order: {"orderId":5}
+== APP == Getting Order: {"orderId":5}
+== APP == 2022/04/01 09:16:03 Deleting Order: {"orderId":5}
+```
+
+##### `dapr.yaml` Multi-App Run template file
+
+When you run `dapr init`, Dapr creates a default Multi-App Run template file named `dapr.yaml`. Running `dapr run -f .` starts all applications in your project. In this sample, the `dapr.yaml` file contains the following:
+
+```yml
+version: 1
+common:
+ resourcesPath: ../../resources/
+apps:
+ - appID: order-processor
+ appDirPath: ./order-processor/
+ command: ["go", "run", "."]
+```
+
+##### `statestore.yaml` component file
+
+When you run `dapr init`, Dapr creates a default Redis `statestore.yaml` and runs a Redis container on your local machine, located:
+
+- On Windows, under `%UserProfile%\.dapr\components\statestore.yaml`
+- On Linux/MacOS, under `~/.dapr/components/statestore.yaml`
+
+With the `statestore.yaml` component, you can easily swap out the [state store](/reference/components-reference/supported-state-stores/) without making code changes.
+
+The Redis `statestore.yaml` file included for this Quickstart contains the following:
+
+```yaml
+apiVersion: dapr.io/v1alpha1
+kind: Component
+metadata:
+ name: statestore
+spec:
+ type: state.redis
+ version: v1
+ metadata:
+ - name: redisHost
+ value: localhost:6379
+ - name: redisPassword
+ value: ""
+ - name: actorStateStore
+ value: "true"
+```
+
+In the YAML file:
+
+- `metadata/name` is how your application talks to the component (called `DAPR_STORE_NAME` in the code sample).
+- `spec/metadata` defines the connection to the Redis instance used by the component.
+
+{{% /codetab %}}
+
+{{< /tabs >}}
+
+
+## Run one application at a time
+
Select your preferred language-specific Dapr SDK before proceeding with the Quickstart.
{{< tabs "Python" "JavaScript" ".NET" "Java" "Go" >}}
@@ -94,7 +736,7 @@ Order-processor output:
== APP == INFO:root:Deleting Order: {'orderId': '4'}
```
-#### `statestore.yaml` component file
+##### `statestore.yaml` component file
When you run `dapr init`, Dapr creates a default Redis `statestore.yaml` and runs a Redis container on your local machine, located:
@@ -216,7 +858,7 @@ Order-processor output:
== APP == Deleting Order: { orderId: 5 }
```
-#### `statestore.yaml` component file
+##### `statestore.yaml` component file
When you run `dapr init`, Dapr creates a default Redis `statestore.yaml` and runs a Redis container on your local machine, located:
@@ -333,7 +975,7 @@ Order-processor output:
== APP == Deleting Order: Order { orderId = 5 }
```
-#### `statestore.yaml` component file
+##### `statestore.yaml` component file
When you run `dapr init`, Dapr creates a default Redis `statestore.yaml` and runs a Redis container on your local machine, located:
@@ -455,7 +1097,7 @@ Order-processor output:
== APP == INFO:root:Deleting Order: {'orderId': '4'}
```
-#### `statestore.yaml` component file
+##### `statestore.yaml` component file
When you run `dapr init`, Dapr creates a default Redis `statestore.yaml` and runs a Redis container on your local machine, located:
@@ -573,7 +1215,7 @@ Order-processor output:
== APP == 2022/04/01 09:16:03 Deleting Order: {"orderId":5}
```
-#### `statestore.yaml` component file
+##### `statestore.yaml` component file
When you run `dapr init`, Dapr creates a default Redis `statestore.yaml` and runs a Redis container on your local machine, located:
diff --git a/daprdocs/content/en/getting-started/quickstarts/workflow-quickstart.md b/daprdocs/content/en/getting-started/quickstarts/workflow-quickstart.md
index d139561cb04..0a1f2e77900 100644
--- a/daprdocs/content/en/getting-started/quickstarts/workflow-quickstart.md
+++ b/daprdocs/content/en/getting-started/quickstarts/workflow-quickstart.md
@@ -7,10 +7,10 @@ description: Get started with the Dapr Workflow building block
---
{{% alert title="Note" color="primary" %}}
-The workflow building block is currently in **alpha**.
+Dapr Workflow is currently in beta. [See known limitations for {{% dapr-latest-version cli="true" %}}]({{< ref "workflow-overview.md#limitations" >}}).
{{% /alert %}}
-Let's take a look at the Dapr [Workflow building block]({{< ref workflow >}}). In this Quickstart, you'll create a simple console application to demonstrate Dapr's workflow programming model and the workflow management APIs.
+Let's take a look at the Dapr [Workflow building block]({{< ref workflow-overview.md >}}). In this Quickstart, you'll create a simple console application to demonstrate Dapr's workflow programming model and the workflow management APIs.
In this guide, you'll:
@@ -21,7 +21,251 @@ In this guide, you'll:
-{{< tabs ".NET" "Python" >}}
+{{< tabs "Python" ".NET" "Java" >}}
+
+
+{{% codetab %}}
+
+The `order-processor` console app starts and manages the `order_processing_workflow`, which simulates purchasing items from a store. The workflow consists of five unique workflow activities, or tasks:
+
+- `notify_activity`: Utilizes a logger to print out messages throughout the workflow. These messages notify you when:
+ - You have insufficient inventory
+ - Your payment couldn't be processed, etc.
+- `process_payment_activity`: Processes and authorizes the payment.
+- `verify_inventory_activity`: Checks the state store to ensure there is enough inventory present for purchase.
+- `update_inventory_activity`: Removes the requested items from the state store and updates the store with the new remaining inventory value.
+- `request_approval_activity`: Seeks approval from the manager if payment is greater than 50,000 USD.
+
+### Step 1: Pre-requisites
+
+For this example, you will need:
+
+- [Dapr CLI and initialized environment](https://docs.dapr.io/getting-started).
+- [Python 3.7+ installed](https://www.python.org/downloads/).
+
+- [Docker Desktop](https://www.docker.com/products/docker-desktop)
+
+
+### Step 2: Set up the environment
+
+Clone the [sample provided in the Quickstarts repo](https://github.com/dapr/quickstarts/tree/master/workflows).
+
+```bash
+git clone https://github.com/dapr/quickstarts.git
+```
+
+In a new terminal window, navigate to the `order-processor` directory:
+
+```bash
+cd workflows/python/sdk/order-processor
+```
+
+Install the Dapr Python SDK package:
+
+```bash
+pip3 install -r requirements.txt
+```
+
+### Step 3: Run the order processor app
+
+In the terminal, start the order processor app alongside a Dapr sidecar:
+
+```bash
+dapr run --app-id order-processor --resources-path ../../../components/ -- python3 app.py
+```
+
+> **Note:** Since Python3.exe is not defined in Windows, you may need to use `python app.py` instead of `python3 app.py`.
+
+This starts the `order-processor` app with a unique workflow ID and runs the workflow activities.
+
+Expected output:
+
+```bash
+== APP == Starting order workflow, purchasing 10 of cars
+== APP == 2023-06-06 09:35:52.945 durabletask-worker INFO: Successfully connected to 127.0.0.1:65406. Waiting for work items...
+== APP == INFO:NotifyActivity:Received order f4e1926e-3721-478d-be8a-f5bebd1995da for 10 cars at $150000 !
+== APP == INFO:VerifyInventoryActivity:Verifying inventory for order f4e1926e-3721-478d-be8a-f5bebd1995da of 10 cars
+== APP == INFO:VerifyInventoryActivity:There are 100 Cars available for purchase
+== APP == INFO:RequestApprovalActivity:Requesting approval for payment of 165000 USD for 10 cars
+== APP == 2023-06-06 09:36:05.969 durabletask-worker INFO: f4e1926e-3721-478d-be8a-f5bebd1995da Event raised: manager_approval
+== APP == INFO:NotifyActivity:Payment for order f4e1926e-3721-478d-be8a-f5bebd1995da has been approved!
+== APP == INFO:ProcessPaymentActivity:Processing payment: f4e1926e-3721-478d-be8a-f5bebd1995da for 10 cars at 150000 USD
+== APP == INFO:ProcessPaymentActivity:Payment for request ID f4e1926e-3721-478d-be8a-f5bebd1995da processed successfully
+== APP == INFO:UpdateInventoryActivity:Checking inventory for order f4e1926e-3721-478d-be8a-f5bebd1995da for 10 cars
+== APP == INFO:UpdateInventoryActivity:There are now 90 cars left in stock
+== APP == INFO:NotifyActivity:Order f4e1926e-3721-478d-be8a-f5bebd1995da has completed!
+== APP == 2023-06-06 09:36:06.106 durabletask-worker INFO: f4e1926e-3721-478d-be8a-f5bebd1995da: Orchestration completed with status: COMPLETED
+== APP == Workflow completed! Result: Completed
+== APP == Purchase of item is Completed
+```
+
+### (Optional) Step 4: View in Zipkin
+
+Running `dapr init` launches the [openzipkin/zipkin](https://hub.docker.com/r/openzipkin/zipkin/) Docker container. If the container has stopped running, launch the Zipkin Docker container with the following command:
+
+```
+docker run -d -p 9411:9411 openzipkin/zipkin
+```
+
+View the workflow trace spans in the Zipkin web UI (typically at `http://localhost:9411/zipkin/`).
+
+
+
+### What happened?
+
+When you ran `dapr run --app-id order-processor --resources-path ../../../components/ -- python3 app.py`:
+
+1. A unique order ID for the workflow is generated (in the above example, `f4e1926e-3721-478d-be8a-f5bebd1995da`) and the workflow is scheduled.
+1. The `NotifyActivity` workflow activity sends a notification saying an order for 10 cars has been received.
+1. The `VerifyInventoryActivity` workflow activity checks the inventory data, determines if you can supply the ordered item, and responds with the number of cars in stock.
+1. Your workflow starts and notifies you of its status.
+1. The `ProcessPaymentActivity` workflow activity begins processing payment for order `f4e1926e-3721-478d-be8a-f5bebd1995da` and confirms if successful.
+1. The `UpdateInventoryActivity` workflow activity updates the inventory with the current available cars after the order has been processed.
+1. The `NotifyActivity` workflow activity sends a notification saying that order `f4e1926e-3721-478d-be8a-f5bebd1995da` has completed.
+1. The workflow terminates as completed.
+
+#### `order-processor/app.py`
+
+In the application's program file:
+- The unique workflow order ID is generated
+- The workflow is scheduled
+- The workflow status is retrieved
+- The workflow and the workflow activities it invokes are registered
+
+```python
+class WorkflowConsoleApp:
+ def main(self):
+ # Register workflow and activities
+ workflowRuntime = WorkflowRuntime(settings.DAPR_RUNTIME_HOST, settings.DAPR_GRPC_PORT)
+ workflowRuntime.register_workflow(order_processing_workflow)
+ workflowRuntime.register_activity(notify_activity)
+ workflowRuntime.register_activity(requst_approval_activity)
+ workflowRuntime.register_activity(verify_inventory_activity)
+ workflowRuntime.register_activity(process_payment_activity)
+ workflowRuntime.register_activity(update_inventory_activity)
+ workflowRuntime.start()
+
+ print("==========Begin the purchase of item:==========", flush=True)
+ item_name = default_item_name
+ order_quantity = 10
+
+ total_cost = int(order_quantity) * baseInventory[item_name].per_item_cost
+ order = OrderPayload(item_name=item_name, quantity=int(order_quantity), total_cost=total_cost)
+
+ # Start Workflow
+ print(f'Starting order workflow, purchasing {order_quantity} of {item_name}', flush=True)
+ start_resp = daprClient.start_workflow(workflow_component=workflow_component,
+ workflow_name=workflow_name,
+ input=order)
+ _id = start_resp.instance_id
+
+ def prompt_for_approval(daprClient: DaprClient):
+ daprClient.raise_workflow_event(instance_id=_id, workflow_component=workflow_component,
+ event_name="manager_approval", event_data={'approval': True})
+
+ approval_seeked = False
+ start_time = datetime.now()
+ while True:
+ time_delta = datetime.now() - start_time
+ state = daprClient.get_workflow(instance_id=_id, workflow_component=workflow_component)
+ if not state:
+ print("Workflow not found!") # not expected
+ elif state.runtime_status == "Completed" or\
+ state.runtime_status == "Failed" or\
+ state.runtime_status == "Terminated":
+ print(f'Workflow completed! Result: {state.runtime_status}', flush=True)
+ break
+ if time_delta.total_seconds() >= 10:
+ state = daprClient.get_workflow(instance_id=_id, workflow_component=workflow_component)
+ if total_cost > 50000 and (
+ state.runtime_status != "Completed" or
+ state.runtime_status != "Failed" or
+ state.runtime_status != "Terminated"
+ ) and not approval_seeked:
+ approval_seeked = True
+ threading.Thread(target=prompt_for_approval(daprClient), daemon=True).start()
+
+ print("Purchase of item is ", state.runtime_status, flush=True)
+
+ def restock_inventory(self, daprClient: DaprClient, baseInventory):
+ for key, item in baseInventory.items():
+ print(f'item: {item}')
+ item_str = f'{{"name": "{item.item_name}", "quantity": {item.quantity},\
+ "per_item_cost": {item.per_item_cost}}}'
+ daprClient.save_state("statestore-actors", key, item_str)
+
+if __name__ == '__main__':
+ app = WorkflowConsoleApp()
+ app.main()
+```
+
+#### `order-processor/workflow.py`
+
+In `workflow.py`, the workflow is defined as a class with all of its associated tasks (determined by workflow activities).
+
+```python
+ def order_processing_workflow(ctx: DaprWorkflowContext, order_payload_str: OrderPayload):
+ """Defines the order processing workflow.
+ When the order is received, the inventory is checked to see if there is enough inventory to
+ fulfill the order. If there is enough inventory, the payment is processed and the inventory is
+ updated. If there is not enough inventory, the order is rejected.
+ If the total order is greater than $50,000, the order is sent to a manager for approval.
+ """
+ order_id = ctx.instance_id
+ order_payload=json.loads(order_payload_str)
+ yield ctx.call_activity(notify_activity,
+ input=Notification(message=('Received order ' +order_id+ ' for '
+ +f'{order_payload["quantity"]}' +' ' +f'{order_payload["item_name"]}'
+ +' at $'+f'{order_payload["total_cost"]}' +' !')))
+ result = yield ctx.call_activity(verify_inventory_activity,
+ input=InventoryRequest(request_id=order_id,
+ item_name=order_payload["item_name"],
+ quantity=order_payload["quantity"]))
+ if not result.success:
+ yield ctx.call_activity(notify_activity,
+ input=Notification(message='Insufficient inventory for '
+ +f'{order_payload["item_name"]}'+'!'))
+ return OrderResult(processed=False)
+
+ if order_payload["total_cost"] > 50000:
+ yield ctx.call_activity(requst_approval_activity, input=order_payload)
+ approval_task = ctx.wait_for_external_event("manager_approval")
+ timeout_event = ctx.create_timer(timedelta(seconds=200))
+ winner = yield when_any([approval_task, timeout_event])
+ if winner == timeout_event:
+ yield ctx.call_activity(notify_activity,
+ input=Notification(message='Payment for order '+order_id
+ +' has been cancelled due to timeout!'))
+ return OrderResult(processed=False)
+ approval_result = yield approval_task
+ if approval_result["approval"]:
+ yield ctx.call_activity(notify_activity, input=Notification(
+ message=f'Payment for order {order_id} has been approved!'))
+ else:
+ yield ctx.call_activity(notify_activity, input=Notification(
+ message=f'Payment for order {order_id} has been rejected!'))
+ return OrderResult(processed=False)
+
+ yield ctx.call_activity(process_payment_activity, input=PaymentRequest(
+ request_id=order_id, item_being_purchased=order_payload["item_name"],
+ amount=order_payload["total_cost"], quantity=order_payload["quantity"]))
+
+ try:
+ yield ctx.call_activity(update_inventory_activity,
+ input=PaymentRequest(request_id=order_id,
+ item_being_purchased=order_payload["item_name"],
+ amount=order_payload["total_cost"],
+ quantity=order_payload["quantity"]))
+ except Exception:
+ yield ctx.call_activity(notify_activity,
+ input=Notification(message=f'Order {order_id} Failed!'))
+ return OrderResult(processed=False)
+
+ yield ctx.call_activity(notify_activity, input=Notification(
+ message=f'Order {order_id} has completed!'))
+ return OrderResult(processed=True)
+```
+{{% /codetab %}}
{{% codetab %}}
@@ -99,7 +343,13 @@ Expected output:
### (Optional) Step 4: View in Zipkin
-If you have Zipkin configured for Dapr locally on your machine, you can view the workflow trace spans in the Zipkin web UI (typically at `http://localhost:9411/zipkin/`).
+Running `dapr init` launches the [openzipkin/zipkin](https://hub.docker.com/r/openzipkin/zipkin/) Docker container. If the container has stopped running, launch the Zipkin Docker container with the following command:
+
+```
+docker run -d -p 9411:9411 openzipkin/zipkin
+```
+
+View the workflow trace spans in the Zipkin web UI (typically at `http://localhost:9411/zipkin/`).
@@ -254,27 +504,35 @@ The `Activities` directory holds the four workflow activities used by the workfl
- `ProcessPaymentActivity.cs`
- `UpdateInventoryActivity.cs`
+## Watch the demo
+
+Watch [this video to walk through the Dapr Workflow .NET demo](https://youtu.be/BxiKpEmchgQ?t=2564):
+
+
+
{{% /codetab %}}
-
+
{{% codetab %}}
-The `order-processor` console app starts and manages the `order_processing_workflow`, which simulates purchasing items from a store. The workflow consists of five unique workflow activities, or tasks:
+The `order-processor` console app starts and manages the lifecycle of an order processing workflow that stores and retrieves data in a state store. The workflow consists of five workflow activities, or tasks:
+- `NotifyActivity`: Utilizes a logger to print out messages throughout the workflow
+- `RequestApprovalActivity`: Requests approval for processing payment
+- `ReserveInventoryActivity`: Checks the state store to ensure that there is enough inventory for the purchase
+- `ProcessPaymentActivity`: Processes and authorizes the payment
+- `UpdateInventoryActivity`: Removes the requested items from the state store and updates the store with the new remaining inventory value
-- `notify_activity`: Utilizes a logger to print out messages throughout the workflow. These messages notify you when:
- - You have insufficient inventory
- - Your payment couldn't be processed, etc.
-- `process_payment_activity`: Processes and authorizes the payment.
-- `verify_inventory_activity`: Checks the state store to ensure there is enough inventory present for purchase.
-- `update_inventory_activity`: Removes the requested items from the state store and updates the store with the new remaining inventory value.
-- `request_approval_activity`: Seeks approval from the manager if payment is greater than 50,000 USD.
### Step 1: Pre-requisites
For this example, you will need:
- [Dapr CLI and initialized environment](https://docs.dapr.io/getting-started).
-- [Python 3.7+ installed](https://www.python.org/downloads/).
+- Java JDK 11 (or greater):
+ - [Microsoft JDK 11](https://docs.microsoft.com/java/openjdk/download#openjdk-11)
+ - [Oracle JDK 11](https://www.oracle.com/technetwork/java/javase/downloads/index.html#JDK11)
+ - [OpenJDK 11](https://jdk.java.net/11/)
+- [Apache Maven](https://maven.apache.org/install.html) version 3.x.
- [Docker Desktop](https://www.docker.com/products/docker-desktop)
@@ -287,16 +545,16 @@ Clone the [sample provided in the Quickstarts repo](https://github.com/dapr/quic
git clone https://github.com/dapr/quickstarts.git
```
-In a new terminal window, navigate to the `order-processor` directory:
+Navigate to the `order-processor` directory:
```bash
-cd workflows/python/sdk/order-processor
+cd workflows/java/sdk/order-processor
```
-Install the Dapr Python SDK package:
+Install the dependencies:
```bash
-pip3 install -r requirements.txt
+mvn clean install
```
### Step 3: Run the order processor app
@@ -304,54 +562,70 @@ pip3 install -r requirements.txt
In the terminal, start the order processor app alongside a Dapr sidecar:
```bash
-dapr run --app-id order-processor --resources-path ../../../components/ -- python3 app.py
+dapr run --app-id WorkflowConsoleApp --resources-path ../../../components/ --dapr-grpc-port 50001 -- java -jar target/OrderProcessingService-0.0.1-SNAPSHOT.jar io.dapr.quickstarts.workflows.WorkflowConsoleApp
```
-> **Note:** Since Python3.exe is not defined in Windows, you may need to use `python app.py` instead of `python3 app.py`.
-
This starts the `order-processor` app with unique workflow ID and runs the workflow activities.
Expected output:
-```bash
+```
+== APP == *** Welcome to the Dapr Workflow console app sample!
+== APP == *** Using this app, you can place orders that start workflows.
+== APP == Start workflow runtime
+== APP == Sep 20, 2023 3:23:05 PM com.microsoft.durabletask.DurableTaskGrpcWorker startAndBlock
+== APP == INFO: Durable Task worker is connecting to sidecar at 127.0.0.1:50001.
+
+== APP == ==========Begin the purchase of item:==========
== APP == Starting order workflow, purchasing 10 of cars
-== APP == 2023-06-06 09:35:52.945 durabletask-worker INFO: Successfully connected to 127.0.0.1:65406. Waiting for work items...
-== APP == INFO:NotifyActivity:Received order f4e1926e-3721-478d-be8a-f5bebd1995da for 10 cars at $150000 !
-== APP == INFO:VerifyInventoryActivity:Verifying inventory for order f4e1926e-3721-478d-be8a-f5bebd1995da of 10 cars
-== APP == INFO:VerifyInventoryActivity:There are 100 Cars available for purchase
-== APP == INFO:RequestApprovalActivity:Requesting approval for payment of 165000 USD for 10 cars
-== APP == 2023-06-06 09:36:05.969 durabletask-worker INFO: f4e1926e-3721-478d-be8a-f5bebd1995da Event raised: manager_approval
-== APP == INFO:NotifyActivity:Payment for order f4e1926e-3721-478d-be8a-f5bebd1995da has been approved!
-== APP == INFO:ProcessPaymentActivity:Processing payment: f4e1926e-3721-478d-be8a-f5bebd1995da for 10 cars at 150000 USD
-== APP == INFO:ProcessPaymentActivity:Payment for request ID f4e1926e-3721-478d-be8a-f5bebd1995da processed successfully
-== APP == INFO:UpdateInventoryActivity:Checking inventory for order f4e1926e-3721-478d-be8a-f5bebd1995da for 10 cars
-== APP == INFO:UpdateInventoryActivity:There are now 90 cars left in stock
-== APP == INFO:NotifyActivity:Order f4e1926e-3721-478d-be8a-f5bebd1995da has completed!
-== APP == 2023-06-06 09:36:06.106 durabletask-worker INFO: f4e1926e-3721-478d-be8a-f5bebd1995da: Orchestration completed with status: COMPLETED
-== APP == Workflow completed! Result: Completed
-== APP == Purchase of item is Completed
+
+== APP == scheduled new workflow instance of OrderProcessingWorkflow with instance ID: edceba90-9c45-4be8-ad40-60d16e060797
+== APP == [Thread-0] INFO io.dapr.workflows.WorkflowContext - Starting Workflow: io.dapr.quickstarts.workflows.OrderProcessingWorkflow
+== APP == [Thread-0] INFO io.dapr.workflows.WorkflowContext - Instance ID(order ID): edceba90-9c45-4be8-ad40-60d16e060797
+== APP == [Thread-0] INFO io.dapr.workflows.WorkflowContext - Current Orchestration Time: 2023-09-20T19:23:09.755Z
+== APP == [Thread-0] INFO io.dapr.workflows.WorkflowContext - Received Order: OrderPayload [itemName=cars, totalCost=150000, quantity=10]
+== APP == [Thread-0] INFO io.dapr.quickstarts.workflows.activities.NotifyActivity - Received Order: OrderPayload [itemName=cars, totalCost=150000, quantity=10]
+== APP == workflow instance edceba90-9c45-4be8-ad40-60d16e060797 started
+== APP == [Thread-0] INFO io.dapr.quickstarts.workflows.activities.ReserveInventoryActivity - Reserving inventory for order 'edceba90-9c45-4be8-ad40-60d16e060797' of 10 cars
+== APP == [Thread-0] INFO io.dapr.quickstarts.workflows.activities.ReserveInventoryActivity - There are 100 cars available for purchase
+== APP == [Thread-0] INFO io.dapr.quickstarts.workflows.activities.ReserveInventoryActivity - Reserved inventory for order 'edceba90-9c45-4be8-ad40-60d16e060797' of 10 cars
+== APP == [Thread-0] INFO io.dapr.quickstarts.workflows.activities.RequestApprovalActivity - Requesting approval for order: OrderPayload [itemName=cars, totalCost=150000, quantity=10]
+== APP == [Thread-0] INFO io.dapr.quickstarts.workflows.activities.RequestApprovalActivity - Approved requesting approval for order: OrderPayload [itemName=cars, totalCost=150000, quantity=10]
+== APP == [Thread-0] INFO io.dapr.quickstarts.workflows.activities.ProcessPaymentActivity - Processing payment: edceba90-9c45-4be8-ad40-60d16e060797 for 10 cars at $150000
+== APP == [Thread-0] INFO io.dapr.quickstarts.workflows.activities.ProcessPaymentActivity - Payment for request ID 'edceba90-9c45-4be8-ad40-60d16e060797' processed successfully
+== APP == [Thread-0] INFO io.dapr.quickstarts.workflows.activities.UpdateInventoryActivity - Updating inventory for order 'edceba90-9c45-4be8-ad40-60d16e060797' of 10 cars
+== APP == [Thread-0] INFO io.dapr.quickstarts.workflows.activities.UpdateInventoryActivity - Updated inventory for order 'edceba90-9c45-4be8-ad40-60d16e060797': there are now 90 cars left in stock
+== APP == [Thread-0] INFO io.dapr.quickstarts.workflows.activities.NotifyActivity - Order completed! : edceba90-9c45-4be8-ad40-60d16e060797
+
+== APP == workflow instance edceba90-9c45-4be8-ad40-60d16e060797 completed, out is: {"processed":true}
```
### (Optional) Step 4: View in Zipkin
-If you have Zipkin configured for Dapr locally on your machine, you can view the workflow trace spans in the Zipkin web UI (typically at `http://localhost:9411/zipkin/`).
+Running `dapr init` launches the [openzipkin/zipkin](https://hub.docker.com/r/openzipkin/zipkin/) Docker container. If the container has stopped running, launch the Zipkin Docker container with the following command:
-
+```
+docker run -d -p 9411:9411 openzipkin/zipkin
+```
+
+View the workflow trace spans in the Zipkin web UI (typically at `http://localhost:9411/zipkin/`).
+
+
### What happened?
-When you ran `dapr run --app-id order-processor --resources-path ../../../components/ -- python3 app.py`:
+When you ran `dapr run`:
-1. A unique order ID for the workflow is generated (in the above example, `f4e1926e-3721-478d-be8a-f5bebd1995da`) and the workflow is scheduled.
+1. A unique order ID for the workflow is generated (in the above example, `edceba90-9c45-4be8-ad40-60d16e060797`) and the workflow is scheduled.
1. The `NotifyActivity` workflow activity sends a notification saying an order for 10 cars has been received.
1. The `ReserveInventoryActivity` workflow activity checks the inventory data, determines if you can supply the ordered item, and responds with the number of cars in stock.
-1. Your workflow starts and notifies you of its status.
-1. The `ProcessPaymentActivity` workflow activity begins processing payment for order `f4e1926e-3721-478d-be8a-f5bebd1995da` and confirms if successful.
+1. Once approved, your workflow starts and notifies you of its status.
+1. The `ProcessPaymentActivity` workflow activity begins processing payment for order `edceba90-9c45-4be8-ad40-60d16e060797` and confirms if successful.
1. The `UpdateInventoryActivity` workflow activity updates the inventory with the current available cars after the order has been processed.
-1. The `NotifyActivity` workflow activity sends a notification saying that order `f4e1926e-3721-478d-be8a-f5bebd1995da` has completed.
+1. The `NotifyActivity` workflow activity sends a notification saying that order `edceba90-9c45-4be8-ad40-60d16e060797` has completed.
1. The workflow terminates as completed.
-#### `order-processor/app.py`
+#### `order-processor/WorkflowConsoleApp.java`
In the application's program file:
- The unique workflow order ID is generated
@@ -359,151 +633,227 @@ In the application's program file:
- The workflow status is retrieved
- The workflow and the workflow activities it invokes are registered
-```python
-class WorkflowConsoleApp:
- def main(self):
- # Register workflow and activities
- workflowRuntime = WorkflowRuntime(settings.DAPR_RUNTIME_HOST, settings.DAPR_GRPC_PORT)
- workflowRuntime.register_workflow(order_processing_workflow)
- workflowRuntime.register_activity(notify_activity)
- workflowRuntime.register_activity(requst_approval_activity)
- workflowRuntime.register_activity(verify_inventory_activity)
- workflowRuntime.register_activity(process_payment_activity)
- workflowRuntime.register_activity(update_inventory_activity)
- workflowRuntime.start()
-
- print("==========Begin the purchase of item:==========", flush=True)
- item_name = default_item_name
- order_quantity = 10
+```java
+package io.dapr.quickstarts.workflows;
+import io.dapr.client.DaprClient;
+import io.dapr.client.DaprClientBuilder;
+import io.dapr.workflows.client.DaprWorkflowClient;
+
+public class WorkflowConsoleApp {
+
+ private static final String STATE_STORE_NAME = "statestore-actors";
+
+ // ...
+ public static void main(String[] args) throws Exception {
+ System.out.println("*** Welcome to the Dapr Workflow console app sample!");
+ System.out.println("*** Using this app, you can place orders that start workflows.");
+ // Wait for the sidecar to become available
+ Thread.sleep(5 * 1000);
+
+ // Register the OrderProcessingWorkflow and its activities with the builder.
+ WorkflowRuntimeBuilder builder = new WorkflowRuntimeBuilder().registerWorkflow(OrderProcessingWorkflow.class);
+ builder.registerActivity(NotifyActivity.class);
+ builder.registerActivity(ProcessPaymentActivity.class);
+ builder.registerActivity(RequestApprovalActivity.class);
+ builder.registerActivity(ReserveInventoryActivity.class);
+ builder.registerActivity(UpdateInventoryActivity.class);
+
+ // Build the workflow runtime
+ try (WorkflowRuntime runtime = builder.build()) {
+ System.out.println("Start workflow runtime");
+ runtime.start(false);
+ }
- total_cost = int(order_quantity) * baseInventory[item_name].per_item_cost
- order = OrderPayload(item_name=item_name, quantity=int(order_quantity), total_cost=total_cost)
+ InventoryItem inventory = prepareInventoryAndOrder();
- # Start Workflow
- print(f'Starting order workflow, purchasing {order_quantity} of {item_name}', flush=True)
- start_resp = daprClient.start_workflow(workflow_component=workflow_component,
- workflow_name=workflow_name,
- input=order)
- _id = start_resp.instance_id
-
- def prompt_for_approval(daprClient: DaprClient):
- daprClient.raise_workflow_event(instance_id=_id, workflow_component=workflow_component,
- event_name="manager_approval", event_data={'approval': True})
+ DaprWorkflowClient workflowClient = new DaprWorkflowClient();
+ try (workflowClient) {
+ executeWorkflow(workflowClient, inventory);
+ }
- approval_seeked = False
- start_time = datetime.now()
- while True:
- time_delta = datetime.now() - start_time
- state = daprClient.get_workflow(instance_id=_id, workflow_component=workflow_component)
- if not state:
- print("Workflow not found!") # not expected
- elif state.runtime_status == "Completed" or\
- state.runtime_status == "Failed" or\
- state.runtime_status == "Terminated":
- print(f'Workflow completed! Result: {state.runtime_status}', flush=True)
- break
- if time_delta.total_seconds() >= 10:
- state = daprClient.get_workflow(instance_id=_id, workflow_component=workflow_component)
- if total_cost > 50000 and (
- state.runtime_status != "Completed" or
- state.runtime_status != "Failed" or
- state.runtime_status != "Terminated"
- ) and not approval_seeked:
- approval_seeked = True
- threading.Thread(target=prompt_for_approval(daprClient), daemon=True).start()
-
- print("Purchase of item is ", state.runtime_status, flush=True)
+ }
+
+ // Start the workflow runtime, pulling and executing tasks
+ private static void executeWorkflow(DaprWorkflowClient workflowClient, InventoryItem inventory) {
+ System.out.println("==========Begin the purchase of item:==========");
+ String itemName = inventory.getName();
+ int orderQuantity = inventory.getQuantity();
+ int totalcost = orderQuantity * inventory.getPerItemCost();
+ OrderPayload order = new OrderPayload();
+ order.setItemName(itemName);
+ order.setQuantity(orderQuantity);
+ order.setTotalCost(totalcost);
+ System.out.println("Starting order workflow, purchasing " + orderQuantity + " of " + itemName);
+
+ String instanceId = workflowClient.scheduleNewWorkflow(OrderProcessingWorkflow.class, order);
+ System.out.printf("scheduled new workflow instance of OrderProcessingWorkflow with instance ID: %s%n",
+ instanceId);
+
+ // Check workflow instance start status
+ try {
+ workflowClient.waitForInstanceStart(instanceId, Duration.ofSeconds(10), false);
+ System.out.printf("workflow instance %s started%n", instanceId);
+ } catch (TimeoutException e) {
+ System.out.printf("workflow instance %s did not start within 10 seconds%n", instanceId);
+ return;
+ }
- def restock_inventory(self, daprClient: DaprClient, baseInventory):
- for key, item in baseInventory.items():
- print(f'item: {item}')
- item_str = f'{{"name": "{item.item_name}", "quantity": {item.quantity},\
- "per_item_cost": {item.per_item_cost}}}'
- daprClient.save_state("statestore-actors", key, item_str)
+ // Check workflow instance complete status
+ try {
+ WorkflowInstanceStatus workflowStatus = workflowClient.waitForInstanceCompletion(instanceId,
+ Duration.ofSeconds(30),
+ true);
+ if (workflowStatus != null) {
+ System.out.printf("workflow instance %s completed, out is: %s %n", instanceId,
+ workflowStatus.getSerializedOutput());
+ } else {
+ System.out.printf("workflow instance %s not found%n", instanceId);
+ }
+ } catch (TimeoutException e) {
+ System.out.printf("workflow instance %s did not complete within 30 seconds%n", instanceId);
+ }
-if __name__ == '__main__':
- app = WorkflowConsoleApp()
- app.main()
+ }
+
+ private static InventoryItem prepareInventoryAndOrder() {
+ // prepare 100 cars in inventory
+ InventoryItem inventory = new InventoryItem();
+ inventory.setName("cars");
+ inventory.setPerItemCost(15000);
+ inventory.setQuantity(100);
+ DaprClient daprClient = new DaprClientBuilder().build();
+ restockInventory(daprClient, inventory);
+
+ // prepare order for 10 cars
+ InventoryItem order = new InventoryItem();
+ order.setName("cars");
+ order.setPerItemCost(15000);
+ order.setQuantity(10);
+ return order;
+ }
+
+ private static void restockInventory(DaprClient daprClient, InventoryItem inventory) {
+ String key = inventory.getName();
+ daprClient.saveState(STATE_STORE_NAME, key, inventory).block();
+ }
+}
```
-#### `order-processor/workflow.py`
-
-In `workflow.py`, the workflow is defined as a class with all of its associated tasks (determined by workflow activities).
+#### `OrderProcessingWorkflow.java`
+
+In `OrderProcessingWorkflow.java`, the workflow is defined as a class with all of its associated tasks (determined by workflow activities).
+
+```java
+package io.dapr.quickstarts.workflows;
+import io.dapr.workflows.Workflow;
+
+public class OrderProcessingWorkflow extends Workflow {
+
+ @Override
+ public WorkflowStub create() {
+ return ctx -> {
+ Logger logger = ctx.getLogger();
+ String orderId = ctx.getInstanceId();
+ logger.info("Starting Workflow: " + ctx.getName());
+ logger.info("Instance ID(order ID): " + orderId);
+ logger.info("Current Orchestration Time: " + ctx.getCurrentInstant());
+
+ OrderPayload order = ctx.getInput(OrderPayload.class);
+ logger.info("Received Order: " + order.toString());
+ OrderResult orderResult = new OrderResult();
+ orderResult.setProcessed(false);
+
+ // Notify the user that an order has come through
+ Notification notification = new Notification();
+ notification.setMessage("Received Order: " + order.toString());
+ ctx.callActivity(NotifyActivity.class.getName(), notification).await();
+
+ // Determine if there is enough of the item available for purchase by checking
+ // the inventory
+ InventoryRequest inventoryRequest = new InventoryRequest();
+ inventoryRequest.setRequestId(orderId);
+ inventoryRequest.setItemName(order.getItemName());
+ inventoryRequest.setQuantity(order.getQuantity());
+ InventoryResult inventoryResult = ctx.callActivity(ReserveInventoryActivity.class.getName(),
+ inventoryRequest, InventoryResult.class).await();
+
+ // If there is insufficient inventory, fail and let the user know
+ if (!inventoryResult.isSuccess()) {
+ notification.setMessage("Insufficient inventory for order : " + order.getItemName());
+ ctx.callActivity(NotifyActivity.class.getName(), notification).await();
+ ctx.complete(orderResult);
+ return;
+ }
+
+ // Require orders over a certain threshold to be approved
+ if (order.getTotalCost() > 5000) {
+ ApprovalResult approvalResult = ctx.callActivity(RequestApprovalActivity.class.getName(),
+ order, ApprovalResult.class).await();
+ if (approvalResult != ApprovalResult.Approved) {
+ notification.setMessage("Order " + order.getItemName() + " was not approved.");
+ ctx.callActivity(NotifyActivity.class.getName(), notification).await();
+ ctx.complete(orderResult);
+ return;
+ }
+ }
+
+ // There is enough inventory available so the user can purchase the item(s).
+ // Process their payment
+ PaymentRequest paymentRequest = new PaymentRequest();
+ paymentRequest.setRequestId(orderId);
+ paymentRequest.setItemBeingPurchased(order.getItemName());
+ paymentRequest.setQuantity(order.getQuantity());
+ paymentRequest.setAmount(order.getTotalCost());
+ boolean isOK = ctx.callActivity(ProcessPaymentActivity.class.getName(),
+ paymentRequest, boolean.class).await();
+ if (!isOK) {
+ notification.setMessage("Payment failed for order : " + orderId);
+ ctx.callActivity(NotifyActivity.class.getName(), notification).await();
+ ctx.complete(orderResult);
+ return;
+ }
+
+ inventoryResult = ctx.callActivity(UpdateInventoryActivity.class.getName(),
+ inventoryRequest, InventoryResult.class).await();
+ if (!inventoryResult.isSuccess()) {
+ // If there is an error updating the inventory, refund the user
+ // paymentRequest.setAmount(-1 * paymentRequest.getAmount());
+ // ctx.callActivity(ProcessPaymentActivity.class.getName(),
+ // paymentRequest).await();
+
+ // Let users know their payment processing failed
+ notification.setMessage("Order failed to update inventory! : " + orderId);
+ ctx.callActivity(NotifyActivity.class.getName(), notification).await();
+ ctx.complete(orderResult);
+ return;
+ }
+
+ // Let user know their order was processed
+ notification.setMessage("Order completed! : " + orderId);
+ ctx.callActivity(NotifyActivity.class.getName(), notification).await();
+
+ // Complete the workflow with order result is processed
+ orderResult.setProcessed(true);
+ ctx.complete(orderResult);
+ };
+ }
+
+}
+```
-```python
- def order_processing_workflow(ctx: DaprWorkflowContext, order_payload_str: OrderPayload):
- """Defines the order processing workflow.
- When the order is received, the inventory is checked to see if there is enough inventory to
- fulfill the order. If there is enough inventory, the payment is processed and the inventory is
- updated. If there is not enough inventory, the order is rejected.
- If the total order is greater than $50,000, the order is sent to a manager for approval.
- """
- order_id = ctx.instance_id
- order_payload=json.loads(order_payload_str)
- yield ctx.call_activity(notify_activity,
- input=Notification(message=('Received order ' +order_id+ ' for '
- +f'{order_payload["quantity"]}' +' ' +f'{order_payload["item_name"]}'
- +' at $'+f'{order_payload["total_cost"]}' +' !')))
- result = yield ctx.call_activity(verify_inventory_activity,
- input=InventoryRequest(request_id=order_id,
- item_name=order_payload["item_name"],
- quantity=order_payload["quantity"]))
- if not result.success:
- yield ctx.call_activity(notify_activity,
- input=Notification(message='Insufficient inventory for '
- +f'{order_payload["item_name"]}'+'!'))
- return OrderResult(processed=False)
-
- if order_payload["total_cost"] > 50000:
- yield ctx.call_activity(requst_approval_activity, input=order_payload)
- approval_task = ctx.wait_for_external_event("manager_approval")
- timeout_event = ctx.create_timer(timedelta(seconds=200))
- winner = yield when_any([approval_task, timeout_event])
- if winner == timeout_event:
- yield ctx.call_activity(notify_activity,
- input=Notification(message='Payment for order '+order_id
- +' has been cancelled due to timeout!'))
- return OrderResult(processed=False)
- approval_result = yield approval_task
- if approval_result["approval"]:
- yield ctx.call_activity(notify_activity, input=Notification(
- message=f'Payment for order {order_id} has been approved!'))
- else:
- yield ctx.call_activity(notify_activity, input=Notification(
- message=f'Payment for order {order_id} has been rejected!'))
- return OrderResult(processed=False)
-
- yield ctx.call_activity(process_payment_activity, input=PaymentRequest(
- request_id=order_id, item_being_purchased=order_payload["item_name"],
- amount=order_payload["total_cost"], quantity=order_payload["quantity"]))
+#### `activities` directory
- try:
- yield ctx.call_activity(update_inventory_activity,
- input=PaymentRequest(request_id=order_id,
- item_being_purchased=order_payload["item_name"],
- amount=order_payload["total_cost"],
- quantity=order_payload["quantity"]))
- except Exception:
- yield ctx.call_activity(notify_activity,
- input=Notification(message=f'Order {order_id} Failed!'))
- return OrderResult(processed=False)
+The `Activities` directory holds the four workflow activities used by the workflow, defined in the following files:
+- [`NotifyActivity.java`](https://github.com/dapr/quickstarts/tree/master/workflows/java/sdk/order-processor/src/main/java/io/dapr/quickstarts/workflows/activities/NotifyActivity.java)
+- [`RequestApprovalActivity.java`](https://github.com/dapr/quickstarts/tree/master/workflows/java/sdk/order-processor/src/main/java/io/dapr/quickstarts/workflows/activities/RequestApprovalActivity.java)
+- [`ReserveInventoryActivity.java`](https://github.com/dapr/quickstarts/tree/master/workflows/java/sdk/order-processor/src/main/java/io/dapr/quickstarts/workflows/activities/ReserveInventoryActivity.java)
+- [`ProcessPaymentActivity.java`](https://github.com/dapr/quickstarts/tree/master/workflows/java/sdk/order-processor/src/main/java/io/dapr/quickstarts/workflows/activities/ProcessPaymentActivity.java)
+- [`UpdateInventoryActivity.java`](https://github.com/dapr/quickstarts/tree/master/workflows/java/sdk/order-processor/src/main/java/io/dapr/quickstarts/workflows/activities/UpdateInventoryActivity.java)
- yield ctx.call_activity(notify_activity, input=Notification(
- message=f'Order {order_id} has completed!'))
- return OrderResult(processed=True)
-```
{{% /codetab %}}
-
{{< /tabs >}}
-## Watch the demo
-
-Watch [this video to walk through the Dapr Workflow .NET demo](https://youtu.be/BxiKpEmchgQ?t=2564):
-
-
-
-
## Tell us what you think!
We're continuously working to improve our Quickstart examples and value your feedback. Did you find this Quickstart helpful? Do you have suggestions for improvement?
@@ -515,4 +865,4 @@ Join the discussion in our [discord channel](https://discord.com/channels/778680
- Walk through a more in-depth [.NET SDK example workflow](https://github.com/dapr/dotnet-sdk/tree/master/examples/Workflow)
- Learn more about [Workflow as a Dapr building block]({{< ref workflow-overview >}})
-{{< button text="Explore Dapr tutorials >>" page="getting-started/tutorials/_index.md" >}}
+{{< button text="Explore Dapr tutorials >>" page="getting-started/tutorials/_index.md" >}}
\ No newline at end of file
diff --git a/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-deploy.md b/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-deploy.md
index bdc60e48928..d88ec29209d 100644
--- a/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-deploy.md
+++ b/daprdocs/content/en/operations/hosting/kubernetes/kubernetes-deploy.md
@@ -201,7 +201,7 @@ helm repo add dapr https://dapr.github.io/helm-charts/
helm repo update
kubectl create namespace dapr-system
# Install the Dapr dashboard
-helm install dapr dapr/dapr-dashboard --namespace dapr-system
+helm install dapr-dashboard dapr/dapr-dashboard --namespace dapr-system
```
### Verify installation
diff --git a/daprdocs/content/en/operations/support/alpha-apis.md b/daprdocs/content/en/operations/support/alpha-beta-apis.md
similarity index 76%
rename from daprdocs/content/en/operations/support/alpha-apis.md
rename to daprdocs/content/en/operations/support/alpha-beta-apis.md
index 3e2e26354ee..c35d4fc4240 100644
--- a/daprdocs/content/en/operations/support/alpha-apis.md
+++ b/daprdocs/content/en/operations/support/alpha-beta-apis.md
@@ -1,16 +1,27 @@
---
type: docs
-title: "Alpha APIs"
-linkTitle: "Alpha APIs"
+title: "Alpha and Beta APIs"
+linkTitle: "Alpha & Beta APIs"
weight: 5000
-description: "List of current alpha APIs"
+description: "List of current alpha and beta APIs"
---
+## Alpha APIs
+
| Building block/API | gRPC | HTTP | Description | Documentation | Version introduced |
| ------------------ | ---- | ---- | ----------- | ------------- | ------------------ |
| Query State | [Query State proto](https://github.com/dapr/dapr/blob/5aba3c9aa4ea9b3f388df125f9c66495b43c5c9e/dapr/proto/runtime/v1/dapr.proto#L44) | `v1.0-alpha1/state/statestore/query` | The state query API enables you to retrieve, filter, and sort the key/value data stored in state store components. | [Query State API]({{< ref "howto-state-query-api.md" >}}) | v1.5 |
| Distributed Lock | [Lock proto](https://github.com/dapr/dapr/blob/5aba3c9aa4ea9b3f388df125f9c66495b43c5c9e/dapr/proto/runtime/v1/dapr.proto#L112) | `/v1.0-alpha1/lock` | The distributed lock API enables you to take a lock on a resource. | [Distributed Lock API]({{< ref "distributed-lock-api-overview.md" >}}) | v1.8 |
-| Workflow | [Workflow proto](https://github.com/dapr/dapr/blob/5aba3c9aa4ea9b3f388df125f9c66495b43c5c9e/dapr/proto/runtime/v1/dapr.proto#L151) | `/v1.0-alpha1/workflow` | The workflow API enables you to define long running, persistent processes or data flows. | [Workflow API]({{< ref "workflow-overview.md" >}}) | v1.10 |
| Bulk Publish | [Bulk publish proto](https://github.com/dapr/dapr/blob/5aba3c9aa4ea9b3f388df125f9c66495b43c5c9e/dapr/proto/runtime/v1/dapr.proto#L59) | `v1.0-alpha1/publish/bulk` | The bulk publish API allows you to publish multiple messages to a topic in a single request. | [Bulk Publish and Subscribe API]({{< ref "pubsub-bulk.md" >}}) | v1.10 |
| Bulk Subscribe | [Bulk subscribe proto](https://github.com/dapr/dapr/blob/5aba3c9aa4ea9b3f388df125f9c66495b43c5c9e/dapr/proto/runtime/v1/appcallback.proto#L57) | N/A | The bulk subscribe application callback receives multiple messages from a topic in a single call. | [Bulk Publish and Subscribe API]({{< ref "pubsub-bulk.md" >}}) | v1.10 |
| Cryptography | [Crypto proto](https://github.com/dapr/dapr/blob/5aba3c9aa4ea9b3f388df125f9c66495b43c5c9e/dapr/proto/runtime/v1/dapr.proto#L118) | `v1.0-alpha1/crypto` | The cryptography API enables you to perform **high level** cryptography operations for encrypting and decrypting messages. | [Cryptography API]({{< ref "cryptography-overview.md" >}}) | v1.11 |
+
+## Beta APIs
+
+| Building block/API | gRPC | HTTP | Description | Documentation | Version introduced |
+| ------------------ | ---- | ---- | ----------- | ------------- | ------------------ |
+| Workflow | [Workflow proto](https://github.com/dapr/dapr/blob/5aba3c9aa4ea9b3f388df125f9c66495b43c5c9e/dapr/proto/runtime/v1/dapr.proto#L151) | `/v1.0-beta1/workflow` | The workflow API enables you to define long running, persistent processes or data flows. | [Workflow API]({{< ref "workflow-overview.md" >}}) | v1.10 |
+
+## Related links
+
+[Learn more about the Alpha, Beta, and Stable lifecycle stages.]({{< ref "certification-lifecycle.md#certification-levels" >}})
\ No newline at end of file
diff --git a/daprdocs/content/en/operations/support/support-preview-features.md b/daprdocs/content/en/operations/support/support-preview-features.md
index 64d7fa1844f..9c9fff69010 100644
--- a/daprdocs/content/en/operations/support/support-preview-features.md
+++ b/daprdocs/content/en/operations/support/support-preview-features.md
@@ -15,36 +15,11 @@ For CLI there is no explicit opt-in, just the version that this was first made a
| Feature | Description | Setting | Documentation | Version introduced |
| --- | --- | --- | --- | --- |
-| **Streaming for HTTP service invocation** | Enables (partial) support for using streams in HTTP service invocation; see below for more details. | `ServiceInvocationStreaming` | [Details]({{< ref "support-preview-features.md#streaming-for-http-service-invocation" >}}) | v1.10 |
| **Pluggable components** | Allows creating self-hosted gRPC-based components written in any language that supports gRPC. The following component APIs are supported: State stores, Pub/sub, Bindings | N/A | [Pluggable components concept]({{< ref "components-concept#built-in-and-pluggable-components" >}})| v1.9 |
-| **Multi-App Run** | Configure multiple Dapr applications from a single configuration file and run from a single command | `dapr run -f` | [Multi-App Run]({{< ref multi-app-dapr-run.md >}}) | v1.10 |
+| **Multi-App Run for Kubernetes** | Configure multiple Dapr applications from a single configuration file and run from a single command on Kubernetes | `dapr run -k -f` | [Multi-App Run]({{< ref multi-app-dapr-run.md >}}) | v1.12 |
| **Workflows** | Author workflows as code to automate and orchestrate tasks within your application, like messaging, state management, and failure handling | N/A | [Workflows concept]({{< ref "components-concept#workflows" >}})| v1.10 |
| **Cryptography** | Encrypt or decrypt data without having to manage secrets keys | N/A | [Cryptography concept]({{< ref "components-concept#cryptography" >}})| v1.11 |
| **Service invocation for non-Dapr endpoints** | Allow the invocation of non-Dapr endpoints by Dapr using the [Service invocation API]({{< ref service_invocation_api.md >}}). Read ["How-To: Invoke Non-Dapr Endpoints using HTTP"]({{< ref howto-invoke-non-dapr-endpoints.md >}}) for more information. | N/A | [Service invocation API]({{< ref service_invocation_api.md >}}) | v1.11 |
| **Actor State TTL** | Allow actors to save records to state stores with Time To Live (TTL) set to automatically clean up old data. In its current implementation, actor state with TTL may not be reflected correctly by clients, read [Actor State Transactions]({{< ref actors_api.md >}}) for more information. | `ActorStateTTL` | [Actor State Transactions]({{< ref actors_api.md >}}) | v1.11 |
+| **Transactional Outbox** | Allows state operations for inserts and updates to be published to a configured pub/sub topic using a single transaction across the state store and the pub/sub | N/A | [Transactional Outbox Feature]({{< ref howto-outbox.md >}}) | v1.12 |
-### Streaming for HTTP service invocation
-
-Running Dapr with the `ServiceInvocationStreaming` feature flag enables partial support for handling data as a stream in HTTP service invocation. This can offer improvements in performance and memory utilization when using Dapr to invoke another service using HTTP with large request or response bodies.
-
-The table below summarizes the current state of support for streaming in HTTP service invocation in Dapr, including the impact of enabling `ServiceInvocationStreaming`, in the example where "app A" is invoking "app B" using Dapr. There are six steps in the data flow, with various levels of support for handling data as a stream:
-
-]
-
-| Step | Handles data as a stream | Dapr 1.11 | Dapr 1.11 with `ServiceInvocationStreaming` |
-|:---:|---|:---:|:---:|
-| 1 | Request: "App A" to "Dapr sidecar A | ❌ | ❌ |
-| 2 | Request: "Dapr sidecar A" to "Dapr sidecar B | ❌ | ✅ |
-| 3 | Request: "Dapr sidecar B" to "App B" | ✅ | ✅ |
-| 4 | Response: "App B" to "Dapr sidecar B" | ✅ | ✅ |
-| 5 | Response: "Dapr sidecar B" to "Dapr sidecar A | ❌ | ✅ |
-| 6 | Response: "Dapr sidecar A" to "App A | ❌ | ❌ |
-
-Important notes:
-
-- `ServiceInvocationStreaming` needs to be applied on caller sidecars only.
- In the example above, streams are used for HTTP service invocation if `ServiceInvocationStreaming` is applied to the configuration of "app A" and its Dapr sidecar, regardless of whether the feature flag is enabled for "app B" and its sidecar.
-- When `ServiceInvocationStreaming` is enabled, you should make sure that all services your app invokes using Dapr ("app B") are updated to Dapr 1.10 or higher, even if `ServiceInvocationStreaming` is not enabled for those sidecars.
- Invoking an app using Dapr 1.9 or older is still possible, but those calls may fail unless you have applied a Dapr Resiliency policy with retries enabled.
-
-> Full support for streaming for HTTP service invocation will be completed in a future Dapr version.
diff --git a/daprdocs/content/en/operations/support/support-release-policy.md b/daprdocs/content/en/operations/support/support-release-policy.md
index 915042374eb..8dd71a45723 100644
--- a/daprdocs/content/en/operations/support/support-release-policy.md
+++ b/daprdocs/content/en/operations/support/support-release-policy.md
@@ -45,10 +45,11 @@ The table below shows the versions of Dapr releases that have been tested togeth
| Release date | Runtime | CLI | SDKs | Dashboard | Status | Release notes |
|--------------------|:--------:|:--------|---------|---------|---------|------------|
-| August 31st 2023 | 1.11.3 | 1.11.0 | Java 1.9.0 Go 1.8.0 PHP 1.1.0 Python 1.10.0 .NET 1.11.0 JS 3.1.0 | 0.13.0 | Supported (current) | [v1.11.3 release notes](https://github.com/dapr/dapr/releases/tag/v1.11.3) |
-| July 20th 2023 | 1.11.2 | 1.11.0 | Java 1.9.0 Go 1.8.0 PHP 1.1.0 Python 1.10.0 .NET 1.11.0 JS 3.1.0 | 0.13.0 | Supported (current) | [v1.11.2 release notes](https://github.com/dapr/dapr/releases/tag/v1.11.2) |
-| June 22nd 2023 | 1.11.1 | 1.11.0 | Java 1.9.0 Go 1.8.0 PHP 1.1.0 Python 1.10.0 .NET 1.11.0 JS 3.1.0 | 0.13.0 | Supported (current) | [v1.11.1 release notes](https://github.com/dapr/dapr/releases/tag/v1.11.1) |
-| June 12th 2023 | 1.11.0 | 1.11.0 | Java 1.9.0 Go 1.8.0 PHP 1.1.0 Python 1.10.0 .NET 1.11.0 JS 3.1.0 | 0.13.0 | Supported (current) | [v1.11.0 release notes](https://github.com/dapr/dapr/releases/tag/v1.11.0) |
+| October 11th 2023 | 1.12.0 | 1.12.0 | Java 1.10.0 Go 1.9.0 PHP 1.1.0 Python 1.11.0 .NET 1.12.0 JS 3.1.2 | 0.13.0 | Supported (current) | [v1.12.0 release notes](https://github.com/dapr/dapr/releases/tag/v1.12.0) |
+| August 31st 2023 | 1.11.3 | 1.11.0 | Java 1.9.0 Go 1.8.0 PHP 1.1.0 Python 1.10.0 .NET 1.11.0 JS 3.1.0 | 0.13.0 | Supported | [v1.11.3 release notes](https://github.com/dapr/dapr/releases/tag/v1.11.3) |
+| July 20th 2023 | 1.11.2 | 1.11.0 | Java 1.9.0 Go 1.8.0 PHP 1.1.0 Python 1.10.0 .NET 1.11.0 JS 3.1.0 | 0.13.0 | Supported | [v1.11.2 release notes](https://github.com/dapr/dapr/releases/tag/v1.11.2) |
+| June 22nd 2023 | 1.11.1 | 1.11.0 | Java 1.9.0 Go 1.8.0 PHP 1.1.0 Python 1.10.0 .NET 1.11.0 JS 3.1.0 | 0.13.0 | Supported | [v1.11.1 release notes](https://github.com/dapr/dapr/releases/tag/v1.11.1) |
+| June 12th 2023 | 1.11.0 | 1.11.0 | Java 1.9.0 Go 1.8.0 PHP 1.1.0 Python 1.10.0 .NET 1.11.0 JS 3.1.0 | 0.13.0 | Supported | [v1.11.0 release notes](https://github.com/dapr/dapr/releases/tag/v1.11.0) |
| July 20th 2023 | 1.10.9 | 1.10.0 | Java 1.8.0 Go 1.7.0 PHP 1.1.0 Python 1.9.0 .NET 1.10.0 JS 3.0.0 | 0.11.0 | Supported | [v1.10.9 release notes](https://github.com/dapr/dapr/releases/tag/v1.10.9) |
| June 22nd 2023 | 1.10.8 | 1.10.0 | Java 1.8.0 Go 1.7.0 PHP 1.1.0 Python 1.9.0 .NET 1.10.0 JS 3.0.0 | 0.11.0 | Supported | [v1.10.8 release notes](https://github.com/dapr/dapr/releases/tag/v1.10.8) |
| May 15th 2023 | 1.10.7 | 1.10.0 | Java 1.8.0 Go 1.7.0 PHP 1.1.0 Python 1.9.0 .NET 1.10.0 JS 3.0.0 | 0.11.0 | Supported | |
@@ -81,16 +82,6 @@ The table below shows the versions of Dapr releases that have been tested togeth
| Apr 20th 2022 | 1.6.2 | 1.6.0 | Java 1.4.0 Go 1.3.1 PHP 1.1.0 Python 1.5.0 .NET 1.6.0 JS 2.0.0 | 0.9.0 | Unsupported | |
| Mar 25th 2022 | 1.6.1 | 1.6.0 | Java 1.4.0 Go 1.3.1 PHP 1.1.0 Python 1.5.0 .NET 1.6.0 JS 2.0.0 | 0.9.0 | Unsupported | |
| Jan 25th 2022 | 1.6.0 | 1.6.0 | Java 1.4.0 Go 1.3.1 PHP 1.1.0 Python 1.5.0 .NET 1.6.0 JS 2.0.0 | 0.9.0 | Unsupported | |
-| Mar 25th 2022 | 1.5.2 | 1.6.0 | Java 1.3.0 Go 1.3.0 PHP 1.1.0 Python 1.4.0 .NET 1.5.0 JS 1.0.2 | 0.9.0 | Unsupported | |
-| Dec 6th 2021 | 1.5.1 | 1.5.1 | Java 1.3.0 Go 1.3.0 PHP 1.1.0 Python 1.4.0 .NET 1.5.0 JS 1.0.2 | 0.9.0 | Unsupported | |
-| Nov 11th 2021 | 1.5.0 | 1.5.0 | Java 1.3.0 Go 1.3.0 PHP 1.1.0 Python 1.4.0 .NET 1.5.0 JS 1.0.2 | 0.9.0 | Unsupported | |
-| Dev 6th 2021 | 1.4.4 | 1.4.0 | Java 1.3.0 Go 1.2.0 PHP 1.1.0 Python 1.3.0 .NET 1.4.0 | 0.8.0 | Unsupported | |
-| Oct 7th 2021 | 1.4.3 | 1.4.0 | Java 1.3.0 Go 1.2.0 PHP 1.1.0 Python 1.3.0 .NET 1.4.0 | 0.8.0 | Unsupported | |
-| Sep 24th 2021 | 1.4.2 | 1.4.0 | Java 1.3.0 Go 1.2.0 PHP 1.1.0 Python 1.3.0 .NET 1.4.0 | 0.8.0 | Unsupported | |
-| Sep 22nd 2021 | 1.4.1 | 1.4.0 | Java 1.3.0 Go 1.2.0 PHP 1.1.0 Python 1.3.0 .NET 1.4.0 | 0.8.0 | Unsupported | |
-| Sep 15th 2021 | 1.4 | 1.4.0 | Java 1.3.0 Go 1.2.0 PHP 1.1.0 Python 1.3.0 .NET 1.4.0 | 0.8.0 | Unsupported | |
-| Sep 14th 2021 | 1.3.1 | 1.3.0 | Java 1.2.0 Go 1.2.0 PHP 1.1.0 Python 1.2.0 .NET 1.3.0 | 0.7.0 | Unsupported | |
-| Jul 26th 2021 | 1.3 | 1.3.0 | Java 1.2.0 Go 1.2.0 PHP 1.1.0 Python 1.2.0 .NET 1.3.0 | 0.7.0 | Unsupported | |
## Upgrade paths
@@ -122,7 +113,8 @@ General guidance on upgrading can be found for [self hosted mode]({{< ref self-h
| 1.8.0 to 1.8.6 | N/A | 1.9.6 |
| 1.9.0 | N/A | 1.9.6 |
| 1.10.0 | N/A | 1.10.8 |
-| 1.11.0 | N/A | 1.11.3 |
+| 1.11.0 | N/A | 1.11.4 |
+| 1.12.0 | N/A | 1.12.0 |
## Upgrade on Hosting platforms
diff --git a/daprdocs/content/en/reference/api/error_codes.md b/daprdocs/content/en/reference/api/error_codes.md
index d93bc0f1188..7de8c0a2c3c 100644
--- a/daprdocs/content/en/reference/api/error_codes.md
+++ b/daprdocs/content/en/reference/api/error_codes.md
@@ -3,7 +3,7 @@ type: docs
title: "Error codes returned by APIs"
linkTitle: "Error codes"
description: "Detailed reference of the Dapr API error codes"
-weight: 1200
+weight: 1300
---
For http calls made to Dapr runtime, when an error is encountered, an error json is returned in http response body. The json contains an error code and a descriptive error message, e.g.
diff --git a/daprdocs/content/en/reference/api/placement_api.md b/daprdocs/content/en/reference/api/placement_api.md
new file mode 100644
index 00000000000..a882eb8618e
--- /dev/null
+++ b/daprdocs/content/en/reference/api/placement_api.md
@@ -0,0 +1,79 @@
+---
+type: docs
+title: "Placement API reference"
+linkTitle: "Placement API"
+description: "Detailed documentation on the Placement API"
+weight: 1200
+---
+
+Dapr has an HTTP API `/placement/state` for the Placement service that exposes placement table information. The API is exposed on the sidecar on the same port as the healthz endpoint. This is an unauthenticated endpoint, and is disabled by default.
+
+To enable the placement metadata in self-hosted mode, either set the `DAPR_PLACEMENT_METADATA_ENABLED` environment variable or the `metadata-enabled` command line argument on the Placement service to `true`. See [how to run the Placement service in self-hosted mode]({{< ref "self-hosted-no-docker.md#enable-actors" >}}).
+
+If you are using Helm for deployment of the Placement service on Kubernetes then to enable the placement metadata, set `dapr_placement.metadataEnabled` to `true`.
+
+## Use case
+
+The placement table API can be used for retrieving the current placement table, which contains all the actors registered. This can be helpful for debugging and allows tools to extract and present information about actors.
+
+## HTTP Request
+
+```
+GET http://localhost:<healthzPort>/placement/state
+```
+
+## HTTP Response Codes
+
+Code | Description
+---- | -----------
+200 | Placement tables information returned
+500 | Placement could not return the placement tables information
+
+## HTTP Response Body
+
+**Placement tables API Response Object**
+
+Name | Type | Description
+---- | ---- | -----------
+tableVersion | int | The placement table version
+hostList | [Actor Host Info](#actorhostinfo)[] | A JSON array of registered actor host info.
+
+ **Actor Host Info**
+
+Name | Type | Description
+---- | ---- | -----------
+name | string | The host:port address of the actor.
+appId | string | The app ID.
+actorTypes | json string array | List of actor types it hosts.
+updatedAt | timestamp | Timestamp of the actor registered/updated.
+
+## Examples
+
+```shell
+ curl localhost:8080/placement/state
+```
+
+```json
+{
+ "hostList": [{
+ "name": "198.18.0.1:49347",
+ "appId": "actor1",
+ "actorTypes": ["testActorType1", "testActorType3"],
+ "updatedAt": 1690274322325260000
+ },
+ {
+ "name": "198.18.0.2:49347",
+ "appId": "actor2",
+ "actorTypes": ["testActorType2"],
+ "updatedAt": 1690274322325260000
+ },
+ {
+ "name": "198.18.0.3:49347",
+ "appId": "actor2",
+ "actorTypes": ["testActorType2"],
+ "updatedAt": 1690274322325260000
+ }
+ ],
+ "tableVersion": 1
+}
+```
\ No newline at end of file
diff --git a/daprdocs/content/en/reference/api/workflow_api.md b/daprdocs/content/en/reference/api/workflow_api.md
index 80814852e0d..9f9c34de81a 100644
--- a/daprdocs/content/en/reference/api/workflow_api.md
+++ b/daprdocs/content/en/reference/api/workflow_api.md
@@ -6,6 +6,10 @@ description: "Detailed documentation on the workflow API"
weight: 900
---
+{{% alert title="Note" color="primary" %}}
+Dapr Workflow is currently in beta. [See known limitations for {{% dapr-latest-version cli="true" %}}]({{< ref "workflow-overview.md#limitations" >}}).
+{{% /alert %}}
+
Dapr provides users with the ability to interact with workflows and comes with a built-in `dapr` component.
## Start workflow request
@@ -13,7 +17,7 @@ Dapr provides users with the ability to interact with workflows and comes with a
Start a workflow instance with the given name and optionally, an instance ID.
```
-POST http://localhost:3500/v1.0-alpha1/workflows///start[?instanceID=]
+POST http://localhost:3500/v1.0-beta1/workflows/<workflowComponentName>/<workflowName>/start[?instanceID=<instanceID>]
```
Note that workflow instance IDs can only contain alphanumeric characters, underscores, and dashes.
@@ -53,7 +57,7 @@ The API call will provide a response similar to this:
Terminate a running workflow instance with the given name and instance ID.
```
-POST http://localhost:3500/v1.0-alpha1/workflows///terminate
+POST http://localhost:3500/v1.0-beta1/workflows/<workflowComponentName>/<instanceId>/terminate
```
### URL parameters
@@ -80,7 +84,7 @@ This API does not return any content.
For workflow components that support subscribing to external events, such as the Dapr Workflow engine, you can use the following "raise event" API to deliver a named event to a specific workflow instance.
```
-POST http://localhost:3500/v1.0-alpha1/workflows///raiseEvent/
+POST http://localhost:3500/v1.0-beta1/workflows/<workflowComponentName>/<instanceId>/raiseEvent/<eventName>
```
{{% alert title="Note" color="primary" %}}
@@ -113,7 +117,7 @@ None.
Pause a running workflow instance.
```
-POST http://localhost:3500/v1.0-alpha1/workflows///pause
+POST http://localhost:3500/v1.0-beta1/workflows/<workflowComponentName>/<instanceId>/pause
```
### URL parameters
@@ -140,7 +144,7 @@ None.
Resume a paused workflow instance.
```
-POST http://localhost:3500/v1.0-alpha1/workflows///resume
+POST http://localhost:3500/v1.0-beta1/workflows/<workflowComponentName>/<instanceId>/resume
```
### URL parameters
@@ -167,7 +171,7 @@ None.
Purge the workflow state from your state store with the workflow's instance ID.
```
-POST http://localhost:3500/v1.0-alpha1/workflows///purge
+POST http://localhost:3500/v1.0-beta1/workflows/<workflowComponentName>/<instanceId>/purge
```
### URL parameters
@@ -194,7 +198,7 @@ None.
Get information about a given workflow instance.
```
-GET http://localhost:3500/v1.0-alpha1/workflows//
+GET http://localhost:3500/v1.0-beta1/workflows/<workflowComponentName>/<instanceId>
```
### URL parameters
diff --git a/daprdocs/content/en/reference/arguments-annotations-overview.md b/daprdocs/content/en/reference/arguments-annotations-overview.md
index a1c044a68d8..68eab9812c3 100644
--- a/daprdocs/content/en/reference/arguments-annotations-overview.md
+++ b/daprdocs/content/en/reference/arguments-annotations-overview.md
@@ -39,7 +39,7 @@ This table is meant to help users understand the equivalent options for running
| `--profiling-port` | `--profiling-port` | | not supported | The port for the profile server (default `7777`) |
| `--app-protocol` | `--app-protocol` | `-P` | `dapr.io/app-protocol` | Configures the protocol Dapr uses to communicate with your app. Valid options are `http`, `grpc`, `https` (HTTP with TLS), `grpcs` (gRPC with TLS), `h2c` (HTTP/2 Cleartext). Note that Dapr does not validate TLS certificates presented by the app. Default is `http` |
| `--enable-app-health-check` | `--enable-app-health-check` | | `dapr.io/enable-app-health-check` | Boolean that enables the [health checks]({{< ref "app-health.md#configuring-app-health-checks" >}}). Default is `false`. |
-| `--app-health-check-path` | `--app-health-check-path` | | `dapr.io/app-health-check-path` | Path that Dapr invokes for health probes when the app channel is HTTP (this value is ignored if the app channel is using gRPC). Requires [app health checks to be enabled]({{< ref "app-health.md#configuring-app-health-checks" >}}). Default is `/health`. |
+| `--app-health-check-path` | `--app-health-check-path` | | `dapr.io/app-health-check-path` | Path that Dapr invokes for health probes when the app channel is HTTP (this value is ignored if the app channel is using gRPC). Requires [app health checks to be enabled]({{< ref "app-health.md#configuring-app-health-checks" >}}). Default is `/healthz`. |
| `--app-health-probe-interval` | `--app-health-probe-interval` | | `dapr.io/app-health-probe-interval` | Number of *seconds* between each health probe. Requires [app health checks to be enabled]({{< ref "app-health.md#configuring-app-health-checks" >}}). Default is `5` |
| `--app-health-probe-timeout` | `--app-health-probe-timeout` | | `dapr.io/app-health-probe-timeout` | Timeout in *milliseconds* for health probe requests. Requires [app health checks to be enabled]({{< ref "app-health.md#configuring-app-health-checks" >}}). Default is `500` |
| `--app-health-threshold` | `--app-health-threshold` | | `dapr.io/app-health-threshold"` | Max number of consecutive failures before the app is considered unhealthy. Requires [app health checks to be enabled]({{< ref "app-health.md#configuring-app-health-checks" >}}). Default is `3` |
diff --git a/daprdocs/content/en/reference/cli/dapr-init.md b/daprdocs/content/en/reference/cli/dapr-init.md
index a94c3e19a40..bdb93bf5d15 100644
--- a/daprdocs/content/en/reference/cli/dapr-init.md
+++ b/daprdocs/content/en/reference/cli/dapr-init.md
@@ -44,6 +44,9 @@ dapr init [flags]
| N/A | DAPR_HELM_REPO_USERNAME | A username for a private Helm chart | The username required to access the private Dapr Helm chart. If it can be accessed publicly, this env variable does not need to be set|
| N/A | DAPR_HELM_REPO_PASSWORD | A password for a private Helm chart |The password required to access the private Dapr Helm chart. If it can be accessed publicly, this env variable does not need to be set| |
| `--container-runtime` | | `docker` | Used to pass in a different container runtime other than Docker. Supported container runtimes are: `docker`, `podman` |
+| `--dev` | | | Creates Redis and Zipkin deployments when run in Kubernetes. |
+
+
### Examples
#### Self hosted environment
diff --git a/daprdocs/content/en/reference/cli/dapr-run.md b/daprdocs/content/en/reference/cli/dapr-run.md
index 9a519f98c72..b6fed8a8265 100644
--- a/daprdocs/content/en/reference/cli/dapr-run.md
+++ b/daprdocs/content/en/reference/cli/dapr-run.md
@@ -50,6 +50,7 @@ dapr run [flags] [command]
| `--unix-domain-socket`, `-u` | | | Path to a unix domain socket dir mount. If specified, communication with the Dapr sidecar uses unix domain sockets for lower latency and greater throughput when compared to using TCP ports. Not available on Windows. |
| `--dapr-http-max-request-size` | | `4` | Max size of the request body in MB. |
| `--dapr-http-read-buffer-size` | | `4` | Max size of the HTTP read buffer in KB. This also limits the maximum size of HTTP headers. The default 4 KB |
+| `--kubernetes`, `-k` | | | Running Dapr on Kubernetes, and used for [Multi-App Run template files on Kubernetes]({{< ref multi-app-dapr-run >}}). |
| `--components-path`, `-d` | | Linux/Mac: `$HOME/.dapr/components` Windows: `%USERPROFILE%\.dapr\components` | **Deprecated** in favor of `--resources-path` |
### Examples
@@ -81,4 +82,10 @@ dapr run --app-id myapp --app-port 3000 --enable-api-logging -- node myapp.js
# Pass multiple resource paths
dapr run --app-id myapp --resources-path path1 --resources-path path2
-```
+
+# Run the multi-app run template file
+dapr run -f dapr.yaml
+
+# Run the multi-app run template file on Kubernetes
+dapr run -k -f dapr.yaml
+```
\ No newline at end of file
diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/http.md b/daprdocs/content/en/reference/components-reference/supported-bindings/http.md
index 39355955457..a724e30b93a 100644
--- a/daprdocs/content/en/reference/components-reference/supported-bindings/http.md
+++ b/daprdocs/content/en/reference/components-reference/supported-bindings/http.md
@@ -9,7 +9,7 @@ aliases:
## Alternative
-The [service invocation API]({{< ref service_invocation_api.md >}}) allows for the invocation of non-Dapr HTTP endpoints and is the recommended approach. Read ["How-To: Invoke Non-Dapr Endpoints using HTTP"]({{< ref howto-invoke-non-dapr-endpoints.md >}}) for more information.
+The [service invocation API]({{< ref service_invocation_api.md >}}) allows invoking non-Dapr HTTP endpoints and is the recommended approach. Read ["How-To: Invoke Non-Dapr Endpoints using HTTP"]({{< ref howto-invoke-non-dapr-endpoints.md >}}) for more information.
## Setup Dapr component
@@ -22,66 +22,73 @@ spec:
type: bindings.http
version: v1
metadata:
- - name: url
- value: "http://something.com"
- - name: MTLSRootCA
- value: "/Users/somepath/root.pem" # OPTIONAL Secret store ref, , or
- - name: MTLSClientCert
- value: "/Users/somepath/client.pem" # OPTIONAL Secret store ref, , or
- - name: MTLSClientKey
- value: "/Users/somepath/client.key" # OPTIONAL Secret store ref, , or
- - name: MTLSRenegotiation
- value: "RenegotiateOnceAsClient" # OPTIONAL one of: RenegotiateNever, RenegotiateOnceAsClient, RenegotiateFreelyAsClient
- - name: securityToken # OPTIONAL
- secretKeyRef:
- name: mysecret
- key: "mytoken"
- - name: securityTokenHeader
- value: "Authorization: Bearer" # OPTIONAL
- - name: direction
- value: "output"
+ - name: url
+ value: "http://something.com"
+ #- name: maxResponseBodySize
+ # value: "100Mi" # OPTIONAL maximum amount of data to read from a response
+ #- name: MTLSRootCA
+ # value: "/Users/somepath/root.pem" # OPTIONAL path to root CA or PEM-encoded string
+ #- name: MTLSClientCert
+ # value: "/Users/somepath/client.pem" # OPTIONAL path to client cert or PEM-encoded string
+ #- name: MTLSClientKey
+ # value: "/Users/somepath/client.key" # OPTIONAL path to client key or PEM-encoded string
+ #- name: MTLSRenegotiation
+ # value: "RenegotiateOnceAsClient" # OPTIONAL one of: RenegotiateNever, RenegotiateOnceAsClient, RenegotiateFreelyAsClient
+ #- name: securityToken # OPTIONAL
+ # secretKeyRef:
+ # name: mysecret
+ # key: "mytoken"
+ #- name: securityTokenHeader
+ # value: "Authorization: Bearer" # OPTIONAL
+ #- name: direction
+ # value: "output"
```
## Spec metadata fields
| Field | Required | Binding support | Details | Example |
|--------------------|:--------:|--------|--------|---------|
-| `url` | Y | Output |The base URL of the HTTP endpoint to invoke | `http://host:port/path`, `http://myservice:8000/customers`
-| `MTLSRootCA` | N | Output |Secret store reference, path to root ca certificate, or pem encoded string |
-| `MTLSClientCert` | N | Output |Secret store reference, path to client certificate, or pem encoded string |
-| `MTLSClientKey` | N | Output |Secret store reference, path client private key, or pem encoded string |
-| `MTLSRenegotiation` | N | Output |Type of TLS renegotiation to be used | `RenegotiateOnceAsClient`
-| `securityToken` | N | Output |The value of a token to be added to an HTTP request as a header. Used together with `securityTokenHeader` |
-| `securityTokenHeader`| N | Output |The name of the header for `securityToken` on an HTTP request that |
-| `direction`| N | Output |The direction of the binding | `"output"`
-
-### How to configure MTLS related fields in Metadata
+| `url` | Y | Output | The base URL of the HTTP endpoint to invoke | `http://host:port/path`, `http://myservice:8000/customers` |
+| `maxResponseBodySize`| N | Output | Maximum length of the response to read. A whole number is interpreted as bytes; units such as `Ki, Mi, Gi` (binary) or `k, M, G` (decimal) can be added for convenience. The default value is `100Mi` | "1Gi", "100Mi", "20Ki", "200" (bytes) |
+| `MTLSRootCA` | N | Output | Path to root CA certificate or PEM-encoded string |
+| `MTLSClientCert` | N | Output | Path to client certificate or PEM-encoded string |
+| `MTLSClientKey` | N | Output | Path to client private key or PEM-encoded string |
+| `MTLSRenegotiation` | N | Output | Type of mTLS renegotiation to be used | `RenegotiateOnceAsClient`
+| `securityToken` | N | Output | The value of a token to be added to an HTTP request as a header. Used together with `securityTokenHeader` |
+| `securityTokenHeader` | N | Output | The name of the header for `securityToken` on an HTTP request |
+| `direction` | N | Output |The direction of the binding | `"output"`
+
+### How to configure mTLS-related fields in metadata
+
The values for **MTLSRootCA**, **MTLSClientCert** and **MTLSClientKey** can be provided in three ways:
-1. Secret store reference
-```yaml
-apiVersion: dapr.io/v1alpha1
-kind: Component
-metadata:
- name:
-spec:
- type: bindings.http
- version: v1
- metadata:
- - name: url
- value: http://something.com
- - name: MTLSRootCA
- secretKeyRef:
- name: mysecret
- key: myrootca
-auth:
- secretStore:
-```
-2. Path to the file: The absolute path to the file can be provided as a value for the field.
-3. PEM encoded string: The PEM encoded string can also be provided as a value for the field.
+
+- Secret store reference:
+
+ ```yaml
+ apiVersion: dapr.io/v1alpha1
+ kind: Component
+ metadata:
+ name:
+ spec:
+ type: bindings.http
+ version: v1
+ metadata:
+ - name: url
+ value: http://something.com
+ - name: MTLSRootCA
+ secretKeyRef:
+ name: mysecret
+ key: myrootca
+ auth:
+ secretStore:
+ ```
+
+- Path to the file: the absolute path to the file can be provided as a value for the field.
+- PEM encoded string: the PEM-encoded string can also be provided as a value for the field.
{{% alert title="Note" color="primary" %}}
-Metadata fields **MTLSRootCA**, **MTLSClientCert** and **MTLSClientKey** are used to configure TLS(m) authentication.
-To use mTLS authentication, you must provide all three fields. See [mTLS]({{< ref "#using-mtls-or-enabling-client-tls-authentication-along-with-https" >}}) for more details. You can also provide only **MTLSRootCA**, to enable **HTTPS** connection. See [HTTPS]({{< ref "#install-the-ssl-certificate-in-the-sidecar" >}}) section for more details.
+Metadata fields **MTLSRootCA**, **MTLSClientCert** and **MTLSClientKey** are used to configure (m)TLS authentication.
+To use mTLS authentication, you must provide all three fields. See [mTLS]({{< ref "#using-mtls-or-enabling-client-tls-authentication-along-with-https" >}}) for more details. You can also provide only **MTLSRootCA** to enable an **HTTPS** connection with a certificate signed by a custom CA. See the [HTTPS]({{< ref "#install-the-ssl-certificate-in-the-sidecar" >}}) section for more details.
{{% /alert %}}
@@ -107,8 +114,8 @@ All of the operations above support the following metadata fields
| Field | Required | Details | Example |
|--------------------|:--------:|---------|---------|
-| path | N | The path to append to the base URL. Used for accessing specific URIs | `"/1234"`, `"/search?lastName=Jones"`
-| Headers* | N | Any fields that have a capital first letter are sent as request headers | `"Content-Type"`, `"Accept"`
+| `path` | N | The path to append to the base URL. Used for accessing specific URIs. | `"/1234"`, `"/search?lastName=Jones"`
+| Field with a capitalized first letter | N | Any fields that have a capital first letter are sent as request headers | `"Content-Type"`, `"Accept"`
#### Retrieving data
@@ -137,9 +144,9 @@ The response body contains the data returned by the HTTP endpoint. The `data` f
| Field | Required | Details | Example |
|--------------------|:--------:|---------|---------|
-| statusCode | Y | The [HTTP status code](https://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html) | `200`, `404`, `503`
-| status | Y | The status description | `"200 OK"`, `"201 Created"`
-| Headers* | N | Any fields that have a capital first letter are sent as request headers | `"Content-Type"`
+| `statusCode` | Y | The [HTTP status code](https://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html) | `200`, `404`, `503` |
+| `status` | Y | The status description | `"200 OK"`, `"201 Created"` |
+| Field with a capitalized first letter | N | Any fields that have a capital first letter are sent as request headers | `"Content-Type"` |
#### Example
@@ -168,14 +175,14 @@ curl -d '{ "operation": "get" }' \
{{< tabs Windows Linux >}}
{{% codetab %}}
-```bash
+```sh
curl -d "{ \"operation\": \"get\", \"metadata\": { \"path\": \"/things/1234\" } }" \
http://localhost:/v1.0/bindings/
```
{{% /codetab %}}
{{% codetab %}}
-```bash
+```sh
curl -d '{ "operation": "get", "metadata": { "path": "/things/1234" } }' \
http://localhost:/v1.0/bindings/
```
@@ -210,14 +217,14 @@ For example, the default content type is `application/json; charset=utf-8`. This
{{< tabs Windows Linux >}}
{{% codetab %}}
-```bash
+```sh
curl -d "{ \"operation\": \"post\", \"data\": \"YOUR_BASE_64_CONTENT\", \"metadata\": { \"path\": \"/things\" } }" \
http://localhost:/v1.0/bindings/
```
{{% /codetab %}}
{{% codetab %}}
-```bash
+```sh
curl -d '{ "operation": "post", "data": "YOUR_BASE_64_CONTENT", "metadata": { "path": "/things" } }' \
http://localhost:/v1.0/bindings/
```
@@ -229,9 +236,8 @@ curl -d '{ "operation": "post", "data": "YOUR_BASE_64_CONTENT", "metadata": { "p
The HTTP binding can also be used with HTTPS endpoints by configuring the Dapr sidecar to trust the server's SSL certificate.
-
1. Update the binding URL to use `https` instead of `http`.
-1. Refer [How-To: Install certificates in the Dapr sidecar]({{< ref install-certificates >}}), to install the SSL certificate in the sidecar.
+1. If you need to add a custom TLS certificate, refer to [How-To: Install certificates in the Dapr sidecar]({{< ref install-certificates >}}) to install the TLS certificates in the sidecar.
### Example
@@ -251,13 +257,12 @@ spec:
value: https://my-secured-website.com # Use HTTPS
```
-#### Install the SSL certificate in the sidecar
-
+#### Install the TLS certificate in the sidecar
{{< tabs Self-Hosted Kubernetes >}}
{{% codetab %}}
-When the sidecar is not running inside a container, the SSL certificate can be directly installed on the host operating system.
+When the sidecar is not running inside a container, the TLS certificate can be directly installed on the host operating system.
Below is an example when the sidecar is running as a container. The SSL certificate is located on the host computer at `/tmp/ssl/cert.pem`.
@@ -286,7 +291,7 @@ services:
{{% codetab %}}
-The sidecar can read the SSL certificate from a variety of sources. See [How-to: Mount Pod volumes to the Dapr sidecar]({{< ref kubernetes-volume-mounts >}}) for more. In this example, we store the SSL certificate as a Kubernetes secret.
+The sidecar can read the TLS certificate from a variety of sources. See [How-to: Mount Pod volumes to the Dapr sidecar]({{< ref kubernetes-volume-mounts >}}) for more. In this example, we store the TLS certificate as a Kubernetes secret.
```bash
kubectl create secret generic myapp-cert --from-file /tmp/ssl/cert.pem
@@ -354,24 +359,26 @@ HTTPS binding support can also be configured using the **MTLSRootCA** metadata o
{{% /alert %}}
## Using mTLS or enabling client TLS authentication along with HTTPS
+
You can configure the HTTP binding to use mTLS or client TLS authentication along with HTTPS by providing the `MTLSRootCA`, `MTLSClientCert`, and `MTLSClientKey` metadata fields in the binding component.
-These fields can be passed as a file path or as a pem encoded string.
+These fields can be passed as a file path or as a PEM-encoded string:
+
- If the file path is provided, the file is read and the contents are used.
-- If the pem encoded string is provided, the string is used as is.
+- If the PEM-encoded string is provided, the string is used as is.
+
When these fields are configured, the Dapr sidecar uses the provided certificate to authenticate itself with the server during the TLS handshake process.
-If the remote server is enforcing TLS renegotiation, you also need to set the metadata field `MTLSRenegotiation`. This field accepts one of following options:
+If the remote server is enforcing TLS renegotiation, you also need to set the metadata field `MTLSRenegotiation`. This field accepts one of the following options:
+
- `RenegotiateNever`
- `RenegotiateOnceAsClient`
-- `RenegotiateFreelyAsClient`.
+- `RenegotiateFreelyAsClient`
For more details see [the Go `RenegotiationSupport` documentation](https://pkg.go.dev/crypto/tls#RenegotiationSupport).
-### When to use:
You can use this when the server with which the HTTP binding is configured to communicate requires mTLS or client TLS authentication.
-
## Related links
- [Basic schema for a Dapr component]({{< ref component-schema >}})
diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/kafka.md b/daprdocs/content/en/reference/components-reference/supported-bindings/kafka.md
index 38afe3c503a..7029b546bd8 100644
--- a/daprdocs/content/en/reference/components-reference/supported-bindings/kafka.md
+++ b/daprdocs/content/en/reference/components-reference/supported-bindings/kafka.md
@@ -74,6 +74,7 @@ spec:
| `oidcScopes` | N | Input/Output | Comma-delimited list of OAuth2/OIDC scopes to request with the access token. Recommended when `authType` is set to `oidc`. Defaults to `"openid"` | `"openid,kafka-prod"` |
| `version` | N | Input/Output | Kafka cluster version. Defaults to 2.0.0. Please note that this needs to be mandatorily set to `1.0.0` for EventHubs with Kafka. | `"1.0.0"` |
| `direction` | N | Input/Output | The direction of the binding. | `"input"`, `"output"`, `"input, output"` |
+| `oidcExtensions` | N | Input/Output | String containing a JSON-encoded dictionary of OAuth2/OIDC extensions to request with the access token | `{"cluster":"kafka","poolid":"kafkapool"}` |
#### Note
The metadata `version` must be set to `1.0.0` when using Azure EventHubs with Kafka.
diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/kubernetes-binding.md b/daprdocs/content/en/reference/components-reference/supported-bindings/kubernetes-binding.md
index 25391a7748d..ee6389e0009 100644
--- a/daprdocs/content/en/reference/components-reference/supported-bindings/kubernetes-binding.md
+++ b/daprdocs/content/en/reference/components-reference/supported-bindings/kubernetes-binding.md
@@ -36,6 +36,7 @@ spec:
| `namespace` | Y | Input | The Kubernetes namespace to read events from | `"default"` |
| `resyncPeriodInSec` | N | Input | The period of time to refresh event list from Kubernetes API server. Defaults to `"10"` | `"15"`
| `direction` | N | Input | The direction of the binding | `"input"`
+| `kubeconfigPath` | N | Input | The path to the kubeconfig file. If not specified, the binding uses the default in-cluster config value | `"/path/to/kubeconfig"`
## Binding support
diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/mysql.md b/daprdocs/content/en/reference/components-reference/supported-bindings/mysql.md
index d03dcfcab89..3c44b53a84c 100644
--- a/daprdocs/content/en/reference/components-reference/supported-bindings/mysql.md
+++ b/daprdocs/content/en/reference/components-reference/supported-bindings/mysql.md
@@ -60,16 +60,23 @@ Note that you can not use secret just for username/password. If you use secret,
### SSL connection
If your server requires SSL your connection string must end of `&tls=custom` for example:
+
```bash
":@tcp(:3306)/?allowNativePasswords=true&tls=custom"
```
- You must replace the `` with a full path to the PEM file. If you are using [MySQL on Azure](http://bit.ly/AzureMySQLSSL) see the Azure [documentation on SSL database connections](http://bit.ly/MySQLSSL), for information on how to download the required certificate. The connection to MySQL will require a minimum TLS version of 1.2.
-Also note that by default [MySQL go driver](https://github.com/go-sql-driver/mysql) only supports one SQL statement per query/command.
+> You must replace the `` with a full path to the PEM file. If you are using Azure Database for MySQL, see the Azure [documentation on SSL database connections](https://learn.microsoft.com/azure/mysql/single-server/how-to-configure-ssl) for information on how to download the required certificate. The connection to MySQL requires a minimum TLS version of 1.2.
+
+### Multiple statements
+
+By default, the [MySQL Go driver](https://github.com/go-sql-driver/mysql) only supports one SQL statement per query/command.
+
To allow multiple statements in one query you need to add `multiStatements=true` to a query string, for example:
+
```bash
":@tcp(:3306)/?multiStatements=true"
```
+
While this allows batch queries, it also greatly increases the risk of SQL injections. Only the result of the first query is returned,
all other results are silently discarded.
@@ -81,17 +88,35 @@ This component supports **output binding** with the following operations:
- `query`
- `close`
+### Parametrized queries
+
+This binding supports parametrized queries, which allow separating the SQL query itself from user-supplied values. The usage of parametrized queries is **strongly recommended** for security reasons, as they prevent [SQL Injection attacks](https://owasp.org/www-community/attacks/SQL_Injection).
+
+For example:
+
+```sql
+-- ❌ WRONG! Includes values in the query and is vulnerable to SQL Injection attacks.
+SELECT * FROM mytable WHERE user_key = 'something';
+
+-- ✅ GOOD! Uses parametrized queries.
+-- This will be executed with parameters ["something"]
+SELECT * FROM mytable WHERE user_key = ?;
+```
+
### exec
The `exec` operation can be used for DDL operations (like table creation), as well as `INSERT`, `UPDATE`, `DELETE` operations which return only metadata (e.g. number of affected rows).
+The `params` property is a string containing a JSON-encoded array of parameters.
+
**Request**
```json
{
"operation": "exec",
"metadata": {
- "sql": "INSERT INTO foo (id, c1, ts) VALUES (1, 'demo', '2020-09-24T11:45:05Z07:00')"
+ "sql": "INSERT INTO foo (id, c1, ts) VALUES (?, ?, ?)",
+ "params": "[1, \"demo\", \"2020-09-24T11:45:05Z07:00\"]"
}
}
```
@@ -106,7 +131,7 @@ The `exec` operation can be used for DDL operations (like table creation), as we
"start-time": "2020-09-24T11:13:46.405097Z",
"end-time": "2020-09-24T11:13:46.414519Z",
"rows-affected": "1",
- "sql": "INSERT INTO foo (id, c1, ts) VALUES (1, 'demo', '2020-09-24T11:45:05Z07:00')"
+ "sql": "INSERT INTO foo (id, c1, ts) VALUES (?, ?, ?)"
}
}
```
@@ -115,13 +140,16 @@ The `exec` operation can be used for DDL operations (like table creation), as we
The `query` operation is used for `SELECT` statements, which returns the metadata along with data in a form of an array of row values.
+The `params` property is a string containing a JSON-encoded array of parameters.
+
**Request**
```json
{
"operation": "query",
"metadata": {
- "sql": "SELECT * FROM foo WHERE id < 3"
+ "sql": "SELECT * FROM foo WHERE id < ?",
+ "params": "[3]"
}
}
```
@@ -135,7 +163,7 @@ The `query` operation is used for `SELECT` statements, which returns the metadat
"duration": "432µs",
"start-time": "2020-09-24T11:13:46.405097Z",
"end-time": "2020-09-24T11:13:46.420566Z",
- "sql": "SELECT * FROM foo WHERE id < 3"
+ "sql": "SELECT * FROM foo WHERE id < ?"
},
"data": [
{column_name: value, column_name: value, ...},
@@ -150,7 +178,7 @@ or numbers (language specific data type)
### close
-Finally, the `close` operation can be used to explicitly close the DB connection and return it to the pool. This operation doesn't have any response.
+The `close` operation can be used to explicitly close the DB connection and return it to the pool. This operation doesn't have any response.
**Request**
@@ -160,8 +188,6 @@ Finally, the `close` operation can be used to explicitly close the DB connection
}
```
-> Note, the MySQL binding itself doesn't prevent SQL injection, like with any database application, validate the input before executing query.
-
## Related links
- [Basic schema for a Dapr component]({{< ref component-schema >}})
diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/postgresql.md b/daprdocs/content/en/reference/components-reference/supported-bindings/postgresql.md
index 31c9f230cff..cfaf92ad37c 100644
--- a/daprdocs/content/en/reference/components-reference/supported-bindings/postgresql.md
+++ b/daprdocs/content/en/reference/components-reference/supported-bindings/postgresql.md
@@ -22,10 +22,11 @@ spec:
type: bindings.postgresql
version: v1
metadata:
- - name: url # Required
- value: ""
- - name: direction
- value: ""
+ # Connection string
+ - name: connectionString
+ value: ""
+ - name: direction
+ value: ""
```
{{% alert title="Warning" color="warning" %}}
@@ -34,25 +35,48 @@ The above example uses secrets as plain strings. It is recommended to use a secr
## Spec metadata fields
-| Field | Required | Binding support | Details | Example |
-|--------------------|:--------:|------------|-----|---------|
-| `url` | Y | Output | PostgreSQL connection string See [here](#url-format) for more details | `"user=dapr password=secret host=dapr.example.com port=5432 dbname=dapr sslmode=verify-ca"` |
-| `direction` | N | Output | The direction of the binding | `"output"` |
+### Authenticate using a connection string
+
+The following metadata options are **required** to authenticate using a PostgreSQL connection string.
+
+| Field | Required | Details | Example |
+|--------|:--------:|---------|---------|
+| `connectionString` | Y | The connection string for the PostgreSQL database. See the PostgreSQL [documentation on database connections](https://www.postgresql.org/docs/current/libpq-connect.html) for information on how to define a connection string. | `"host=localhost user=postgres password=example port=5432 connect_timeout=10 database=my_db"`
+
+### Authenticate using Azure AD
+
+Authenticating with Azure AD is supported with Azure Database for PostgreSQL. All authentication methods supported by Dapr can be used, including client credentials ("service principal") and Managed Identity.
+
+| Field | Required | Details | Example |
+|--------|:--------:|---------|---------|
+| `useAzureAD` | Y | Must be set to `true` to enable the component to retrieve access tokens from Azure AD. | `"true"` |
+| `connectionString` | Y | The connection string for the PostgreSQL database. This must contain the user, which corresponds to the name of the user created inside PostgreSQL that maps to the Azure AD identity; this is often the name of the corresponding principal (e.g. the name of the Azure AD application). This connection string should not contain any password. | `"host=mydb.postgres.database.azure.com user=myapplication port=5432 database=my_db sslmode=require"` |
+| `azureTenantId` | N | ID of the Azure AD tenant | `"cd4b2887-304c-…"` |
+| `azureClientId` | N | Client ID (application ID) | `"c7dd251f-811f-…"` |
+| `azureClientSecret` | N | Client secret (application password) | `"Ecy3X…"` |
+
+### Other metadata options
+
+| Field | Required | Binding support |Details | Example |
+|--------------------|:--------:|-----|---|---------|
+| `maxConns` | N | Output | Maximum number of connections pooled by this component. Set to 0 or lower to use the default value, which is the greater of 4 or the number of CPUs. | `"4"`
+| `connectionMaxIdleTime` | N | Output | Max idle time before unused connections are automatically closed in the connection pool. By default, there's no value and this is left to the database driver to choose. | `"5m"`
+| `queryExecMode` | N | Output | Controls the default mode for executing queries. By default, Dapr uses the extended protocol and automatically prepares and caches prepared statements. However, this may be incompatible with proxies such as PGBouncer. In this case it may be preferable to use `exec` or `simple_protocol`. | `"simple_protocol"`
### URL format
-The PostgreSQL binding uses [pgx connection pool](https://github.com/jackc/pgx) internally so the `url` parameter can be any valid connection string, either in a `DSN` or `URL` format:
+The PostgreSQL binding uses [pgx connection pool](https://github.com/jackc/pgx) internally so the `connectionString` parameter can be any valid connection string, either in a `DSN` or `URL` format:
**Example DSN**
```shell
-user=dapr password=secret host=dapr.example.com port=5432 dbname=dapr sslmode=verify-ca
+user=dapr password=secret host=dapr.example.com port=5432 dbname=my_dapr sslmode=verify-ca
```
**Example URL**
```shell
-postgres://dapr:secret@dapr.example.com:5432/dapr?sslmode=verify-ca
+postgres://dapr:secret@dapr.example.com:5432/my_dapr?sslmode=verify-ca
```
Both methods also support connection pool configuration variables:
@@ -72,17 +96,35 @@ This component supports **output binding** with the following operations:
- `query`
- `close`
+### Parametrized queries
+
+This binding supports parametrized queries, which allow separating the SQL query itself from user-supplied values. The usage of parametrized queries is **strongly recommended** for security reasons, as they prevent [SQL Injection attacks](https://owasp.org/www-community/attacks/SQL_Injection).
+
+For example:
+
+```sql
+-- ❌ WRONG! Includes values in the query and is vulnerable to SQL Injection attacks.
+SELECT * FROM mytable WHERE user_key = 'something';
+
+-- ✅ GOOD! Uses parametrized queries.
+-- This will be executed with parameters ["something"]
+SELECT * FROM mytable WHERE user_key = $1;
+```
+
### exec
The `exec` operation can be used for DDL operations (like table creation), as well as `INSERT`, `UPDATE`, `DELETE` operations which return only metadata (e.g. number of affected rows).
+The `params` property is a string containing a JSON-encoded array of parameters.
+
**Request**
```json
{
"operation": "exec",
"metadata": {
- "sql": "INSERT INTO foo (id, c1, ts) VALUES (1, 'demo', '2020-09-24T11:45:05Z07:00')"
+ "sql": "INSERT INTO foo (id, c1, ts) VALUES ($1, $2, $3)",
+ "params": "[1, \"demo\", \"2020-09-24T11:45:05Z07:00\"]"
}
}
```
@@ -97,7 +139,7 @@ The `exec` operation can be used for DDL operations (like table creation), as we
"start-time": "2020-09-24T11:13:46.405097Z",
"end-time": "2020-09-24T11:13:46.414519Z",
"rows-affected": "1",
- "sql": "INSERT INTO foo (id, c1, ts) VALUES (1, 'demo', '2020-09-24T11:45:05Z07:00')"
+ "sql": "INSERT INTO foo (id, c1, ts) VALUES ($1, $2, $3)"
}
}
```
@@ -106,13 +148,16 @@ The `exec` operation can be used for DDL operations (like table creation), as we
The `query` operation is used for `SELECT` statements, which returns the metadata along with data in a form of an array of row values.
+The `params` property is a string containing a JSON-encoded array of parameters.
+
**Request**
```json
{
"operation": "query",
"metadata": {
- "sql": "SELECT * FROM foo WHERE id < 3"
+ "sql": "SELECT * FROM foo WHERE id < $1",
+ "params": "[3]"
}
}
```
@@ -126,7 +171,7 @@ The `query` operation is used for `SELECT` statements, which returns the metadat
"duration": "432µs",
"start-time": "2020-09-24T11:13:46.405097Z",
"end-time": "2020-09-24T11:13:46.420566Z",
- "sql": "SELECT * FROM foo WHERE id < 3"
+ "sql": "SELECT * FROM foo WHERE id < $1"
},
"data": "[
[0,\"test-0\",\"2020-09-24T04:13:46Z\"],
@@ -138,7 +183,7 @@ The `query` operation is used for `SELECT` statements, which returns the metadat
### close
-Finally, the `close` operation can be used to explicitly close the DB connection and return it to the pool. This operation doesn't have any response.
+The `close` operation can be used to explicitly close the DB connection and return it to the pool. This operation doesn't have any response.
**Request**
@@ -148,8 +193,6 @@ Finally, the `close` operation can be used to explicitly close the DB connection
}
```
-> Note, the PostgreSQL binding itself doesn't prevent SQL injection, like with any database application, validate the input before executing query.
-
## Related links
- [Basic schema for a Dapr component]({{< ref component-schema >}})
diff --git a/daprdocs/content/en/reference/components-reference/supported-bindings/servicebusqueues.md b/daprdocs/content/en/reference/components-reference/supported-bindings/servicebusqueues.md
index 1f8ffe68b7b..e2c74a4ba4b 100644
--- a/daprdocs/content/en/reference/components-reference/supported-bindings/servicebusqueues.md
+++ b/daprdocs/content/en/reference/components-reference/supported-bindings/servicebusqueues.md
@@ -56,7 +56,7 @@ spec:
# value: "5"
# - name: publishInitialRetryIntervalInMs # Optional
# value: "500"
- # - name: direction
+ # - name: direction
# value: "input, output"
```
{{% alert title="Warning" color="warning" %}}
@@ -129,6 +129,47 @@ This component supports **output binding** with the following operations:
- `create`: publishes a message to the specified queue
+## Message metadata
+
+Azure Service Bus messages extend the Dapr message format with additional contextual metadata. Some metadata fields are set by Azure Service Bus itself (read-only) and others can be set by the client when publishing a message through an `Invoke` binding call with the `create` operation.
+
+### Sending a message with metadata
+
+To set Azure Service Bus metadata when sending a message, set the query parameters on the HTTP request or the gRPC metadata as documented [here]({{< ref "bindings_api.md" >}}).
+
+- `metadata.MessageId`
+- `metadata.CorrelationId`
+- `metadata.SessionId`
+- `metadata.Label`
+- `metadata.ReplyTo`
+- `metadata.PartitionKey`
+- `metadata.To`
+- `metadata.ContentType`
+- `metadata.ScheduledEnqueueTimeUtc`
+- `metadata.ReplyToSessionId`
+
+{{% alert title="Note" color="primary" %}}
+- The `metadata.MessageId` property does not set the `id` property of the cloud event returned by Dapr and should be treated in isolation.
+- The `metadata.ScheduledEnqueueTimeUtc` property supports the [RFC1123](https://www.rfc-editor.org/rfc/rfc1123) and [RFC3339](https://www.rfc-editor.org/rfc/rfc3339) timestamp formats.
+{{% /alert %}}
+
+### Receiving a message with metadata
+
+When Dapr calls your application, it attaches Azure Service Bus message metadata to the request using either HTTP headers or gRPC metadata.
+In addition to the [settable metadata listed above](#sending-a-message-with-metadata), you can also access the following read-only message metadata.
+
+- `metadata.DeliveryCount`
+- `metadata.LockedUntilUtc`
+- `metadata.LockToken`
+- `metadata.EnqueuedTimeUtc`
+- `metadata.SequenceNumber`
+
+To find out more details on the purpose of any of these metadata properties, please refer to [the official Azure Service Bus documentation](https://docs.microsoft.com/rest/api/servicebus/message-headers-and-properties#message-headers).
+
+{{% alert title="Note" color="primary" %}}
+All times are populated by the server and are not adjusted for clock skews.
+{{% /alert %}}
+
## Specifying a TTL per message
Time to live can be defined on a per-queue level (as illustrated above) or at the message level. The value defined at message level overwrites any value set at the queue level.
diff --git a/daprdocs/content/en/reference/components-reference/supported-configuration-stores/postgresql-configuration-store.md b/daprdocs/content/en/reference/components-reference/supported-configuration-stores/postgresql-configuration-store.md
index 43b9820e081..15fa476ae00 100644
--- a/daprdocs/content/en/reference/components-reference/supported-configuration-stores/postgresql-configuration-store.md
+++ b/daprdocs/content/en/reference/components-reference/supported-configuration-stores/postgresql-configuration-store.md
@@ -21,13 +21,36 @@ spec:
type: configuration.postgresql
version: v1
metadata:
- - name: connectionString
- value: "host=localhost user=postgres password=example port=5432 connect_timeout=10 database=config"
- - name: table # name of the table which holds configuration information
- value: "[your_configuration_table_name]"
- - name: connMaxIdleTime # max timeout for connection
- value : "15s"
-
+ # Connection string
+ - name: connectionString
+ value: "host=localhost user=postgres password=example port=5432 connect_timeout=10 database=config"
+ # Name of the table which holds configuration information
+ - name: table
+ value: "[your_configuration_table_name]"
+ # Timeout for database operations, in seconds (optional)
+ #- name: timeoutInSeconds
+ # value: 20
+ # Name of the table where to store the state (optional)
+ #- name: tableName
+ # value: "state"
+ # Name of the table where to store metadata used by Dapr (optional)
+ #- name: metadataTableName
+ # value: "dapr_metadata"
+ # Cleanup interval in seconds, to remove expired rows (optional)
+ #- name: cleanupIntervalInSeconds
+ # value: 3600
+ # Maximum number of connections pooled by this component (optional)
+ #- name: maxConns
+ # value: 0
+ # Max idle time for connections before they're closed (optional)
+ #- name: connectionMaxIdleTime
+ # value: 0
+ # Controls the default mode for executing queries. (optional)
+ #- name: queryExecMode
+ # value: ""
+ # Uncomment this if you wish to use PostgreSQL as a state store for actors (optional)
+ #- name: actorStateStore
+ # value: "true"
```
{{% alert title="Warning" color="warning" %}}
@@ -36,69 +59,101 @@ The above example uses secrets as plain strings. It is recommended to use a secr
## Spec metadata fields
-| Field | Required | Details | Example |
+### Authenticate using a connection string
+
+The following metadata options are **required** to authenticate using a PostgreSQL connection string.
+
+| Field | Required | Details | Example |
+|--------|:--------:|---------|---------|
+| `connectionString` | Y | The connection string for the PostgreSQL database. See the PostgreSQL [documentation on database connections](https://www.postgresql.org/docs/current/libpq-connect.html) for information on how to define a connection string. | `"host=localhost user=postgres password=example port=5432 connect_timeout=10 database=my_db"`
+
+### Authenticate using Azure AD
+
+Authenticating with Azure AD is supported with Azure Database for PostgreSQL. All authentication methods supported by Dapr can be used, including client credentials ("service principal") and Managed Identity.
+
+| Field | Required | Details | Example |
+|--------|:--------:|---------|---------|
+| `useAzureAD` | Y | Must be set to `true` to enable the component to retrieve access tokens from Azure AD. | `"true"` |
+| `connectionString` | Y | The connection string for the PostgreSQL database. This must contain the user, which corresponds to the name of the user created inside PostgreSQL that maps to the Azure AD identity; this is often the name of the corresponding principal (e.g. the name of the Azure AD application). This connection string should not contain any password. | `"host=mydb.postgres.database.azure.com user=myapplication port=5432 database=my_db sslmode=require"` |
+| `azureTenantId` | N | ID of the Azure AD tenant | `"cd4b2887-304c-…"` |
+| `azureClientId` | N | Client ID (application ID) | `"c7dd251f-811f-…"` |
+| `azureClientSecret` | N | Client secret (application password) | `"Ecy3X…"` |
+
+### Other metadata options
+
+| Field | Required | Details | Example |
|--------------------|:--------:|---------|---------|
-| connectionString | Y | The connection string for PostgreSQL. Default pool_max_conns = 5 | `"host=localhost user=postgres password=example port=5432 connect_timeout=10 database=dapr_test pool_max_conns=10"`
-| table | Y | Table name for configuration information, must be lowercased. | `configtable`
+| `table` | Y | Table name for configuration information, must be lowercased. | `configtable`
+| `maxConns` | N | Maximum number of connections pooled by this component. Set to 0 or lower to use the default value, which is the greater of 4 or the number of CPUs. | `"4"`
+| `connectionMaxIdleTime` | N | Max idle time before unused connections are automatically closed in the connection pool. By default, there's no value and this is left to the database driver to choose. | `"5m"`
+| `queryExecMode` | N | Controls the default mode for executing queries. By default Dapr uses the extended protocol and automatically prepares and caches prepared statements. However, this may be incompatible with proxies such as PGBouncer. In this case it may be preferable to use `exec` or `simple_protocol`. | `"simple_protocol"`
## Set up PostgreSQL as Configuration Store
-1. Start PostgreSQL Database
-1. Connect to the PostgreSQL database and setup a configuration table with following schema -
+1. Start the PostgreSQL Database
+1. Connect to the PostgreSQL database and setup a configuration table with following schema:
+
+ | Field | Datatype | Nullable |Details |
+ |--------------------|:--------:|---------|---------|
+ | KEY | VARCHAR | N |Holds `"Key"` of the configuration attribute |
+ | VALUE | VARCHAR | N |Holds Value of the configuration attribute |
+ | VERSION | VARCHAR | N | Holds version of the configuration attribute |
+ | METADATA | JSON | Y | Holds Metadata as JSON |
+
+ ```sql
+ CREATE TABLE IF NOT EXISTS table_name (
+ KEY VARCHAR NOT NULL,
+ VALUE VARCHAR NOT NULL,
+ VERSION VARCHAR NOT NULL,
+ METADATA JSON
+ );
+ ```
+
+3. Create a TRIGGER on configuration table. An example function to create a TRIGGER is as follows:
+
+ ```sql
+ CREATE OR REPLACE FUNCTION configuration_event() RETURNS TRIGGER AS $$
+ DECLARE
+ data json;
+ notification json;
+
+ BEGIN
+
+ IF (TG_OP = 'DELETE') THEN
+ data = row_to_json(OLD);
+ ELSE
+ data = row_to_json(NEW);
+ END IF;
+
+ notification = json_build_object(
+ 'table',TG_TABLE_NAME,
+ 'action', TG_OP,
+ 'data', data);
+ PERFORM pg_notify('config',notification::text);
+ RETURN NULL;
+ END;
+ $$ LANGUAGE plpgsql;
+ ```
+
+4. Create the trigger with data encapsulated in the field labeled as `data`:
+
+ ```sql
+ notification = json_build_object(
+ 'table',TG_TABLE_NAME,
+ 'action', TG_OP,
+ 'data', data
+ );
+ ```
-| Field | Datatype | Nullable |Details |
-|--------------------|:--------:|---------|---------|
-| KEY | VARCHAR | N |Holds `"Key"` of the configuration attribute |
-| VALUE | VARCHAR | N |Holds Value of the configuration attribute |
-| VERSION | VARCHAR | N | Holds version of the configuration attribute
-| METADATA | JSON | Y | Holds Metadata as JSON
-
-```console
-CREATE TABLE IF NOT EXISTS table_name (
- KEY VARCHAR NOT NULL,
- VALUE VARCHAR NOT NULL,
- VERSION VARCHAR NOT NULL,
- METADATA JSON );
-```
-3. Create a TRIGGER on configuration table. An example function to create a TRIGGER is as follows -
-```console
-CREATE OR REPLACE FUNCTION configuration_event() RETURNS TRIGGER AS $$
- DECLARE
- data json;
- notification json;
-
- BEGIN
-
- IF (TG_OP = 'DELETE') THEN
- data = row_to_json(OLD);
- ELSE
- data = row_to_json(NEW);
- END IF;
-
- notification = json_build_object(
- 'table',TG_TABLE_NAME,
- 'action', TG_OP,
- 'data', data);
-
- PERFORM pg_notify('config',notification::text);
- RETURN NULL;
- END;
-$$ LANGUAGE plpgsql;
-```
-4. Create the trigger with data encapsulated in the field labelled as `data`
-```ps
-notification = json_build_object(
- 'table',TG_TABLE_NAME,
- 'action', TG_OP,
- 'data', data);
-```
5. The channel mentioned as attribute to `pg_notify` should be used when subscribing for configuration notifications
6. Since this is a generic created trigger, map this trigger to `configuration table`
-```console
-CREATE TRIGGER config
-AFTER INSERT OR UPDATE OR DELETE ON configtable
- FOR EACH ROW EXECUTE PROCEDURE notify_event();
-```
+
+ ```sql
+ CREATE TRIGGER config
+ AFTER INSERT OR UPDATE OR DELETE ON configtable
+ FOR EACH ROW EXECUTE PROCEDURE notify_event();
+ ```
+
7. In the subscribe request add an additional metadata field with key as `pgNotifyChannel` and value should be set to same `channel name` mentioned in `pg_notify`. From the above example, it should be set to `config`
{{% alert title="Note" color="primary" %}}
@@ -106,12 +161,14 @@ When calling `subscribe` API, `metadata.pgNotifyChannel` should be used to speci
Any number of keys can be added to a subscription request. Each subscription uses an exclusive database connection. It is strongly recommended to subscribe to multiple keys within a single subscription. This helps optimize the number of connections to the database.
-Example of subscribe HTTP API -
-```ps
-curl --location --request GET 'http://:/configuration/mypostgresql/subscribe?key=&key=&metadata.pgNotifyChannel='
+Example of subscribe HTTP API:
+
+```sh
+curl -L 'http://:/configuration/mypostgresql/subscribe?key=&key=&metadata.pgNotifyChannel='
```
{{% /alert %}}
## Related links
+
- [Basic schema for a Dapr component]({{< ref component-schema >}})
- [Configuration building block]({{< ref configuration-api-overview >}})
diff --git a/daprdocs/content/en/reference/components-reference/supported-cryptography/kubernetes-secrets.md b/daprdocs/content/en/reference/components-reference/supported-cryptography/kubernetes-secrets.md
index 2e0ebc6f742..0471bd2c143 100644
--- a/daprdocs/content/en/reference/components-reference/supported-cryptography/kubernetes-secrets.md
+++ b/daprdocs/content/en/reference/components-reference/supported-cryptography/kubernetes-secrets.md
@@ -33,7 +33,11 @@ The above example uses secrets as plain strings. It is recommended to use a secr
## Spec metadata fields
-For the Kubernetes secret store component, there are no metadata attributes.
+| Field | Required | Details | Example |
+|--------------------|:--------:|---------|---------|
+| `defaultNamespace` | N | Default namespace to retrieve secrets from. If unset, the namespace must be specified for each key, as `namespace/secretName/key` | `"default-ns"` |
+| `kubeconfigPath` | N | The path to the kubeconfig file. If not specified, the component uses the default in-cluster config value | `"/path/to/kubeconfig"`
+
## Related links
[Cryptography building block]({{< ref cryptography >}})
\ No newline at end of file
diff --git a/daprdocs/content/en/reference/components-reference/supported-middleware/middleware-routeralias.md b/daprdocs/content/en/reference/components-reference/supported-middleware/middleware-routeralias.md
index 5b125be48f7..62d3083cf8d 100644
--- a/daprdocs/content/en/reference/components-reference/supported-middleware/middleware-routeralias.md
+++ b/daprdocs/content/en/reference/components-reference/supported-middleware/middleware-routeralias.md
@@ -7,12 +7,10 @@ aliases:
- /developing-applications/middleware/supported-middleware/middleware-routeralias/
---
-The router alias HTTP [middleware]({{< ref middleware.md >}}) component allows you to convert arbitrary HTTP routes arriving to Dapr to valid Dapr API endpoints.
+The router alias HTTP [middleware]({{< ref middleware.md >}}) component allows you to convert arbitrary HTTP routes arriving into Dapr to valid Dapr API endpoints.
## Component format
-The router alias middleware metadata contains name/value pairs, where the name describes the HTTP route to expect, and the value describes the corresponding Dapr API the request should be sent to.
-
```yaml
apiVersion: dapr.io/v1alpha1
kind: Component
@@ -22,17 +20,24 @@ spec:
type: middleware.http.routeralias
version: v1
metadata:
- - name: "/v1.0/mall/activity/info"
- value: "/v1.0/invoke/srv.default/method/mall/activity/info"
- - name: "/v1.0/hello/activity/{id}/info"
- value: "/v1.0/invoke/srv.default/method/hello/activity/info"
- - name: "/v1.0/hello/activity/{id}/user"
- value: "/v1.0/invoke/srv.default/method/hello/activity/user"
+ # String containing a JSON-encoded or YAML-encoded dictionary
+ # Each key in the dictionary is the incoming path, and the value is the path it's converted to
+ - name: "routes"
+ value: |
+ {
+ "/mall/activity/info": "/v1.0/invoke/srv.default/method/mall/activity/info",
+ "/hello/activity/{id}/info": "/v1.0/invoke/srv.default/method/hello/activity/info",
+ "/hello/activity/{id}/user": "/v1.0/invoke/srv.default/method/hello/activity/user"
+ }
```
-Example:
+In the example above, an incoming HTTP request for `/mall/activity/info?id=123` is transformed into `/v1.0/invoke/srv.default/method/mall/activity/info?id=123`.
+
+## Spec metadata fields
-An incoming HTTP request for `/v1.0/mall/activity/info?id=123` is transformed into `/v1.0/invoke/srv.default/method/mall/activity/info?id=123`.
+| Field | Details | Example |
+|-------|---------|---------|
+| `routes` | String containing a JSON-encoded or YAML-encoded dictionary. Each key in the dictionary is the incoming path, and the value is the path it's converted to. | See example above |
## Dapr configuration
diff --git a/daprdocs/content/en/reference/components-reference/supported-name-resolution/nr-kubernetes.md b/daprdocs/content/en/reference/components-reference/supported-name-resolution/nr-kubernetes.md
index 15f22643ff2..0f36aaacf2a 100644
--- a/daprdocs/content/en/reference/components-reference/supported-name-resolution/nr-kubernetes.md
+++ b/daprdocs/content/en/reference/components-reference/supported-name-resolution/nr-kubernetes.md
@@ -7,7 +7,22 @@ description: Detailed information on the Kubernetes DNS name resolution componen
## Configuration format
-Kubernetes DNS name resolution is configured automatically in [Kubernetes mode]({{< ref kubernetes >}}) by Dapr. There is no configuration needed to use Kubernetes DNS as your name resolution provider.
+Generally, Kubernetes DNS name resolution is configured automatically in [Kubernetes mode]({{< ref kubernetes >}}) by Dapr. There is no configuration needed to use Kubernetes DNS as your name resolution provider unless some overrides are necessary for the Kubernetes name resolution component.
+
+In the scenario that an override is required, within a [Dapr Configuration]({{< ref configuration-overview.md >}}) CRD, add a `nameResolution` spec and set the `component` field to `"kubernetes"`. The other configuration fields can be set as needed in a `configuration` map, as seen below.
+
+```yaml
+apiVersion: dapr.io/v1alpha1
+kind: Configuration
+metadata:
+ name: appconfig
+spec:
+ nameResolution:
+ component: "kubernetes"
+ configuration:
+ clusterDomain: "cluster.local" # Mutually exclusive with the template field
+ template: "{{.ID}}-{{.Data.region}}.internal:{{.Port}}" # Mutually exclusive with the clusterDomain field
+```
## Behaviour
@@ -15,7 +30,13 @@ The component resolves target apps by using the Kubernetes cluster's DNS provide
## Spec configuration fields
-Not applicable, as Kubernetes DNS is configured by Dapr when running in Kubernetes mode.
+The following configuration fields are available when overriding the default Kubernetes name resolution behavior.
+
+| Field | Required | Type | Details | Examples |
+|--------------|:--------:|-----:|:---------|----------|
+| clusterDomain | N | `string` | The cluster domain to be used for resolved addresses. This field is mutually exclusive with the `template` field.| `cluster.local`
+| template | N | `string` | A template string to be parsed when addresses are resolved using [text/template](https://pkg.go.dev/text/template#Template) . The template will be populated by the fields in the [ResolveRequest](https://github.com/dapr/components-contrib/blob/release-{{% dapr-latest-version short="true" %}}/nameresolution/requests.go#L20) struct. This field is mutually exclusive with `clusterDomain` field. | `{{.ID}}-{{.Data.region}}.{{.Namespace}}.internal:{{.Port}}`
+
## Related links
diff --git a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-apache-kafka.md b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-apache-kafka.md
index 48e2876c2d1..431a7bc5406 100644
--- a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-apache-kafka.md
+++ b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-apache-kafka.md
@@ -80,6 +80,7 @@ spec:
| oidcClientID | N | The OAuth2 client ID that has been provisioned in the identity provider. Required when `authType` is set to `oidc` | `dapr-kafka` |
| oidcClientSecret | N | The OAuth2 client secret that has been provisioned in the identity provider: Required when `authType` is set to `oidc` | `"KeFg23!"` |
| oidcScopes | N | Comma-delimited list of OAuth2/OIDC scopes to request with the access token. Recommended when `authType` is set to `oidc`. Defaults to `"openid"` | `"openid,kafka-prod"` |
+| oidcExtensions | N | String containing a JSON-encoded dictionary of OAuth2/OIDC extensions to request with the access token | `{"cluster":"kafka","poolid":"kafkapool"}` |
The `secretKeyRef` above is referencing a [kubernetes secrets store]({{< ref kubernetes-secret-store.md >}}) to access the tls information. Visit [here]({{< ref setup-secret-store.md >}}) to learn more about how to configure a secret store component.
diff --git a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-azure-eventhubs.md b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-azure-eventhubs.md
index 24aee2d4c1b..40d63bdfe75 100644
--- a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-azure-eventhubs.md
+++ b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-azure-eventhubs.md
@@ -117,14 +117,60 @@ spec:
value: "myeventhubstoragecontainer"
```
-## Sending multiple messages
+## Sending and receiving multiple messages
-Azure Event Hubs supports sending multiple messages in a single operation. To set the metadata for bulk operations, set the query parameters on the HTTP request or the gRPC metadata as documented [here]({{< ref pubsub_api >}})
+Azure Event Hubs supports sending and receiving multiple messages in a single operation using the bulk pub/sub API.
+
+### Configuring bulk publish
+
+To set the metadata for bulk publish operation, set the query parameters on the HTTP request or the gRPC metadata, [as documented in the API reference]({{< ref pubsub_api >}}).
| Metadata | Default |
|----------|---------|
| `metadata.maxBulkPubBytes` | `1000000` |
+### Configuring bulk subscribe
+
+When subscribing to a topic, you can configure `bulkSubscribe` options. Refer to [Subscribing messages in bulk]({{< ref "pubsub-bulk#subscribing-messages-in-bulk" >}}) for more details and to learn more about [the bulk subscribe API]({{< ref pubsub-bulk.md >}}).
+
+| Configuration | Default |
+|---------------|---------|
+| `maxMessagesCount` | `100` |
+| `maxAwaitDurationMs` | `10000` |
+
+## Configuring checkpoint frequency
+
+When subscribing to a topic, you can configure the checkpointing frequency in a partition by [setting the metadata in the HTTP or gRPC subscribe request]({{< ref "pubsub_api.md#http-request-2" >}}). This metadata enables checkpointing after the configured number of events within a partition event sequence. Disable checkpointing by setting the frequency to `0`.
+
+[Learn more about checkpointing](https://learn.microsoft.com/azure/event-hubs/event-hubs-features#checkpointing).
+
+| Metadata | Default |
+| -------- | ------- |
+| `metadata.checkPointFrequencyPerPartition` | `1` |
+
+The following example shows a sample subscription file for a [Declarative subscription]({{< ref "subscription-methods.md#declarative-subscriptions" >}}) using `checkPointFrequencyPerPartition` metadata. Similarly, you can also pass the metadata in [Programmatic subscriptions]({{< ref "subscription-methods.md#programmatic-subscriptions" >}}).
+
+```yaml
+apiVersion: dapr.io/v2alpha1
+kind: Subscription
+metadata:
+ name: order-pub-sub
+spec:
+ topic: orders
+ routes:
+ default: /checkout
+ pubsubname: order-pub-sub
+ metadata:
+ checkPointFrequencyPerPartition: 1
+scopes:
+- orderprocessing
+- checkout
+```
+
+{{% alert title="Note" color="primary" %}}
+When subscribing to a topic using `BulkSubscribe`, you configure the checkpointing to occur after the specified number of _batches_, instead of events, where _batch_ means the collection of events received in a single request.
+{{% /alert %}}
+
## Create an Azure Event Hub
Follow the instructions on the [documentation](https://docs.microsoft.com/azure/event-hubs/event-hubs-create) to set up Azure Event Hubs.
diff --git a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-azure-servicebus-queues.md b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-azure-servicebus-queues.md
index 8ff7dbd5615..e98df4814f3 100644
--- a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-azure-servicebus-queues.md
+++ b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-azure-servicebus-queues.md
@@ -11,7 +11,7 @@ aliases:
To set up Azure Service Bus Queues pub/sub, create a component of type `pubsub.azure.servicebus.queues`. See the [pub/sub broker component file]({{< ref setup-pubsub.md >}}) to learn how ConsumerID is automatically generated. Read the [How-to: Publish and Subscribe guide]({{< ref "howto-publish-subscribe.md#step-1-setup-the-pubsub-component" >}}) on how to create and apply a pub/sub configuration.
-> This component uses queues on Azure Service Bus; see the official documentation for the differences between [topics and queues](https://learn.microsoft.com/azure/service-bus-messaging/service-bus-queues-topics-subscriptions).
+> This component uses queues on Azure Service Bus; see the official documentation for the differences between [topics and queues](https://learn.microsoft.com/azure/service-bus-messaging/service-bus-queues-topics-subscriptions).
> For using topics, see the [Azure Service Bus Topics pubsub component]({{< ref "setup-azure-servicebus-topics" >}}).
### Connection String Authentication
@@ -122,7 +122,7 @@ Azure Service Bus messages extend the Dapr message format with additional contex
### Sending a message with metadata
-To set Azure Service Bus metadata when sending a message, set the query parameters on the HTTP request or the gRPC metadata as documented [here](https://docs.dapr.io/reference/api/pubsub_api/#metadata).
+To set Azure Service Bus metadata when sending a message, set the query parameters on the HTTP request or the gRPC metadata as documented [here]({{< ref "pubsub_api.md#metadata" >}}).
- `metadata.MessageId`
- `metadata.CorrelationId`
@@ -135,13 +135,14 @@ To set Azure Service Bus metadata when sending a message, set the query paramete
- `metadata.ScheduledEnqueueTimeUtc`
- `metadata.ReplyToSessionId`
-> **Note:** The `metadata.MessageId` property does not set the `id` property of the cloud event returned by Dapr and should be treated in isolation.
-
-> **Note:** The `metadata.ScheduledEnqueueTimeUtc` property supports the [RFC1123](https://www.rfc-editor.org/rfc/rfc1123) and [RFC3339](https://www.rfc-editor.org/rfc/rfc3339) timestamp formats.
+{{% alert title="Note" color="primary" %}}
+- The `metadata.MessageId` property does not set the `id` property of the cloud event returned by Dapr and should be treated in isolation.
+- The `metadata.ScheduledEnqueueTimeUtc` property supports the [RFC1123](https://www.rfc-editor.org/rfc/rfc1123) and [RFC3339](https://www.rfc-editor.org/rfc/rfc3339) timestamp formats.
+{{% /alert %}}
### Receiving a message with metadata
-When Dapr calls your application, it will attach Azure Service Bus message metadata to the request using either HTTP headers or gRPC metadata.
+When Dapr calls your application, it attaches Azure Service Bus message metadata to the request using either HTTP headers or gRPC metadata.
In addition to the [settable metadata listed above](#sending-a-message-with-metadata), you can also access the following read-only message metadata.
- `metadata.DeliveryCount`
@@ -152,7 +153,9 @@ In addition to the [settable metadata listed above](#sending-a-message-with-meta
To find out more details on the purpose of any of these metadata properties, please refer to [the official Azure Service Bus documentation](https://docs.microsoft.com/rest/api/servicebus/message-headers-and-properties#message-headers).
-> Note: that all times are populated by the server and are not adjusted for clock skews.
+{{% alert title="Note" color="primary" %}}
+All times are populated by the server and are not adjusted for clock skews.
+{{% /alert %}}
## Sending and receiving multiple messages
diff --git a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-rabbitmq.md b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-rabbitmq.md
index c966f69885a..56c2a26836f 100644
--- a/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-rabbitmq.md
+++ b/daprdocs/content/en/reference/components-reference/supported-pubsub/setup-rabbitmq.md
@@ -62,6 +62,10 @@ spec:
value: false
- name: ttlInSeconds
value: 60
+ - name: clientName
+ value: "{podName}"
+ - name: heartBeat
+ value: 10s
```
{{% alert title="Warning" color="warning" %}}
@@ -96,6 +100,8 @@ The above example uses secrets as plain strings. It is recommended to use a secr
| caCert | Required for using TLS | Certificate Authority (CA) certificate in PEM format for verifying server TLS certificates. | `"-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----"`
| clientCert | Required for using TLS | TLS client certificate in PEM format. Must be used with `clientKey`. | `"-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----"`
| clientKey | Required for using TLS | TLS client key in PEM format. Must be used with `clientCert`. Can be `secretKeyRef` to use a secret reference. | `"-----BEGIN RSA PRIVATE KEY-----\n\n-----END RSA PRIVATE KEY-----"`
+| clientName | N | This RabbitMQ [client-provided connection name](https://www.rabbitmq.com/connections.html#client-provided-names) is a custom identifier. If set, the identifier is mentioned in RabbitMQ server log entries and management UI. Can be set to `{uuid}`, `{podName}`, or `{appID}`, which the Dapr runtime replaces with the real value. | `"app1"`, `{uuid}`, `{podName}`, `{appID}`
+| heartBeat | N | Defines the heartbeat interval with the server, detecting the aliveness of the peer TCP connection with the RabbitMQ server. Defaults to `10s`. | `"10s"`
## Communication using TLS
diff --git a/daprdocs/content/en/reference/components-reference/supported-secret-stores/kubernetes-secret-store.md b/daprdocs/content/en/reference/components-reference/supported-secret-stores/kubernetes-secret-store.md
index b629503d827..a44a6de9a60 100644
--- a/daprdocs/content/en/reference/components-reference/supported-secret-stores/kubernetes-secret-store.md
+++ b/daprdocs/content/en/reference/components-reference/supported-secret-stores/kubernetes-secret-store.md
@@ -32,7 +32,12 @@ spec:
```
## Spec metadata fields
-For the Kubernetes secret store component, there are no metadata attributes.
+
+| Field | Required | Details | Example |
+|--------------------|:--------:|---------|---------|
+| `defaultNamespace` | N | Default namespace to retrieve secrets from. If unset, the `namespace` must be specified in each request metadata or via environment variable `NAMESPACE` | `"default-ns"` |
+| `kubeconfigPath` | N | The path to the kubeconfig file. If not specified, the store uses the default in-cluster config value | `"/path/to/kubeconfig"`
+
## Optional per-request metadata properties
diff --git a/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-postgresql.md b/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-postgresql.md
index 6e1bfad9216..0d5c682422e 100644
--- a/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-postgresql.md
+++ b/daprdocs/content/en/reference/components-reference/supported-state-stores/setup-postgresql.md
@@ -7,13 +7,7 @@ aliases:
- "/operations/components/setup-state-store/supported-state-stores/setup-postgresql/"
---
-This component allows using PostgreSQL (Postgres) as state store for Dapr.
-
-## Create a Dapr component
-
-Create a file called `postgresql.yaml`, paste the following and replace the `` value with your connection string. The connection string is a standard PostgreSQL connection string. For example, `"host=localhost user=postgres password=example port=5432 connect_timeout=10 database=dapr_test"`. See the PostgreSQL [documentation on database connections](https://www.postgresql.org/docs/current/libpq-connect.html) for information on how to define a connection string.
-
-If you want to also configure PostgreSQL to store actors, add the `actorStateStore` option as in the example below.
+This component allows using PostgreSQL (Postgres) as state store for Dapr. See [this guide]({{< ref "howto-get-save-state.md#step-1-setup-a-state-store" >}}) on how to create and apply a state store configuration.
```yaml
apiVersion: dapr.io/v1alpha1
@@ -24,42 +18,72 @@ spec:
type: state.postgresql
version: v1
metadata:
- # Connection string
- - name: connectionString
- value: ""
- # Timeout for database operations, in seconds (optional)
- #- name: timeoutInSeconds
- # value: 20
- # Name of the table where to store the state (optional)
- #- name: tableName
- # value: "state"
- # Name of the table where to store metadata used by Dapr (optional)
- #- name: metadataTableName
- # value: "dapr_metadata"
- # Cleanup interval in seconds, to remove expired rows (optional)
- #- name: cleanupIntervalInSeconds
- # value: 3600
- # Max idle time for connections before they're closed (optional)
- #- name: connectionMaxIdleTime
- # value: 0
- # Uncomment this if you wish to use PostgreSQL as a state store for actors (optional)
- #- name: actorStateStore
- # value: "true"
+ # Connection string
+ - name: connectionString
+ value: ""
+ # Timeout for database operations, in seconds (optional)
+ #- name: timeoutInSeconds
+ # value: 20
+ # Name of the table where to store the state (optional)
+ #- name: tableName
+ # value: "state"
+ # Name of the table where to store metadata used by Dapr (optional)
+ #- name: metadataTableName
+ # value: "dapr_metadata"
+ # Cleanup interval in seconds, to remove expired rows (optional)
+ #- name: cleanupIntervalInSeconds
+ # value: 3600
+ # Maximum number of connections pooled by this component (optional)
+ #- name: maxConns
+ # value: 0
+ # Max idle time for connections before they're closed (optional)
+ #- name: connectionMaxIdleTime
+ # value: 0
+ # Controls the default mode for executing queries. (optional)
+ #- name: queryExecMode
+ # value: ""
+ # Uncomment this if you wish to use PostgreSQL as a state store for actors (optional)
+ #- name: actorStateStore
+ # value: "true"
```
+
{{% alert title="Warning" color="warning" %}}
The above example uses secrets as plain strings. It is recommended to use a secret store for the secrets as described [here]({{< ref component-secrets.md >}}).
{{% /alert %}}
## Spec metadata fields
+### Authenticate using a connection string
+
+The following metadata options are **required** to authenticate using a PostgreSQL connection string.
+
+| Field | Required | Details | Example |
+|--------|:--------:|---------|---------|
+| `connectionString` | Y | The connection string for the PostgreSQL database. See the PostgreSQL [documentation on database connections](https://www.postgresql.org/docs/current/libpq-connect.html) for information on how to define a connection string. | `"host=localhost user=postgres password=example port=5432 connect_timeout=10 database=my_db"`
+
+### Authenticate using Azure AD
+
+Authenticating with Azure AD is supported with Azure Database for PostgreSQL. All authentication methods supported by Dapr can be used, including client credentials ("service principal") and Managed Identity.
+
+| Field | Required | Details | Example |
+|--------|:--------:|---------|---------|
+| `useAzureAD` | Y | Must be set to `true` to enable the component to retrieve access tokens from Azure AD. | `"true"` |
+| `connectionString` | Y | The connection string for the PostgreSQL database. This must contain the user, which corresponds to the name of the user created inside PostgreSQL that maps to the Azure AD identity; this is often the name of the corresponding principal (e.g. the name of the Azure AD application). This connection string should not contain any password. | `"host=mydb.postgres.database.azure.com user=myapplication port=5432 database=my_db sslmode=require"` |
+| `azureTenantId` | N | ID of the Azure AD tenant | `"cd4b2887-304c-…"` |
+| `azureClientId` | N | Client ID (application ID) | `"c7dd251f-811f-…"` |
+| `azureClientSecret` | N | Client secret (application password) | `"Ecy3X…"` |
+
+### Other metadata options
+
| Field | Required | Details | Example |
|--------------------|:--------:|---------|---------|
-| `connectionString` | Y | The connection string for the PostgreSQL database | `"host=localhost user=postgres password=example port=5432 connect_timeout=10 database=dapr_test"`
| `timeoutInSeconds` | N | Timeout, in seconds, for all database operations. Defaults to `20` | `30`
| `tableName` | N | Name of the table where the data is stored. Defaults to `state`. Can optionally have the schema name as prefix, such as `public.state` | `"state"`, `"public.state"`
| `metadataTableName` | N | Name of the table Dapr uses to store a few metadata properties. Defaults to `dapr_metadata`. Can optionally have the schema name as prefix, such as `public.dapr_metadata` | `"dapr_metadata"`, `"public.dapr_metadata"`
| `cleanupIntervalInSeconds` | N | Interval, in seconds, to clean up rows with an expired TTL. Default: `3600` (i.e. 1 hour). Setting this to values <=0 disables the periodic cleanup. | `1800`, `-1`
+| `maxConns` | N | Maximum number of connections pooled by this component. Set to 0 or lower to use the default value, which is the greater of 4 or the number of CPUs. | `"4"`
| `connectionMaxIdleTime` | N | Max idle time before unused connections are automatically closed in the connection pool. By default, there's no value and this is left to the database driver to choose. | `"5m"`
+| `queryExecMode` | N | Controls the default mode for executing queries. By default Dapr uses the extended protocol and automatically prepares and caches prepared statements. However, this may be incompatible with proxies such as PGBouncer. In this case it may be preferable to use `exec` or `simple_protocol`. | `"simple_protocol"`
| `actorStateStore` | N | Consider this state store for actors. Defaults to `"false"` | `"true"`, `"false"`
## Setup PostgreSQL
@@ -70,20 +94,21 @@ The above example uses secrets as plain strings. It is recommended to use a secr
1. Run an instance of PostgreSQL. You can run a local instance of PostgreSQL in Docker CE with the following command:
- This example does not describe a production configuration because it sets the password in plain text and the user name is left as the PostgreSQL default of "postgres".
-
```bash
docker run -p 5432:5432 -e POSTGRES_PASSWORD=example postgres
```
+ > This example does not describe a production configuration because it sets the password in plain text and the user name is left as the PostgreSQL default of "postgres".
+
2. Create a database for state data.
Either the default "postgres" database can be used, or create a new database for storing state data.
To create a new database in PostgreSQL, run the following SQL command:
- ```SQL
- CREATE DATABASE dapr_test;
+ ```sql
+ CREATE DATABASE my_dapr;
```
+
{{% /codetab %}}
{{% /tabs %}}
diff --git a/daprdocs/content/en/reference/environment/_index.md b/daprdocs/content/en/reference/environment/_index.md
index 762bb3592a8..9ae9b7b22f1 100644
--- a/daprdocs/content/en/reference/environment/_index.md
+++ b/daprdocs/content/en/reference/environment/_index.md
@@ -26,4 +26,5 @@ The following table lists the environment variables used by the Dapr runtime, CL
| OTEL_EXPORTER_OTLP_INSECURE | OpenTelemetry Tracing | Sets the connection to the endpoint as unencrypted. (`true`, `false`) |
| OTEL_EXPORTER_OTLP_PROTOCOL | OpenTelemetry Tracing | The OTLP protocol to use Transport protocol. (`grpc`, `http/protobuf`, `http/json`) |
| DAPR_COMPONENTS_SOCKETS_FOLDER | Dapr runtime and the .NET, Go, and Java pluggable component SDKs | The location or path where Dapr looks for Pluggable Components Unix Domain Socket files. If unset this location defaults to `/tmp/dapr-components-sockets` |
-| DAPR_COMPONENTS_SOCKETS_EXTENSION | .NET and Java pluggable component SDKs | A per-SDK configuration that indicates the default file extension applied to socket files created by the SDKs. Not a Dapr-enforced behavior. |
\ No newline at end of file
+| DAPR_COMPONENTS_SOCKETS_EXTENSION | .NET and Java pluggable component SDKs | A per-SDK configuration that indicates the default file extension applied to socket files created by the SDKs. Not a Dapr-enforced behavior. |
+| DAPR_PLACEMENT_METADATA_ENABLED | Dapr placement | Enable an endpoint for the Placement service that exposes placement table information on actor usage. Set to `true` to enable in self-hosted mode. [Learn more about the Placement API]({{< ref placement_api.md >}}) |
\ No newline at end of file
diff --git a/daprdocs/content/en/reference/resource-specs/httpendpoints-schema.md b/daprdocs/content/en/reference/resource-specs/httpendpoints-schema.md
index f6bced2a3c1..6517d4795d8 100644
--- a/daprdocs/content/en/reference/resource-specs/httpendpoints-schema.md
+++ b/daprdocs/content/en/reference/resource-specs/httpendpoints-schema.md
@@ -27,6 +27,19 @@ spec:
secretKeyRef:
name:
key:
+ clientTLS:
+ rootCA:
+ secretKeyRef:
+ name:
+ key:
+ certificate:
+ secretKeyRef:
+ name:
+ key:
+ privateKey:
+ secretKeyRef:
+ name:
+ key:
scopes: # Optional
-
auth: # Optional
@@ -39,6 +52,7 @@ auth: # Optional
|--------------------|:--------:|---------|---------|
| baseUrl | Y | Base URL of the non-Dapr endpoint | `"https://api.github.com"`, `"http://api.github.com"`
| headers | N | HTTP request headers for service invocation | `name: "Accept-Language" value: "en-US"` `name: "Authorization" secretKeyRef.name: "my-secret" secretKeyRef.key: "myGithubToken" `
+| clientTLS | N | Enables TLS authentication to an endpoint with any standard combination of root certificate, client certificate, and private key
## Related links
diff --git a/daprdocs/data/components/state_stores/generic.yaml b/daprdocs/data/components/state_stores/generic.yaml
index a4ffcd52cb2..e0b685b648c 100644
--- a/daprdocs/data/components/state_stores/generic.yaml
+++ b/daprdocs/data/components/state_stores/generic.yaml
@@ -45,8 +45,8 @@
- component: etcd
link: setup-etcd
state: Beta
- version: v1
- since: "1.11"
+ version: v2
+ since: "1.12"
features:
crud: true
transactions: true
diff --git a/daprdocs/layouts/shortcodes/dapr-latest-version.html b/daprdocs/layouts/shortcodes/dapr-latest-version.html
index 9b4bf780551..109d34c73d2 100644
--- a/daprdocs/layouts/shortcodes/dapr-latest-version.html
+++ b/daprdocs/layouts/shortcodes/dapr-latest-version.html
@@ -1 +1 @@
-{{- if .Get "short" }}1.11{{ else if .Get "long" }}1.11.3{{ else if .Get "cli" }}1.11.0{{ else }}1.11.3{{ end -}}
+{{- if .Get "short" }}1.12{{ else if .Get "long" }}1.12.0{{ else if .Get "cli" }}1.12.0{{ else }}1.12.0{{ end -}}
diff --git a/daprdocs/static/images/state-management-outbox.png b/daprdocs/static/images/state-management-outbox.png
new file mode 100644
index 00000000000..4ad434512b8
Binary files /dev/null and b/daprdocs/static/images/state-management-outbox.png differ
diff --git a/sdkdocs/dotnet b/sdkdocs/dotnet
index 2449bcd6691..99d874a2b13 160000
--- a/sdkdocs/dotnet
+++ b/sdkdocs/dotnet
@@ -1 +1 @@
-Subproject commit 2449bcd6691eb49825e0e8e9dff50bd50fd41c2e
+Subproject commit 99d874a2b138af020df099a0fc0a09a7d0597fae
diff --git a/sdkdocs/go b/sdkdocs/go
index ad25580bcfb..e16e0350a52 160000
--- a/sdkdocs/go
+++ b/sdkdocs/go
@@ -1 +1 @@
-Subproject commit ad25580bcfb638d56237faec0543565b4d0e134f
+Subproject commit e16e0350a52349b5a05138edc0b58e3be78ee753
diff --git a/sdkdocs/java b/sdkdocs/java
index 9dc842faba3..5e45aa86b81 160000
--- a/sdkdocs/java
+++ b/sdkdocs/java
@@ -1 +1 @@
-Subproject commit 9dc842faba3486e518babc29f7fbbca79248bfab
+Subproject commit 5e45aa86b81748bf1e6efdbf7f52c20645a12435
diff --git a/sdkdocs/js b/sdkdocs/js
index 7686ab039bc..df7eff281a5 160000
--- a/sdkdocs/js
+++ b/sdkdocs/js
@@ -1 +1 @@
-Subproject commit 7686ab039bcc30f375f922960020d403dd2d3867
+Subproject commit df7eff281a5a1395a7967c658a5707e8dfb2b99e
diff --git a/sdkdocs/python b/sdkdocs/python
index 64e834b0a06..6171b67db60 160000
--- a/sdkdocs/python
+++ b/sdkdocs/python
@@ -1 +1 @@
-Subproject commit 64e834b0a06f5b218efc941b8caf3683968b7208
+Subproject commit 6171b67db60d51704ed8425ae71dda9226bf1255