From 8f01f26820adc32a215191a10f2139382a23b6a7 Mon Sep 17 00:00:00 2001 From: salaboy Date: Fri, 24 Oct 2025 11:51:00 +0100 Subject: [PATCH 01/17] adding support for HttpPipeline Signed-off-by: salaboy --- .../io/dapr/testcontainers/Configuration.java | 12 +++++- .../io/dapr/testcontainers/HttpPipeline.java | 37 +++++++++++++++++++ .../converter/ConfigurationYamlConverter.java | 11 ++++++ .../ConfigurationYamlConverterTest.java | 19 ++++++++-- 4 files changed, 73 insertions(+), 6 deletions(-) create mode 100644 testcontainers-dapr/src/main/java/io/dapr/testcontainers/HttpPipeline.java diff --git a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/Configuration.java b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/Configuration.java index 4a431400c4..2d0d830155 100644 --- a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/Configuration.java +++ b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/Configuration.java @@ -20,11 +20,11 @@ public class Configuration { private final String name; private final TracingConfigurationSettings tracing; private final AppHttpPipeline appHttpPipeline; + private final HttpPipeline httpPipeline; // @TODO: add secrets https://github.com/dapr/java-sdk/issues/1280 // @TODO: add metrics https://github.com/dapr/java-sdk/issues/1281 // @TODO: add logging https://github.com/dapr/java-sdk/issues/1282 - // @TODO: add middleware httpPipeline https://github.com/dapr/java-sdk/issues/1283 // @TODO: add nameResolution https://github.com/dapr/java-sdk/issues/1284 // @TODO: add disallow components https://github.com/dapr/java-sdk/issues/1285 // @TODO: add mtls https://github.com/dapr/java-sdk/issues/1286 @@ -36,11 +36,15 @@ public class Configuration { * @param tracing TracingConfigParameters tracing configuration * parameters. * @param appHttpPipeline AppHttpPipeline middleware configuration. + * @param httpPipeline HttpPipeline middleware configuration. */ - public Configuration(String name, TracingConfigurationSettings tracing, AppHttpPipeline appHttpPipeline) { + public Configuration(String name, TracingConfigurationSettings tracing, + AppHttpPipeline appHttpPipeline, + HttpPipeline httpPipeline) { this.name = name; this.tracing = tracing; this.appHttpPipeline = appHttpPipeline; + this.httpPipeline = httpPipeline; } public String getName() { @@ -54,4 +58,8 @@ public TracingConfigurationSettings getTracing() { public AppHttpPipeline getAppHttpPipeline() { return appHttpPipeline; } + + public HttpPipeline getHttpPipeline() { + return httpPipeline; + } } diff --git a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/HttpPipeline.java b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/HttpPipeline.java new file mode 100644 index 0000000000..6cace36f04 --- /dev/null +++ b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/HttpPipeline.java @@ -0,0 +1,37 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package io.dapr.testcontainers; + +import java.util.Collections; +import java.util.List; + +public class HttpPipeline implements ConfigurationSettings { + private List handlers; + + /** + * Creates an HttpPipeline. + * + * @param handlers List of handlers for the HttpPipeline + */ + public HttpPipeline(List handlers) { + if (handlers != null) { + this.handlers = Collections.unmodifiableList(handlers); + } + } + + public List getHandlers() { + return handlers; + } + +} diff --git a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/converter/ConfigurationYamlConverter.java b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/converter/ConfigurationYamlConverter.java index 005f0b50f9..7d78dd7d25 100644 --- a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/converter/ConfigurationYamlConverter.java +++ b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/converter/ConfigurationYamlConverter.java @@ -15,6 +15,7 @@ import io.dapr.testcontainers.AppHttpPipeline; import io.dapr.testcontainers.Configuration; +import io.dapr.testcontainers.HttpPipeline; import io.dapr.testcontainers.ListEntry; import io.dapr.testcontainers.OtelTracingConfigurationSettings; import io.dapr.testcontainers.TracingConfigurationSettings; @@ -77,6 +78,16 @@ public String convert(Configuration configuration) { } + HttpPipeline httpPipeline = configuration.getHttpPipeline(); + if (httpPipeline != null) { + + Map httpPipelineMap = new LinkedHashMap<>(); + List handlers = httpPipeline.getHandlers(); + httpPipelineMap.put("handlers", handlers); + configurationSpec.put("httpPipeline", httpPipelineMap); + + } + AppHttpPipeline appHttpPipeline = configuration.getAppHttpPipeline(); if (appHttpPipeline != null) { diff --git a/testcontainers-dapr/src/test/java/io/dapr/testcontainers/converter/ConfigurationYamlConverterTest.java b/testcontainers-dapr/src/test/java/io/dapr/testcontainers/converter/ConfigurationYamlConverterTest.java index fe798611b5..8f15d465de 100644 --- a/testcontainers-dapr/src/test/java/io/dapr/testcontainers/converter/ConfigurationYamlConverterTest.java +++ b/testcontainers-dapr/src/test/java/io/dapr/testcontainers/converter/ConfigurationYamlConverterTest.java @@ -16,6 +16,7 @@ import io.dapr.testcontainers.AppHttpPipeline; import io.dapr.testcontainers.Configuration; import io.dapr.testcontainers.DaprContainer; +import io.dapr.testcontainers.HttpPipeline; import io.dapr.testcontainers.ListEntry; import io.dapr.testcontainers.OtelTracingConfigurationSettings; import io.dapr.testcontainers.TracingConfigurationSettings; @@ -48,15 +49,21 @@ public void testConfigurationToYaml() { ); - List handlers = new ArrayList<>(); - handlers.add(new ListEntry("alias", "middleware.http.routeralias")); + List appHttpHandlers = new ArrayList<>(); + appHttpHandlers.add(new ListEntry("alias", "middleware.http.routeralias")); - AppHttpPipeline appHttpPipeline = new AppHttpPipeline(handlers); + AppHttpPipeline appHttpPipeline = new AppHttpPipeline(appHttpHandlers); + + List httpHandlers = new ArrayList<>(); + + //Notice that this needs to be different objects, if not Snake YAML will add a reference to the object + HttpPipeline httpPipeline = new HttpPipeline(httpHandlers); + httpHandlers.add(new ListEntry("alias", "middleware.http.routeralias")); DaprContainer dapr = new DaprContainer(DAPR_RUNTIME_IMAGE_TAG) .withAppName("dapr-app") .withAppPort(8081) - .withConfiguration(new Configuration("my-config", tracing, appHttpPipeline)) + .withConfiguration(new Configuration("my-config", tracing, 
appHttpPipeline, httpPipeline)) .withAppChannelAddress("host.testcontainers.internal"); Configuration configuration = dapr.getConfiguration(); @@ -76,6 +83,10 @@ public void testConfigurationToYaml() { + " endpointAddress: localhost:4317\n" + " isSecure: false\n" + " protocol: grpc\n" + + " httpPipeline:\n" + + " handlers:\n" + + " - name: alias\n" + + " type: middleware.http.routeralias\n" + " appHttpPipeline:\n" + " handlers:\n" + " - name: alias\n" From da7b542ccd7205443521de0e6807323e2b69fd77 Mon Sep 17 00:00:00 2001 From: salaboy Date: Fri, 24 Oct 2025 11:58:45 +0100 Subject: [PATCH 02/17] using default constructor Signed-off-by: salaboy --- .../java/io/dapr/testcontainers/Configuration.java | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/Configuration.java b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/Configuration.java index 2d0d830155..6bfb56643b 100644 --- a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/Configuration.java +++ b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/Configuration.java @@ -29,6 +29,20 @@ public class Configuration { // @TODO: add disallow components https://github.com/dapr/java-sdk/issues/1285 // @TODO: add mtls https://github.com/dapr/java-sdk/issues/1286 + + /** + * Creates a new configuration. + * + * @param name Configuration name. + * @param tracing TracingConfigParameters tracing configuration + * parameters. + * @param appHttpPipeline AppHttpPipeline middleware configuration. + */ + public Configuration(String name, TracingConfigurationSettings tracing, + AppHttpPipeline appHttpPipeline) { + this(name, tracing, appHttpPipeline, null); + } + /** * Creates a new configuration. * From e6dededf7c94bfc9ff75de678956f6fac6923dac Mon Sep 17 00:00:00 2001 From: salaboy Date: Fri, 24 Oct 2025 14:33:05 +0100 Subject: [PATCH 03/17] fixing codestyle Signed-off-by: salaboy --- .../src/main/java/io/dapr/testcontainers/Configuration.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/Configuration.java b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/Configuration.java index 6bfb56643b..d8a883dbee 100644 --- a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/Configuration.java +++ b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/Configuration.java @@ -28,8 +28,7 @@ public class Configuration { // @TODO: add nameResolution https://github.com/dapr/java-sdk/issues/1284 // @TODO: add disallow components https://github.com/dapr/java-sdk/issues/1285 // @TODO: add mtls https://github.com/dapr/java-sdk/issues/1286 - - + /** * Creates a new configuration. 
* From f95faae147dbd82999192cd83aa184ab4fb7eeb9 Mon Sep 17 00:00:00 2001 From: Matheus Cruz <56329339+mcruzdev@users.noreply.github.com> Date: Fri, 24 Oct 2025 15:00:56 -0300 Subject: [PATCH 04/17] Add statestore example with Outbox pattern (#1582) * Add statestore example with Outbox pattern Signed-off-by: Matheus Cruz * Clean events after each test Signed-off-by: Matheus Cruz * Add license header Signed-off-by: Matheus Cruz * Apply pull request suggestions Signed-off-by: Matheus Cruz --------- Signed-off-by: Matheus Cruz Co-authored-by: salaboy Signed-off-by: salaboy --- .../pubsub/outbox/DaprPubSubOutboxIT.java | 130 ++++++++++++++++++ .../testcontainers/pubsub/outbox/Product.java | 63 +++++++++ .../outbox/ProductWebhookController.java | 37 +++++ .../outbox/TestPubsubOutboxApplication.java | 23 ++++ .../examples/producer/OrderDTO.java | 31 +++++ .../producer/OrdersRestController.java | 33 ++++- .../producer/DaprTestContainersConfig.java | 14 +- .../examples/producer/ProducerAppIT.java | 27 +++- .../TestSubscriberRestController.java | 9 ++ 9 files changed, 360 insertions(+), 7 deletions(-) create mode 100644 sdk-tests/src/test/java/io/dapr/it/testcontainers/pubsub/outbox/DaprPubSubOutboxIT.java create mode 100644 sdk-tests/src/test/java/io/dapr/it/testcontainers/pubsub/outbox/Product.java create mode 100644 sdk-tests/src/test/java/io/dapr/it/testcontainers/pubsub/outbox/ProductWebhookController.java create mode 100644 sdk-tests/src/test/java/io/dapr/it/testcontainers/pubsub/outbox/TestPubsubOutboxApplication.java create mode 100644 spring-boot-examples/producer-app/src/main/java/io/dapr/springboot/examples/producer/OrderDTO.java diff --git a/sdk-tests/src/test/java/io/dapr/it/testcontainers/pubsub/outbox/DaprPubSubOutboxIT.java b/sdk-tests/src/test/java/io/dapr/it/testcontainers/pubsub/outbox/DaprPubSubOutboxIT.java new file mode 100644 index 0000000000..423ae05e55 --- /dev/null +++ b/sdk-tests/src/test/java/io/dapr/it/testcontainers/pubsub/outbox/DaprPubSubOutboxIT.java @@ -0,0 +1,130 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package io.dapr.it.testcontainers.pubsub.outbox; + +import io.dapr.client.DaprClient; +import io.dapr.client.domain.ExecuteStateTransactionRequest; +import io.dapr.client.domain.State; +import io.dapr.client.domain.TransactionalStateOperation; +import io.dapr.it.testcontainers.DaprClientFactory; +import io.dapr.testcontainers.Component; +import io.dapr.testcontainers.DaprContainer; +import io.dapr.testcontainers.DaprLogLevel; +import org.assertj.core.api.Assertions; +import org.awaitility.Awaitility; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.test.context.DynamicPropertyRegistry; +import org.springframework.test.context.DynamicPropertySource; +import org.testcontainers.containers.Network; +import org.testcontainers.containers.wait.strategy.Wait; +import org.testcontainers.junit.jupiter.Container; +import org.testcontainers.junit.jupiter.Testcontainers; + +import java.time.Duration; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Random; + +import static io.dapr.it.testcontainers.ContainerConstants.DAPR_RUNTIME_IMAGE_TAG; + +@SpringBootTest( + webEnvironment = SpringBootTest.WebEnvironment.DEFINED_PORT, + classes = { + TestPubsubOutboxApplication.class + } +) +@Testcontainers +@Tag("testcontainers") +public class DaprPubSubOutboxIT { + + private static final Logger LOG = LoggerFactory.getLogger(DaprPubSubOutboxIT.class); + private static final Network DAPR_NETWORK = Network.newNetwork(); + private static final Random RANDOM = new Random(); + private static final int PORT = RANDOM.nextInt(1000) + 8000; + private static final String APP_FOUND_MESSAGE_PATTERN = ".*application discovered on port.*"; + + private static final String PUBSUB_APP_ID = "pubsub-dapr-app"; + private static final String PUBSUB_NAME = "pubsub"; + + // topics + private static final String TOPIC_PRODUCT_CREATED = "product.created"; + private static final String STATE_STORE_NAME = "kvstore"; + + @Container + private static final DaprContainer DAPR_CONTAINER = new DaprContainer(DAPR_RUNTIME_IMAGE_TAG) + .withAppName(PUBSUB_APP_ID) + .withNetwork(DAPR_NETWORK) + .withComponent(new Component(STATE_STORE_NAME, "state.in-memory", "v1", Map.of( + "outboxPublishPubsub", PUBSUB_NAME, + "outboxPublishTopic", TOPIC_PRODUCT_CREATED + ))) + .withComponent(new Component(PUBSUB_NAME, "pubsub.in-memory", "v1", Collections.emptyMap())) + .withDaprLogLevel(DaprLogLevel.DEBUG) + .withLogConsumer(outputFrame -> LOG.info(outputFrame.getUtf8String())) + .withAppChannelAddress("host.testcontainers.internal") + .withAppPort(PORT); + + /** + * Expose the Dapr ports to the host. 
+ * + * @param registry the dynamic property registry + */ + @DynamicPropertySource + static void daprProperties(DynamicPropertyRegistry registry) { + registry.add("dapr.http.endpoint", DAPR_CONTAINER::getHttpEndpoint); + registry.add("dapr.grpc.endpoint", DAPR_CONTAINER::getGrpcEndpoint); + registry.add("server.port", () -> PORT); + } + + + @BeforeEach + public void setUp() { + org.testcontainers.Testcontainers.exposeHostPorts(PORT); + } + + + @Test + public void shouldPublishUsingOutbox() throws Exception { + Wait.forLogMessage(APP_FOUND_MESSAGE_PATTERN, 1).waitUntilReady(DAPR_CONTAINER); + + try (DaprClient client = DaprClientFactory.createDaprClientBuilder(DAPR_CONTAINER).build()) { + + ExecuteStateTransactionRequest transactionRequest = new ExecuteStateTransactionRequest(STATE_STORE_NAME); + + Product pencil = new Product("Pencil", 1.50); + State state = new State<>( + pencil.getId(), pencil, null + ); + + TransactionalStateOperation operation = new TransactionalStateOperation<>( + TransactionalStateOperation.OperationType.UPSERT, state + ); + + transactionRequest.setOperations(List.of(operation)); + + client.executeStateTransaction(transactionRequest).block(); + + Awaitility.await().atMost(Duration.ofSeconds(10)) + .ignoreExceptions() + .untilAsserted(() -> Assertions.assertThat(ProductWebhookController.EVENT_LIST).isNotEmpty()); + } + } + +} diff --git a/sdk-tests/src/test/java/io/dapr/it/testcontainers/pubsub/outbox/Product.java b/sdk-tests/src/test/java/io/dapr/it/testcontainers/pubsub/outbox/Product.java new file mode 100644 index 0000000000..62c37d320c --- /dev/null +++ b/sdk-tests/src/test/java/io/dapr/it/testcontainers/pubsub/outbox/Product.java @@ -0,0 +1,63 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package io.dapr.it.testcontainers.pubsub.outbox; + +import java.util.UUID; + +public class Product { + private String id; + private String name; + private double price; + + public Product() { + } + + public Product(String name, double price) { + this.id = UUID.randomUUID().toString(); + this.name = name; + this.price = price; + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public double getPrice() { + return price; + } + + public void setPrice(double price) { + this.price = price; + } + + @Override + public String toString() { + return "Product{" + + "id='" + id + '\'' + + ", name='" + name + '\'' + + ", price=" + price + + '}'; + } +} diff --git a/sdk-tests/src/test/java/io/dapr/it/testcontainers/pubsub/outbox/ProductWebhookController.java b/sdk-tests/src/test/java/io/dapr/it/testcontainers/pubsub/outbox/ProductWebhookController.java new file mode 100644 index 0000000000..283dabf887 --- /dev/null +++ b/sdk-tests/src/test/java/io/dapr/it/testcontainers/pubsub/outbox/ProductWebhookController.java @@ -0,0 +1,37 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ +package io.dapr.it.testcontainers.pubsub.outbox; + +import io.dapr.Topic; +import io.dapr.client.domain.CloudEvent; +import org.springframework.web.bind.annotation.PostMapping; +import org.springframework.web.bind.annotation.RequestBody; +import org.springframework.web.bind.annotation.RequestMapping; +import org.springframework.web.bind.annotation.RestController; + +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; + +@RestController +@RequestMapping("/webhooks/products") +public class ProductWebhookController { + + public static final List> EVENT_LIST = new CopyOnWriteArrayList<>(); + + @PostMapping("/created") + @Topic(name = "product.created", pubsubName = "pubsub") + public void handleEvent(@RequestBody CloudEvent cloudEvent) { + System.out.println("Received product.created event: " + cloudEvent.getData()); + EVENT_LIST.add(cloudEvent); + } +} diff --git a/sdk-tests/src/test/java/io/dapr/it/testcontainers/pubsub/outbox/TestPubsubOutboxApplication.java b/sdk-tests/src/test/java/io/dapr/it/testcontainers/pubsub/outbox/TestPubsubOutboxApplication.java new file mode 100644 index 0000000000..88a64f6582 --- /dev/null +++ b/sdk-tests/src/test/java/io/dapr/it/testcontainers/pubsub/outbox/TestPubsubOutboxApplication.java @@ -0,0 +1,23 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ +package io.dapr.it.testcontainers.pubsub.outbox; + +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; + +@SpringBootApplication +public class TestPubsubOutboxApplication { + public static void main(String[] args) { + SpringApplication.run(TestPubsubOutboxApplication.class, args); + } +} diff --git a/spring-boot-examples/producer-app/src/main/java/io/dapr/springboot/examples/producer/OrderDTO.java b/spring-boot-examples/producer-app/src/main/java/io/dapr/springboot/examples/producer/OrderDTO.java new file mode 100644 index 0000000000..68868e4310 --- /dev/null +++ b/spring-boot-examples/producer-app/src/main/java/io/dapr/springboot/examples/producer/OrderDTO.java @@ -0,0 +1,31 @@ +package io.dapr.springboot.examples.producer; + +public class OrderDTO { + + private String id; + private String item; + private Integer amount; + + public OrderDTO() { + } + + public OrderDTO(String id, String item, Integer amount) { + this.id = id; + this.item = item; + this.amount = amount; + } + + public String getId() { + return id; + } + + + public String getItem() { + return item; + } + + public Integer getAmount() { + return amount; + } + +} diff --git a/spring-boot-examples/producer-app/src/main/java/io/dapr/springboot/examples/producer/OrdersRestController.java b/spring-boot-examples/producer-app/src/main/java/io/dapr/springboot/examples/producer/OrdersRestController.java index 90384b8c05..15a1854635 100644 --- a/spring-boot-examples/producer-app/src/main/java/io/dapr/springboot/examples/producer/OrdersRestController.java +++ b/spring-boot-examples/producer-app/src/main/java/io/dapr/springboot/examples/producer/OrdersRestController.java @@ -13,6 +13,10 @@ package io.dapr.springboot.examples.producer; +import io.dapr.client.DaprClient; +import io.dapr.client.domain.ExecuteStateTransactionRequest; +import io.dapr.client.domain.State; +import io.dapr.client.domain.TransactionalStateOperation; import io.dapr.spring.data.repository.config.EnableDaprRepositories; import io.dapr.spring.messaging.DaprMessagingTemplate; import org.slf4j.Logger; @@ -24,11 +28,13 @@ import org.springframework.web.bind.annotation.RequestParam; import org.springframework.web.bind.annotation.RestController; +import java.util.List; + @RestController @EnableDaprRepositories public class OrdersRestController { - private final Logger logger = LoggerFactory.getLogger(OrdersRestController.class); + private static final Logger logger = LoggerFactory.getLogger(OrdersRestController.class); @Autowired private OrderRepository repository; @@ -36,6 +42,9 @@ public class OrdersRestController { @Autowired private DaprMessagingTemplate messagingTemplate; + @Autowired + private DaprClient daprClient; + /** * Store orders from customers. 
* @param order from the customer @@ -51,6 +60,28 @@ public String storeOrder(@RequestBody Order order) { return "Order Stored and Event Published"; } + @PostMapping("/orders/outbox") + public String storeOrderOutbox(@RequestBody Order order) { + logger.info("Storing Order with Outbox: {}", order); + ExecuteStateTransactionRequest transactionRequest = new ExecuteStateTransactionRequest("kvstore-outbox"); + + State state = new State<>( + order.getId(), order, null + ); + + TransactionalStateOperation operation = new TransactionalStateOperation<>( + TransactionalStateOperation.OperationType.UPSERT, state + ); + + transactionRequest.setOperations(List.of(operation)); + + daprClient.executeStateTransaction(transactionRequest).block(); + + logger.info("Order Stored with Outbox: {}", order); + + return "Order Stored with Outbox"; + } + @GetMapping("/orders") public Iterable getAll() { return repository.findAll(); diff --git a/spring-boot-examples/producer-app/src/test/java/io/dapr/springboot/examples/producer/DaprTestContainersConfig.java b/spring-boot-examples/producer-app/src/test/java/io/dapr/springboot/examples/producer/DaprTestContainersConfig.java index 95603a36f1..5de8d19504 100644 --- a/spring-boot-examples/producer-app/src/test/java/io/dapr/springboot/examples/producer/DaprTestContainersConfig.java +++ b/spring-boot-examples/producer-app/src/test/java/io/dapr/springboot/examples/producer/DaprTestContainersConfig.java @@ -41,7 +41,7 @@ public class DaprTestContainersConfig { static final String CONNECTION_STRING = "host=postgres user=postgres password=password port=5432 connect_timeout=10 database=dapr_db_repository"; static final Map STATE_STORE_PROPERTIES = createStateStoreProperties(); - + static final Map STATE_STORE_OUTBOX_PROPERTIES = createStateStoreOutboxProperties(); static final Map BINDING_PROPERTIES = Collections.singletonMap("connectionString", CONNECTION_STRING); @@ -118,9 +118,8 @@ public DaprContainer daprContainer(Network daprNetwork, PostgreSQLContainer p .withComponent(new Component("kvstore", "state.postgresql", "v1", STATE_STORE_PROPERTIES)) .withComponent(new Component("kvbinding", "bindings.postgresql", "v1", BINDING_PROPERTIES)) .withComponent(new Component("pubsub", "pubsub.rabbitmq", "v1", rabbitMqProperties)) + .withComponent(new Component("kvstore-outbox", "state.postgresql", "v1", STATE_STORE_OUTBOX_PROPERTIES)) .withSubscription(new Subscription("app", "pubsub", "topic", "/subscribe")) -// .withDaprLogLevel(DaprLogLevel.DEBUG) -// .withLogConsumer(outputFrame -> System.out.println(outputFrame.getUtf8String())) .withAppPort(8080) .withAppHealthCheckPath("/actuator/health") .withAppChannelAddress("host.testcontainers.internal") @@ -139,5 +138,14 @@ private static Map createStateStoreProperties() { return result; } + private static Map createStateStoreOutboxProperties() { + Map result = new HashMap<>(); + result.put("connectionString", CONNECTION_STRING); + result.put("outboxPublishPubsub", "pubsub"); + result.put("outboxPublishTopic", "outbox-topic"); + + return result; + } + } diff --git a/spring-boot-examples/producer-app/src/test/java/io/dapr/springboot/examples/producer/ProducerAppIT.java b/spring-boot-examples/producer-app/src/test/java/io/dapr/springboot/examples/producer/ProducerAppIT.java index 24fa34c6fb..a0c561b931 100644 --- a/spring-boot-examples/producer-app/src/test/java/io/dapr/springboot/examples/producer/ProducerAppIT.java +++ b/spring-boot-examples/producer-app/src/test/java/io/dapr/springboot/examples/producer/ProducerAppIT.java @@ -21,6 
+21,7 @@ import io.dapr.testcontainers.DaprContainer; import io.restassured.RestAssured; import io.restassured.http.ContentType; +import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.springframework.beans.factory.annotation.Autowired; @@ -67,12 +68,32 @@ void setUp() { } + @AfterEach + void cleanUp() { + controller.getAllEvents().clear(); + } @Test - void testOrdersEndpointAndMessaging() throws InterruptedException, IOException { + void testOrdersOutboxEndpointAndMessaging() { + OrderDTO order = new OrderDTO("outbox-order-123", "Lorem ipsum", 1000); + + given().contentType(ContentType.JSON) + .body(order) + .when() + .post("/orders/outbox") + .then() + .statusCode(200); + + await().atMost(Duration.ofSeconds(15)) + .until(controller.getAllEvents()::size, equalTo(1)); + + } + @Test + void testOrdersEndpointAndMessaging() { + OrderDTO order = new OrderDTO("abc-123", "the mars volta LP", 1); given().contentType(ContentType.JSON) - .body("{ \"id\": \"abc-123\",\"item\": \"the mars volta LP\",\"amount\": 1}") + .body(order) .when() .post("/orders") .then() @@ -118,7 +139,7 @@ void testOrdersEndpointAndMessaging() throws InterruptedException, IOException { } @Test - void testCustomersWorkflows() throws InterruptedException, IOException { + void testCustomersWorkflows() { given().contentType(ContentType.JSON) .body("{\"customerName\": \"salaboy\"}") diff --git a/spring-boot-examples/producer-app/src/test/java/io/dapr/springboot/examples/producer/TestSubscriberRestController.java b/spring-boot-examples/producer-app/src/test/java/io/dapr/springboot/examples/producer/TestSubscriberRestController.java index 0f39dd9a97..0f69e28aa2 100644 --- a/spring-boot-examples/producer-app/src/test/java/io/dapr/springboot/examples/producer/TestSubscriberRestController.java +++ b/spring-boot-examples/producer-app/src/test/java/io/dapr/springboot/examples/producer/TestSubscriberRestController.java @@ -38,6 +38,15 @@ public void subscribe(@RequestBody CloudEvent cloudEvent){ events.add(cloudEvent); } + @PostMapping("outbox-subscribe") + @Topic(pubsubName = "pubsub", name = "outbox-topic") + public void outboxSubscribe(@RequestBody CloudEvent cloudEvent) { + // we are receiving the Order with CloudEvent as String due to the + // following issue https://github.com/dapr/java-sdk/issues/1580 + logger.info("Outbox Order Event Received: " + cloudEvent.getData()); + events.add(cloudEvent); + } + public List getAllEvents() { return events; } From 1b99814e2d83b1489e8693cacd1fd4396347012f Mon Sep 17 00:00:00 2001 From: salaboy Date: Fri, 24 Oct 2025 23:00:48 +0100 Subject: [PATCH 05/17] adding new method signature plus test (#1570) * adding new method signature plus test Signed-off-by: salaboy * re adding imports Signed-off-by: salaboy * fixing style Signed-off-by: salaboy * checking empty metadata Signed-off-by: salaboy * copy meta for safety and check if key is present Signed-off-by: salaboy * Centralize Maven dependency version management (#1564) Signed-off-by: salaboy * Fix dependencies multi app build and add proper test deps (#1572) * Force Jackson version to override the SB Jackson version Signed-off-by: Artur Ciocanu * Move all the Jackson deps to parent POM. Signed-off-by: Artur Ciocanu * Ensure app JAR build order Signed-off-by: Artur Ciocanu * Remove explicit Jackson from sdk-tests module. Signed-off-by: Artur Ciocanu * Make sure test is used for test dependencies. Signed-off-by: Artur Ciocanu * Remove extra Jackson modules. 
Signed-off-by: Artur Ciocanu --------- Signed-off-by: Artur Ciocanu Signed-off-by: salaboy * reverting pom Signed-off-by: salaboy * fix codestyle Signed-off-by: salaboy * using metaCopy Signed-off-by: salaboy --------- Signed-off-by: salaboy Signed-off-by: Artur Ciocanu Co-authored-by: artur-ciocanu Signed-off-by: salaboy --- .../io/dapr/client/AbstractDaprClient.java | 22 +++++++ .../main/java/io/dapr/client/DaprClient.java | 15 +++++ .../io/dapr/client/DaprClientGrpcTest.java | 58 +++++++++++++++++++ 3 files changed, 95 insertions(+) diff --git a/sdk/src/main/java/io/dapr/client/AbstractDaprClient.java b/sdk/src/main/java/io/dapr/client/AbstractDaprClient.java index 203f3f00a2..c9c4bf8ca4 100644 --- a/sdk/src/main/java/io/dapr/client/AbstractDaprClient.java +++ b/sdk/src/main/java/io/dapr/client/AbstractDaprClient.java @@ -51,6 +51,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -509,6 +510,27 @@ public Mono saveState(String storeName, String key, String etag, Object va return this.saveBulkState(storeName, Collections.singletonList(state)); } + /** + * {@inheritDoc} + */ + @Override + public Mono saveState(String storeName, String key, String etag, Object value, Map meta, + StateOptions options) { + Map metaCopy = null; + if (meta == null) { + metaCopy = new HashMap<>(); + } else { + metaCopy = new HashMap<>(meta); + } + + if (value != null) { + metaCopy.putIfAbsent("contentType", stateSerializer.getContentType()); + } + + State state = new State<>(key, value, etag, metaCopy, options); + return this.saveBulkState(storeName, Collections.singletonList(state)); + } + /** * {@inheritDoc} */ diff --git a/sdk/src/main/java/io/dapr/client/DaprClient.java b/sdk/src/main/java/io/dapr/client/DaprClient.java index f51a6d0ffb..6ac6086e76 100644 --- a/sdk/src/main/java/io/dapr/client/DaprClient.java +++ b/sdk/src/main/java/io/dapr/client/DaprClient.java @@ -498,6 +498,21 @@ Mono executeStateTransaction(String storeName, */ Mono saveState(String storeName, String key, String etag, Object value, StateOptions options); + + /** + * Save/Update a state. + * + * @param storeName The name of the state store. + * @param key The key of the state. + * @param etag The etag to be used. + * @param value The value of the state. + * @param meta The metadata to be set to the state. + * @param options The Options to use for each state. + * @return a Mono plan of type Void. + */ + Mono saveState(String storeName, String key, String etag, Object value, Map meta, + StateOptions options); + /** * Delete a state. 
* diff --git a/sdk/src/test/java/io/dapr/client/DaprClientGrpcTest.java b/sdk/src/test/java/io/dapr/client/DaprClientGrpcTest.java index 54240d70ca..7ac6ab3cf5 100644 --- a/sdk/src/test/java/io/dapr/client/DaprClientGrpcTest.java +++ b/sdk/src/test/java/io/dapr/client/DaprClientGrpcTest.java @@ -1241,6 +1241,64 @@ public void saveStateNoOptionsTest() { result.block(); } + @Test + public void saveStateWithMetaTest() { + String key = "key1"; + String etag = "ETag1"; + String value = "State value"; + Map metadata = new HashMap<>(); + metadata.put("custom", "customValue"); + ArgumentCaptor argument = ArgumentCaptor.forClass(DaprProtos.SaveStateRequest.class); + doAnswer((Answer) invocation -> { + StreamObserver observer = (StreamObserver) invocation.getArguments()[1]; + observer.onNext(Empty.getDefaultInstance()); + observer.onCompleted(); + return null; + }).when(daprStub).saveState(argument.capture(), any()); + + + Mono result = client.saveState(STATE_STORE_NAME, key, etag, value, metadata,null); + result.block(); + assertEquals("customValue", argument.getValue().getStates(0).getMetadata().get("custom")); + } + + @Test + public void saveStateWithMetaContentTypeTest() { + String key = "key1"; + String etag = "ETag1"; + String value = "State value"; + Map metadata = new HashMap<>(); + ArgumentCaptor argument = ArgumentCaptor.forClass(DaprProtos.SaveStateRequest.class); + doAnswer((Answer) invocation -> { + StreamObserver observer = (StreamObserver) invocation.getArguments()[1]; + observer.onNext(Empty.getDefaultInstance()); + observer.onCompleted(); + return null; + }).when(daprStub).saveState(argument.capture(), any()); + + + Mono result = client.saveState(STATE_STORE_NAME, key, etag, value, metadata,null); + result.block(); + assertEquals("application/json", argument.getValue().getStates(0).getMetadata().get("contentType")); + } + + @Test + public void saveStateWithMetaEmptyTest() { + String key = "key1"; + String etag = "ETag1"; + ArgumentCaptor argument = ArgumentCaptor.forClass(DaprProtos.SaveStateRequest.class); + doAnswer((Answer) invocation -> { + StreamObserver observer = (StreamObserver) invocation.getArguments()[1]; + observer.onNext(Empty.getDefaultInstance()); + observer.onCompleted(); + return null; + }).when(daprStub).saveState(argument.capture(), any()); + + Mono result = client.saveState(STATE_STORE_NAME, key, etag, null, null,null); + result.block(); + assertTrue(argument.getValue().getStates(0).getMetadata().keySet().isEmpty()); + } + @Test public void saveStateTest() { String key = "key1"; From a117f6e602f0ead3b34293dc64dbd257c24a8699 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Oct 2025 07:00:49 -0700 Subject: [PATCH 06/17] Bump actions/upload-artifact from 4 to 5 (#1587) Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4 to 5. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/v4...v5) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-version: '5' dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: salaboy --- .github/workflows/build.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index e87df82e81..96d7563b26 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -34,12 +34,12 @@ jobs: - name: Codecov uses: codecov/codecov-action@v5.5.1 - name: Upload test report for sdk - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v5 with: name: test-dapr-java-sdk-jdk${{ env.JDK_VER }} path: sdk/target/jacoco-report/ - name: Upload test report for sdk-actors - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v5 with: name: report-dapr-java-sdk-actors-jdk${{ env.JDK_VER }} path: sdk-actors/target/jacoco-report/ @@ -157,13 +157,13 @@ jobs: DOCKER_HOST: ${{steps.setup_docker.outputs.sock}} - name: Upload failsafe test report for sdk-tests on failure if: ${{ failure() && steps.integration_tests.conclusion == 'failure' }} - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v5 with: name: failsafe-report-sdk-tests-jdk${{ matrix.java }}-sb${{ matrix.spring-boot-version }} path: sdk-tests/target/failsafe-reports - name: Upload surefire test report for sdk-tests on failure if: ${{ failure() && steps.integration_tests.conclusion == 'failure' }} - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@v5 with: name: surefire-report-sdk-tests-jdk${{ matrix.java }}-sb${{ matrix.spring-boot-version }} path: sdk-tests/target/surefire-reports From a9045beea1ddf2cfa3caef673a7ceb74cde6cedc Mon Sep 17 00:00:00 2001 From: wlfgang <14792753+wlfgang@users.noreply.github.com> Date: Mon, 27 Oct 2025 18:00:43 -0400 Subject: [PATCH 07/17] Add gRPC support to Dapr testcontainer (#1586) * Add gRPC support to Dapr testcontainer Signed-off-by: wlfgang * Avoid using null to indicate default value Signed-off-by: wlfgang --------- Signed-off-by: wlfgang Co-authored-by: artur-ciocanu Co-authored-by: wlfgang Signed-off-by: salaboy --- .../io/dapr/testcontainers/DaprContainer.java | 18 +++++++++++++++--- .../dapr/testcontainers/DaprContainerTest.java | 18 ++++++++++++++++++ 2 files changed, 33 insertions(+), 3 deletions(-) diff --git a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/DaprContainer.java b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/DaprContainer.java index 51dce6f076..823f6f61f7 100644 --- a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/DaprContainer.java +++ b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/DaprContainer.java @@ -48,7 +48,6 @@ public class DaprContainer extends GenericContainer { private static final Logger LOGGER = LoggerFactory.getLogger(DaprContainer.class); private static final int DAPRD_DEFAULT_HTTP_PORT = 3500; private static final int DAPRD_DEFAULT_GRPC_PORT = 50001; - private static final DaprProtocol DAPR_PROTOCOL = DaprProtocol.HTTP; private static final DockerImageName DEFAULT_IMAGE_NAME = DockerImageName.parse(DAPR_RUNTIME_IMAGE_TAG); private static final Yaml YAML_MAPPER = YamlMapperFactory.create(); @@ -76,6 +75,7 @@ public class DaprContainer extends GenericContainer { private DaprSchedulerContainer schedulerContainer; private String appName; private Integer appPort; + private DaprProtocol appProtocol = DaprProtocol.HTTP; // default from docs private String appHealthCheckPath; private Integer appHealthCheckProbeInterval = 5; //default from docs 
private Integer appHealthCheckProbeTimeout = 500; //default from docs @@ -126,6 +126,11 @@ public DaprContainer withAppPort(Integer port) { return this; } + public DaprContainer withAppProtocol(DaprProtocol protocol) { + this.appProtocol = protocol; + return this; + } + public DaprContainer withAppChannelAddress(String appChannelAddress) { this.appChannelAddress = appChannelAddress; return this; @@ -308,8 +313,6 @@ protected void configure() { cmds.add("--app-id"); cmds.add(appName); cmds.add("--dapr-listen-addresses=0.0.0.0"); - cmds.add("--app-protocol"); - cmds.add(DAPR_PROTOCOL.getName()); cmds.add("--placement-host-address"); cmds.add(placementService + ":50005"); cmds.add("--scheduler-host-address"); @@ -325,6 +328,11 @@ protected void configure() { cmds.add(Integer.toString(appPort)); } + if (appProtocol != null) { + cmds.add("--app-protocol"); + cmds.add(appProtocol.getName()); + } + if (appHealthCheckPath != null && !appHealthCheckPath.isEmpty()) { cmds.add("--enable-app-health-check"); cmds.add("--app-health-check-path"); @@ -413,6 +421,10 @@ public Integer getAppPort() { return appPort; } + public DaprProtocol getAppProtocol() { + return appProtocol; + } + public String getAppHealthCheckPath() { return appHealthCheckPath; } diff --git a/testcontainers-dapr/src/test/java/io/dapr/testcontainers/DaprContainerTest.java b/testcontainers-dapr/src/test/java/io/dapr/testcontainers/DaprContainerTest.java index c930e98511..8281aae80e 100644 --- a/testcontainers-dapr/src/test/java/io/dapr/testcontainers/DaprContainerTest.java +++ b/testcontainers-dapr/src/test/java/io/dapr/testcontainers/DaprContainerTest.java @@ -78,4 +78,22 @@ public void appHealthParametersDefaultsTest(){ assertEquals(3, dapr2.getAppHealthCheckThreshold()); } + + @Test + public void appProtocolDefaultsTest() { + try (DaprContainer daprContainer = new DaprContainer(DAPR_RUNTIME_IMAGE_TAG) + .withAppName("dapr-app")) { + daprContainer.configure(); + assertEquals(DaprProtocol.HTTP, daprContainer.getAppProtocol()); + } + + DaprProtocol protocol = DaprProtocol.GRPC; + try (DaprContainer daprContainer = new DaprContainer(DAPR_RUNTIME_IMAGE_TAG) + .withAppName("dapr-app4") + .withAppProtocol(protocol)) { + daprContainer.configure(); + assertEquals(protocol, daprContainer.getAppProtocol()); + } + + } } From 94ac2eb0c747086393a6d7b01596a2e71c2be6f0 Mon Sep 17 00:00:00 2001 From: artur-ciocanu Date: Sun, 2 Nov 2025 20:35:36 +0200 Subject: [PATCH 08/17] Use dependencies BOM and remove duplicates. 
(#1588) Signed-off-by: Artur Ciocanu Signed-off-by: salaboy --- pom.xml | 30 ++++++++++++++++++- sdk-springboot/pom.xml | 12 -------- sdk-tests/pom.xml | 8 ----- spring-boot-examples/pom.xml | 12 -------- .../workflows/multi-app/pom.xml | 12 -------- spring-boot-examples/workflows/pom.xml | 12 -------- 6 files changed, 29 insertions(+), 57 deletions(-) diff --git a/pom.xml b/pom.xml index 0a6a5360e2..b45636c3e1 100644 --- a/pom.xml +++ b/pom.xml @@ -48,7 +48,7 @@ 2.0 1.21.3 - 3.4.6 + 3.4.9 6.2.7 1.7.0 @@ -203,6 +203,34 @@ pom import + + io.opentelemetry + opentelemetry-bom + ${opentelemetry.version} + pom + import + + + com.fasterxml.jackson + jackson-bom + ${jackson.version} + pom + import + + + org.testcontainers + testcontainers-bom + ${testcontainers.version} + pom + import + + + com.google.protobuf + protobuf-bom + ${protobuf.version} + pom + import + org.testcontainers postgresql diff --git a/sdk-springboot/pom.xml b/sdk-springboot/pom.xml index 295b7ae255..8e9b4bb852 100644 --- a/sdk-springboot/pom.xml +++ b/sdk-springboot/pom.xml @@ -15,18 +15,6 @@ dapr-sdk-springboot SDK extension for Springboot - - - - org.springframework.boot - spring-boot-dependencies - ${springboot.version} - pom - import - - - - diff --git a/sdk-tests/pom.xml b/sdk-tests/pom.xml index 553e4a9a8d..1b66f08071 100644 --- a/sdk-tests/pom.xml +++ b/sdk-tests/pom.xml @@ -29,15 +29,7 @@ - - - org.springframework.boot - spring-boot-dependencies - ${springboot.version} - pom - import - org.junit.platform junit-platform-commons diff --git a/spring-boot-examples/pom.xml b/spring-boot-examples/pom.xml index 198d71b355..928edb1df2 100644 --- a/spring-boot-examples/pom.xml +++ b/spring-boot-examples/pom.xml @@ -22,18 +22,6 @@ workflows - - - - org.springframework.boot - spring-boot-dependencies - ${springboot.version} - pom - import - - - - diff --git a/spring-boot-examples/workflows/multi-app/pom.xml b/spring-boot-examples/workflows/multi-app/pom.xml index e066d03373..f6798d7437 100644 --- a/spring-boot-examples/workflows/multi-app/pom.xml +++ b/spring-boot-examples/workflows/multi-app/pom.xml @@ -22,18 +22,6 @@ worker-two - - - - org.springframework.boot - spring-boot-dependencies - ${springboot.version} - pom - import - - - - diff --git a/spring-boot-examples/workflows/pom.xml b/spring-boot-examples/workflows/pom.xml index d3d0f6c837..1c711fbc12 100644 --- a/spring-boot-examples/workflows/pom.xml +++ b/spring-boot-examples/workflows/pom.xml @@ -21,18 +21,6 @@ multi-app - - - - org.springframework.boot - spring-boot-dependencies - ${springboot.version} - pom - import - - - - From f865f6f3609430a6035eb5e4b8d6a7f2e2400fba Mon Sep 17 00:00:00 2001 From: Cassie Coyle Date: Thu, 6 Nov 2025 14:35:26 +0000 Subject: [PATCH 09/17] Examples + Docs for App API Token authentication for gRPC and HTTP (#1589) * example Signed-off-by: Cassandra Coyle * docs for example Signed-off-by: Cassandra Coyle --------- Signed-off-by: Cassandra Coyle Signed-off-by: salaboy --- .../en/java-sdk-docs/java-client/_index.md | 118 ++++++++++++++++++ .../java-sdk-docs/java-client/properties.md | 9 +- .../io/dapr/examples/bindings/http/README.md | 16 +++ .../main/java/io/dapr/examples/jobs/README.md | 16 +++ .../java/io/dapr/examples/pubsub/README.md | 82 ++++++++++++ .../dapr/examples/pubsub/grpc/Subscriber.java | 5 +- .../pubsub/grpc/SubscriberGrpcService.java | 41 ++++++ 7 files changed, 283 insertions(+), 4 deletions(-) diff --git a/daprdocs/content/en/java-sdk-docs/java-client/_index.md b/daprdocs/content/en/java-sdk-docs/java-client/_index.md index 
c162b16bed..5f33eb41e3 100644 --- a/daprdocs/content/en/java-sdk-docs/java-client/_index.md +++ b/daprdocs/content/en/java-sdk-docs/java-client/_index.md @@ -632,6 +632,124 @@ try (DaprClient client = new DaprClientBuilder().build()) { Learn more about the [Dapr Java SDK packages available to add to your Java applications](https://dapr.github.io/java-sdk/). +## Security + +### App API Token Authentication + +The building blocks like pubsub, input bindings, or jobs require Dapr to make incoming calls to your application, you can secure these requests using [Dapr App API Token Authentication]({{% ref app-api-token.md %}}). This ensures that only Dapr can invoke your application's endpoints. + +#### Understanding the two tokens + +Dapr uses two different tokens for securing communication. See [Properties]({{% ref properties.md %}}) for detailed information about both tokens: + +- **`DAPR_API_TOKEN`** (Your app → Dapr sidecar): Automatically handled by the Java SDK when using `DaprClient` +- **`APP_API_TOKEN`** (Dapr → Your app): Requires server-side validation in your application + +The examples below show how to implement server-side validation for `APP_API_TOKEN`. + +#### Implementing server-side token validation + +When using gRPC protocol, implement a server interceptor to capture the metadata. + +```java +import io.grpc.Context; +import io.grpc.Contexts; +import io.grpc.Metadata; +import io.grpc.ServerCall; +import io.grpc.ServerCallHandler; +import io.grpc.ServerInterceptor; + +public class SubscriberGrpcService extends AppCallbackGrpc.AppCallbackImplBase { + public static final Context.Key METADATA_KEY = Context.key("grpc-metadata"); + + // gRPC interceptor to capture metadata + public static class MetadataInterceptor implements ServerInterceptor { + @Override + public ServerCall.Listener interceptCall( + ServerCall call, + Metadata headers, + ServerCallHandler next) { + Context contextWithMetadata = Context.current().withValue(METADATA_KEY, headers); + return Contexts.interceptCall(contextWithMetadata, call, headers, next); + } + } + + // Your service methods go here... +} +``` + +Register the interceptor when building your gRPC server: + +```java +Server server = ServerBuilder.forPort(port) + .intercept(new SubscriberGrpcService.MetadataInterceptor()) + .addService(new SubscriberGrpcService()) + .build(); +server.start(); +``` + +Then, in your service methods, extract the token from metadata: + +```java +@Override +public void onTopicEvent(DaprAppCallbackProtos.TopicEventRequest request, + StreamObserver responseObserver) { + try { + // Extract metadata from context + Context context = Context.current(); + Metadata metadata = METADATA_KEY.get(context); + + if (metadata != null) { + String apiToken = metadata.get( + Metadata.Key.of("dapr-api-token", Metadata.ASCII_STRING_MARSHALLER)); + + // Validate token accordingly + } + + // Process the request + // ... 
+ + } catch (Throwable e) { + responseObserver.onError(e); + } +} +``` + +#### Using with HTTP endpoints + +For HTTP-based endpoints, extract the token from the headers: + +```java +@RestController +public class SubscriberController { + + @PostMapping(path = "/endpoint") + public Mono handleRequest( + @RequestBody(required = false) byte[] body, + @RequestHeader Map headers) { + return Mono.fromRunnable(() -> { + try { + // Extract the token from headers + String apiToken = headers.get("dapr-api-token"); + + // Validate token accordingly + + // Process the request + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + } +} +``` + +#### Examples + +For working examples with pubsub, bindings, and jobs: +- [PubSub with App API Token Authentication](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/pubsub#app-api-token-authentication-optional) +- [Bindings with App API Token Authentication](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/bindings/http#app-api-token-authentication-optional) +- [Jobs with App API Token Authentication](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/jobs#app-api-token-authentication-optional) + ## Related links - [Java SDK examples](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples) diff --git a/daprdocs/content/en/java-sdk-docs/java-client/properties.md b/daprdocs/content/en/java-sdk-docs/java-client/properties.md index 2b26d87bdf..87eb7a99c4 100644 --- a/daprdocs/content/en/java-sdk-docs/java-client/properties.md +++ b/daprdocs/content/en/java-sdk-docs/java-client/properties.md @@ -32,11 +32,16 @@ When these variables are set, the client will automatically use them to connect | `DAPR_GRPC_PORT` | The gRPC port for the Dapr sidecar (legacy, `DAPR_GRPC_ENDPOINT` takes precedence) | `50001` | | `DAPR_HTTP_PORT` | The HTTP port for the Dapr sidecar (legacy, `DAPR_HTTP_ENDPOINT` takes precedence) | `3500` | -### API Token +### API Tokens + +Dapr supports two types of API tokens for securing communication: | Environment Variable | Description | Default | |---------------------|-------------|---------| -| `DAPR_API_TOKEN` | API token for authentication between app and Dapr sidecar. This is the same token used by the Dapr runtime for API authentication. For more details, see [Dapr API token authentication](https://docs.dapr.io/operations/security/api-token/) and [Environment variables reference](https://docs.dapr.io/reference/environment/#dapr_api_token). | `null` | +| `DAPR_API_TOKEN` | API token for authenticating requests **from your app to the Dapr sidecar**. The Java SDK automatically includes this token in requests when using `DaprClient`. | `null` | +| `APP_API_TOKEN` | API token for authenticating requests **from Dapr to your app**. When set, Dapr includes this token in the `dapr-api-token` header/metadata when calling your application (for pubsub subscribers, input bindings, or job triggers). Your application must validate this token. | `null` | + +For implementation examples, see [App API Token Authentication]({{% ref java-client#app-api-token-authentication %}}). For more details, see [Dapr API token authentication](https://docs.dapr.io/operations/security/api-token/). 
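A minimal validation sketch, assuming the application is started with the same `APP_API_TOKEN` value in its own environment (the helper name and the `receivedToken` argument are illustrative; the token itself is read from the `dapr-api-token` HTTP header or gRPC metadata entry as shown in the linked examples):

```java
// Illustrative helper: receivedToken is the value of the "dapr-api-token"
// header/metadata entry extracted from the incoming request.
static void requireValidAppToken(String receivedToken) {
  String expected = System.getenv("APP_API_TOKEN");
  if (expected == null || !expected.equals(receivedToken)) {
    // Rejecting with HTTP 401 or gRPC UNAUTHENTICATED is equally valid;
    // the key point is refusing requests whose token does not match.
    throw new IllegalArgumentException("Missing or invalid dapr-api-token");
  }
}
```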
### gRPC Configuration diff --git a/examples/src/main/java/io/dapr/examples/bindings/http/README.md b/examples/src/main/java/io/dapr/examples/bindings/http/README.md index cd9ce42062..74a4176ad5 100644 --- a/examples/src/main/java/io/dapr/examples/bindings/http/README.md +++ b/examples/src/main/java/io/dapr/examples/bindings/http/README.md @@ -75,6 +75,22 @@ b95e7ad31707 confluentinc/cp-zookeeper:7.4.4 "/etc/confluent/dock…" 5 da ``` Click [here](https://github.com/wurstmeister/kafka-docker) for more information about the kafka broker server. +### App API Token Authentication (Optional) + +Dapr supports API token authentication to secure communication between Dapr and your application. When using input bindings, Dapr makes incoming calls to your app, and you can validate these requests using the `APP_API_TOKEN`. + +For detailed implementation with gRPC interceptors, see the [PubSub README App API Token Authentication section](../pubsub/README.md#app-api-token-authentication-optional). + +For HTTP-based apps, check the `dapr-api-token` header in incoming requests. For more details, see the [Dapr App API Token Authentication documentation](https://docs.dapr.io/operations/security/app-api-token/). + +**Quick setup:** + +```bash +# Export tokens before running the following `dapr run` commands. +export APP_API_TOKEN="your-app-api-token" +export DAPR_API_TOKEN="your-dapr-api-token" +``` + ### Running the Input binding sample The input binding sample uses the Spring Boot´s DaprApplication class for initializing the `InputBindingController`. In `InputBindingExample.java` file, you will find the `InputBindingExample` class and the `main` method. See the code snippet below: diff --git a/examples/src/main/java/io/dapr/examples/jobs/README.md b/examples/src/main/java/io/dapr/examples/jobs/README.md index 2877b31fba..4b899ac4a6 100644 --- a/examples/src/main/java/io/dapr/examples/jobs/README.md +++ b/examples/src/main/java/io/dapr/examples/jobs/README.md @@ -44,6 +44,22 @@ cd examples Run `dapr init` to initialize Dapr in Self-Hosted Mode if it's not already initialized. +### App API Token Authentication (Optional) + +Dapr supports API token authentication to secure communication between Dapr and your application. When using the Jobs API, Dapr makes incoming calls to your app at job trigger time, and you can validate these requests using the `APP_API_TOKEN`. + +For detailed implementation with gRPC interceptors, see the [PubSub README App API Token Authentication section](../pubsub/README.md#app-api-token-authentication-optional). + +For more details, see the [Dapr App API Token Authentication documentation](https://docs.dapr.io/operations/security/app-api-token/). + +**Quick setup:** + +```bash +# Export tokens before running the following `dapr run` commands. +export APP_API_TOKEN="your-app-api-token" +export DAPR_API_TOKEN="your-dapr-api-token" +``` + ### Running the example This example uses the Java SDK Dapr client in order to **Schedule and Get** Jobs. diff --git a/examples/src/main/java/io/dapr/examples/pubsub/README.md b/examples/src/main/java/io/dapr/examples/pubsub/README.md index 4fb16290ca..6aa5d401de 100644 --- a/examples/src/main/java/io/dapr/examples/pubsub/README.md +++ b/examples/src/main/java/io/dapr/examples/pubsub/README.md @@ -41,6 +41,88 @@ cd examples Run `dapr init` to initialize Dapr in Self-Hosted Mode if it's not already initialized. 
+### App API Token Authentication (Optional) + +Dapr supports API token authentication to secure communication between Dapr and your application. This feature is useful for numerous APIs like pubsub, bindings, and jobs building blocks where Dapr makes incoming calls to your app. + +For more details, see the [Dapr App API Token Authentication documentation](https://docs.dapr.io/operations/security/app-api-token/). + +#### How it works + +When `APP_API_TOKEN` is set, Dapr includes the token in the gRPC metadata header `dapr-api-token` when calling your app. Your app can validate this token to authenticate requests from Dapr. + +#### Setting up tokens + +Set a dapr annotation or simply export the environment variables before running your Dapr applications: + +```bash +# Token for your app to authenticate requests FROM Dapr +export APP_API_TOKEN="your-app-api-token" + +# Token for Dapr client to authenticate requests TO Dapr sidecar +export DAPR_API_TOKEN="your-dapr-api-token" +``` + +#### Using with gRPC Subscriber + +The gRPC subscriber example includes a `MetadataInterceptor` (see `SubscriberGrpcService.java`) that captures the `dapr-api-token` from incoming requests: + +```java +public class SubscriberGrpcService extends AppCallbackGrpc.AppCallbackImplBase { + public static final Context.Key METADATA_KEY = Context.key("grpc-metadata"); + + // gRPC interceptor to capture metadata + public static class MetadataInterceptor implements ServerInterceptor { + @Override + public ServerCall.Listener interceptCall( + ServerCall call, Metadata headers, ServerCallHandler next) { + Context contextWithMetadata = Context.current().withValue(METADATA_KEY, headers); + return Contexts.interceptCall(contextWithMetadata, call, headers, next); + } + } +} +``` + +Then in your service methods, you can extract and validate the token: + +```java +Context context = Context.current(); +Metadata metadata = METADATA_KEY.get(context); +String apiToken = metadata.get(Metadata.Key.of("dapr-api-token", Metadata.ASCII_STRING_MARSHALLER)); + +// Validate token accordingly +``` + +#### Using with HTTP Subscriber + +For HTTP-based endpoints, extract the token from the headers: + +```java +@RestController +public class SubscriberController { + + @PostMapping(path = "/endpoint") + public Mono handleRequest( + @RequestBody(required = false) byte[] body, + @RequestHeader Map headers) { + return Mono.fromRunnable(() -> { + try { + // Extract the token from headers + String apiToken = headers.get("dapr-api-token"); + + // Validate token accordingly + + // Process the request + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + } +} +``` + +Then use the standard `dapr run` commands shown in the sections below. The subscriber will validate incoming requests from Dapr using `APP_API_TOKEN`, and both applications will authenticate to Dapr using `DAPR_API_TOKEN`. + ### Running the publisher The publisher is a simple Java application with a main method that uses the Dapr gRPC Client to publish 10 messages to a specific topic. 
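A condensed sketch of such a publisher is shown below; the pubsub component name (`messagebus`) and topic (`testingtopic`) are illustrative placeholders, and the client reads the `DAPR_API_TOKEN` and sidecar endpoint settings from the environment variables described above:

```java
import io.dapr.client.DaprClient;
import io.dapr.client.DaprClientBuilder;

public class SimplePublisher {
  public static void main(String[] args) throws Exception {
    // The builder picks up the sidecar endpoints and DAPR_API_TOKEN from the environment.
    try (DaprClient client = new DaprClientBuilder().build()) {
      for (int i = 0; i < 10; i++) {
        // Publish a plain-string payload to the topic and wait for the call to complete.
        client.publishEvent("messagebus", "testingtopic", "This is message #" + i).block();
      }
    }
  }
}
```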
diff --git a/examples/src/main/java/io/dapr/examples/pubsub/grpc/Subscriber.java b/examples/src/main/java/io/dapr/examples/pubsub/grpc/Subscriber.java index 4c9cff939a..8c3c570887 100644 --- a/examples/src/main/java/io/dapr/examples/pubsub/grpc/Subscriber.java +++ b/examples/src/main/java/io/dapr/examples/pubsub/grpc/Subscriber.java @@ -48,8 +48,9 @@ public static void main(String[] args) throws Exception { int port = Integer.parseInt(cmd.getOptionValue("port")); //start a grpc server - Server server = ServerBuilder.forPort(port) - .addService(new SubscriberGrpcService()) + Server server = ServerBuilder.forPort(port) + .intercept(new SubscriberGrpcService.MetadataInterceptor()) + .addService(new SubscriberGrpcService()) .addService(new BulkSubscriberGrpcService()) .build(); server.start(); diff --git a/examples/src/main/java/io/dapr/examples/pubsub/grpc/SubscriberGrpcService.java b/examples/src/main/java/io/dapr/examples/pubsub/grpc/SubscriberGrpcService.java index d454280530..642d9e1b91 100644 --- a/examples/src/main/java/io/dapr/examples/pubsub/grpc/SubscriberGrpcService.java +++ b/examples/src/main/java/io/dapr/examples/pubsub/grpc/SubscriberGrpcService.java @@ -16,6 +16,12 @@ import com.google.protobuf.Empty; import io.dapr.v1.AppCallbackGrpc; import io.dapr.v1.DaprAppCallbackProtos; +import io.grpc.Context; +import io.grpc.Contexts; +import io.grpc.Metadata; +import io.grpc.ServerCall; +import io.grpc.ServerCallHandler; +import io.grpc.ServerInterceptor; import io.grpc.stub.StreamObserver; import java.util.ArrayList; @@ -27,6 +33,17 @@ public class SubscriberGrpcService extends AppCallbackGrpc.AppCallbackImplBase { private final List topicSubscriptionList = new ArrayList<>(); + public static final Context.Key METADATA_KEY = Context.key("grpc-metadata"); + // gRPC interceptor to capture metadata + public static class MetadataInterceptor implements ServerInterceptor { + @Override + public ServerCall.Listener interceptCall( + ServerCall call, Metadata headers, ServerCallHandler next) { + Context contextWithMetadata = Context.current().withValue(METADATA_KEY, headers); + return Contexts.interceptCall(contextWithMetadata, call, headers, next); + } + } + @Override public void listTopicSubscriptions(Empty request, StreamObserver responseObserver) { @@ -50,6 +67,30 @@ public void listTopicSubscriptions(Empty request, public void onTopicEvent(DaprAppCallbackProtos.TopicEventRequest request, StreamObserver responseObserver) { try { + try { + Context context = Context.current(); + Metadata metadata = METADATA_KEY.get(context); + + if (metadata != null) { + System.out.println("Metadata found in context"); + String apiToken = metadata.get(Metadata.Key.of("dapr-api-token", Metadata.ASCII_STRING_MARSHALLER)); + if (apiToken != null) { + System.out.println("API Token extracted: " + apiToken); + } else { + System.out.println("No 'dapr-api-token' found in metadata"); + } + System.out.println("All metadata:"); + for (String key : metadata.keys()) { + String value = metadata.get(Metadata.Key.of(key, Metadata.ASCII_STRING_MARSHALLER)); + System.out.println("key: " + key + ": " + value); + } + } else { + System.out.println("No metadata found in context"); + } + } catch (Exception e) { + System.out.println(" Error extracting metadata: " + e.getMessage()); + } + String data = request.getData().toStringUtf8().replace("\"", ""); System.out.println("Subscriber got: " + data); DaprAppCallbackProtos.TopicEventResponse response = DaprAppCallbackProtos.TopicEventResponse.newBuilder() From 
12002debefa9dcb75d9e97da72e1da79572a5efe Mon Sep 17 00:00:00 2001 From: artur-ciocanu Date: Fri, 28 Nov 2025 19:53:54 +0200 Subject: [PATCH 10/17] Another set of Maven version, properties and plugin improvements (#1596) Signed-off-by: salaboy --- .github/workflows/build.yml | 9 +- .github/workflows/validate.yml | 41 -- .mise.toml | 5 + .../dapr-spring-boot-autoconfigure/pom.xml | 1 + dapr-spring/dapr-spring-boot-tests/pom.xml | 1 + dapr-spring/dapr-spring-data/pom.xml | 1 + dapr-spring/dapr-spring-messaging/pom.xml | 1 + dapr-spring/dapr-spring-workflows/pom.xml | 1 + dapr-spring/pom.xml | 41 +- examples/pom.xml | 7 +- pom.xml | 391 +++++++----------- sdk-actors/pom.xml | 63 +-- sdk-autogen/pom.xml | 9 +- sdk-springboot/pom.xml | 43 +- sdk-tests/pom.xml | 7 +- sdk-workflows/pom.xml | 63 +-- sdk/pom.xml | 42 +- spring-boot-examples/consumer-app/pom.xml | 1 + spring-boot-examples/pom.xml | 1 + spring-boot-examples/producer-app/pom.xml | 1 + .../workflows/multi-app/orchestrator/pom.xml | 1 + .../workflows/multi-app/pom.xml | 1 + .../workflows/multi-app/worker-one/pom.xml | 1 + .../workflows/multi-app/worker-two/pom.xml | 1 + .../workflows/patterns/pom.xml | 1 + spring-boot-examples/workflows/pom.xml | 1 + testcontainers-dapr/pom.xml | 20 +- 27 files changed, 181 insertions(+), 574 deletions(-) create mode 100644 .mise.toml diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 96d7563b26..64f2b6dfd9 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -74,12 +74,9 @@ jobs: DAPR_REF: TOXIPROXY_URL: https://github.com/Shopify/toxiproxy/releases/download/v2.5.0/toxiproxy-server-linux-amd64 steps: - - name: Install Stable Docker - id: setup_docker - uses: docker/setup-docker-action@v4 - - name: Check Docker version - run: docker version - uses: actions/checkout@v5 + - name: Check Docker version + run: docker version - name: Set up OpenJDK ${{ env.JDK_VER }} uses: actions/setup-java@v5 with: @@ -153,8 +150,6 @@ jobs: - name: Integration tests using spring boot version ${{ matrix.spring-boot-version }} id: integration_tests run: PRODUCT_SPRING_BOOT_VERSION=${{ matrix.spring-boot-version }} ./mvnw -B -Pintegration-tests dependency:copy-dependencies verify - env: - DOCKER_HOST: ${{steps.setup_docker.outputs.sock}} - name: Upload failsafe test report for sdk-tests on failure if: ${{ failure() && steps.integration_tests.conclusion == 'failure' }} uses: actions/upload-artifact@v5 diff --git a/.github/workflows/validate.yml b/.github/workflows/validate.yml index a79ecce96f..727f783df1 100644 --- a/.github/workflows/validate.yml +++ b/.github/workflows/validate.yml @@ -49,9 +49,6 @@ jobs: with: distribution: 'temurin' java-version: ${{ env.JDK_VER }} - - name: Install Stable Docker - id: setup_docker - uses: docker/setup-docker-action@v4 - name: Check Docker version run: docker version - name: Set up Dapr CLI @@ -116,114 +113,76 @@ jobs: run: sleep 30 && docker logs dapr_scheduler && nc -vz localhost 50006 - name: Install jars run: ./mvnw clean install -DskipTests -q - env: - DOCKER_HOST: ${{steps.setup_docker.outputs.sock}} - name: Validate workflows example working-directory: ./examples run: | mm.py ./src/main/java/io/dapr/examples/workflows/README.md - env: - DOCKER_HOST: ${{steps.setup_docker.outputs.sock}} - name: Validate Spring Boot examples working-directory: ./spring-boot-examples run: | mm.py README.md - env: - DOCKER_HOST: ${{steps.setup_docker.outputs.sock}} - name: Validate Spring Boot Workflow Patterns examples working-directory: 
./spring-boot-examples/workflows/patterns run: | mm.py README.md - env: - DOCKER_HOST: ${{steps.setup_docker.outputs.sock}} - name: Validate Jobs example working-directory: ./examples run: | mm.py ./src/main/java/io/dapr/examples/jobs/README.md - env: - DOCKER_HOST: ${{steps.setup_docker.outputs.sock}} - name: Validate conversation ai example working-directory: ./examples run: | mm.py ./src/main/java/io/dapr/examples/conversation/README.md - env: - DOCKER_HOST: ${{steps.setup_docker.outputs.sock}} - name: Validate invoke http example working-directory: ./examples run: | mm.py ./src/main/java/io/dapr/examples/invoke/http/README.md - env: - DOCKER_HOST: ${{steps.setup_docker.outputs.sock}} - name: Validate invoke grpc example working-directory: ./examples run: | mm.py ./src/main/java/io/dapr/examples/invoke/grpc/README.md - env: - DOCKER_HOST: ${{steps.setup_docker.outputs.sock}} - name: Validate tracing example working-directory: ./examples run: | mm.py ./src/main/java/io/dapr/examples/tracing/README.md - env: - DOCKER_HOST: ${{steps.setup_docker.outputs.sock}} - name: Validate expection handling example working-directory: ./examples run: | mm.py ./src/main/java/io/dapr/examples/exception/README.md - env: - DOCKER_HOST: ${{steps.setup_docker.outputs.sock}} - name: Validate state example working-directory: ./examples run: | mm.py ./src/main/java/io/dapr/examples/state/README.md - env: - DOCKER_HOST: ${{steps.setup_docker.outputs.sock}} - name: Validate pubsub example working-directory: ./examples run: | mm.py ./src/main/java/io/dapr/examples/pubsub/README.md - env: - DOCKER_HOST: ${{steps.setup_docker.outputs.sock}} - name: Validate bindings HTTP example working-directory: ./examples run: | mm.py ./src/main/java/io/dapr/examples/bindings/http/README.md - env: - DOCKER_HOST: ${{steps.setup_docker.outputs.sock}} - name: Validate secrets example working-directory: ./examples run: | mm.py ./src/main/java/io/dapr/examples/secrets/README.md - env: - DOCKER_HOST: ${{steps.setup_docker.outputs.sock}} - name: Validate unit testing example working-directory: ./examples run: | mm.py ./src/main/java/io/dapr/examples/unittesting/README.md - env: - DOCKER_HOST: ${{steps.setup_docker.outputs.sock}} - name: Validate Configuration API example working-directory: ./examples run: | mm.py ./src/main/java/io/dapr/examples/configuration/README.md - env: - DOCKER_HOST: ${{steps.setup_docker.outputs.sock}} - name: Validate actors example working-directory: ./examples run: | mm.py ./src/main/java/io/dapr/examples/actors/README.md - env: - DOCKER_HOST: ${{steps.setup_docker.outputs.sock}} - name: Validate query state HTTP example working-directory: ./examples run: | mm.py ./src/main/java/io/dapr/examples/querystate/README.md - env: - DOCKER_HOST: ${{steps.setup_docker.outputs.sock}} - name: Validate streaming subscription example working-directory: ./examples run: | mm.py ./src/main/java/io/dapr/examples/pubsub/stream/README.md - env: - DOCKER_HOST: ${{steps.setup_docker.outputs.sock}} diff --git a/.mise.toml b/.mise.toml new file mode 100644 index 0000000000..6daaf4e4cb --- /dev/null +++ b/.mise.toml @@ -0,0 +1,5 @@ +[tools] +java = "temurin-17" + +[env] +JAVA_HOME = "{{exec(command='mise where java')}}" diff --git a/dapr-spring/dapr-spring-boot-autoconfigure/pom.xml b/dapr-spring/dapr-spring-boot-autoconfigure/pom.xml index 7cec5fce66..7afe00e7d0 100644 --- a/dapr-spring/dapr-spring-boot-autoconfigure/pom.xml +++ b/dapr-spring/dapr-spring-boot-autoconfigure/pom.xml @@ -7,6 +7,7 @@ io.dapr.spring dapr-spring-parent 
1.17.0-SNAPSHOT + ../pom.xml dapr-spring-boot-autoconfigure diff --git a/dapr-spring/dapr-spring-boot-tests/pom.xml b/dapr-spring/dapr-spring-boot-tests/pom.xml index 5cb08b9bf8..e463802f89 100644 --- a/dapr-spring/dapr-spring-boot-tests/pom.xml +++ b/dapr-spring/dapr-spring-boot-tests/pom.xml @@ -7,6 +7,7 @@ io.dapr.spring dapr-spring-parent 1.17.0-SNAPSHOT + ../pom.xml dapr-spring-boot-tests diff --git a/dapr-spring/dapr-spring-data/pom.xml b/dapr-spring/dapr-spring-data/pom.xml index b41c7c85fc..7beafa478b 100644 --- a/dapr-spring/dapr-spring-data/pom.xml +++ b/dapr-spring/dapr-spring-data/pom.xml @@ -7,6 +7,7 @@ io.dapr.spring dapr-spring-parent 1.17.0-SNAPSHOT + ../pom.xml dapr-spring-data diff --git a/dapr-spring/dapr-spring-messaging/pom.xml b/dapr-spring/dapr-spring-messaging/pom.xml index 90a73c9045..26342138b5 100644 --- a/dapr-spring/dapr-spring-messaging/pom.xml +++ b/dapr-spring/dapr-spring-messaging/pom.xml @@ -7,6 +7,7 @@ io.dapr.spring dapr-spring-parent 1.17.0-SNAPSHOT + ../pom.xml dapr-spring-messaging diff --git a/dapr-spring/dapr-spring-workflows/pom.xml b/dapr-spring/dapr-spring-workflows/pom.xml index 80e12a4684..4ab76673b3 100644 --- a/dapr-spring/dapr-spring-workflows/pom.xml +++ b/dapr-spring/dapr-spring-workflows/pom.xml @@ -7,6 +7,7 @@ io.dapr.spring dapr-spring-parent 1.17.0-SNAPSHOT + ../pom.xml dapr-spring-workflows diff --git a/dapr-spring/pom.xml b/dapr-spring/pom.xml index fc602c3f22..4f34d39710 100644 --- a/dapr-spring/pom.xml +++ b/dapr-spring/pom.xml @@ -8,6 +8,7 @@ io.dapr dapr-sdk-parent 1.17.0-SNAPSHOT + ../pom.xml io.dapr.spring @@ -73,55 +74,15 @@ org.apache.maven.plugins maven-source-plugin - 3.3.1 - - - attach-sources - - jar-no-fork - - - - org.apache.maven.plugins maven-javadoc-plugin - 3.2.0 - - true - - - - attach-javadocs - - jar - - - - org.jacoco jacoco-maven-plugin - 0.8.12 - - default-prepare-agent - - prepare-agent - - - - report - test - - report - - - target/jacoco-report/ - - check diff --git a/examples/pom.xml b/examples/pom.xml index 413d7c45ce..bbb1b2c31b 100644 --- a/examples/pom.xml +++ b/examples/pom.xml @@ -8,6 +8,7 @@ io.dapr dapr-sdk-parent 1.17.0-SNAPSHOT + ../pom.xml dapr-sdk-examples @@ -135,7 +136,7 @@ org.xolstice.maven.plugins protobuf-maven-plugin - 0.6.1 + ${protobuf-maven-plugin.version} com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier} grpc-java @@ -154,7 +155,7 @@ org.apache.maven.plugins maven-compiler-plugin - 3.13.0 + ${maven-compiler-plugin.version} ${java.version} @@ -162,7 +163,7 @@ org.apache.maven.plugins maven-site-plugin - 3.12.1 + ${maven-site-plugin.version} true diff --git a/pom.xml b/pom.xml index b45636c3e1..b145738aa9 100644 --- a/pom.xml +++ b/pom.xml @@ -18,16 +18,25 @@ 3.25.5 https://raw.githubusercontent.com/dapr/dapr/v1.16.0-rc.5/dapr/proto 1.17.0-SNAPSHOT - 0.17.0-SNAPSHOT 1.7.1 - 3.1.1 - 1.8 - 2.7 + 3.8.1 + 3.1.0 + 3.1.3 3.3.1 3.13.0 - 3.2.1 + 3.3.1 + 3.7.0 + 0.6.1 + 1.6.0 0.8.11 80% + 3.1.0 + 3.4.0 + 10.17.0 + 4.8.2.0 + 3.12.1 + 3.7.0 + 3.4.2 11 11 11 @@ -40,7 +49,7 @@ 2.16.1 true true - ../spotbugs-exclude.xml + ${maven.multiModuleProjectDirectory}/spotbugs-exclude.xml --add-opens java.base/java.lang=ALL-UNNAMED --add-opens java.base/java.util=ALL-UNNAMED 3.5.3 3.2.2 @@ -98,11 +107,9 @@ - - org.slf4j - slf4j-api - ${slf4j.version} - + + + io.grpc grpc-bom @@ -111,9 +118,18 @@ import - io.grpc - grpc-api - ${grpc.version} + org.springframework.boot + spring-boot-dependencies + ${springboot.version} + pom + import + + + com.fasterxml.jackson + jackson-bom + 
${jackson.version} + pom + import org.junit @@ -123,123 +139,98 @@ pom - org.mockito - mockito-core - ${mockito.version} - - - io.projectreactor - reactor-test - ${reactor.version} - test - - - io.grpc - grpc-testing - ${grpc.version} - test - - - io.grpc - grpc-inprocess - ${grpc.version} - test + org.testcontainers + testcontainers-bom + ${testcontainers.version} + pom + import - org.jetbrains.kotlin - kotlin-stdlib - ${kotlin.version} + com.google.protobuf + protobuf-bom + ${protobuf.version} + pom + import - org.yaml - snakeyaml - ${snakeyaml.version} + io.opentelemetry + opentelemetry-bom + ${opentelemetry.version} + pom + import + + + + + org.testcontainers testcontainers ${testcontainers.version} - io.dapr - dapr-sdk - ${dapr.sdk.version} - compile - - - org.junit.jupiter + org.testcontainers junit-jupiter - ${junit-bom.version} - - - org.junit.jupiter - junit-jupiter-api - ${junit-bom.version} + ${testcontainers.version} - org.junit.jupiter - junit-jupiter-engine - ${junit-bom.version} + org.testcontainers + kafka + ${testcontainers.version} - org.junit.jupiter - junit-jupiter-params - ${junit-bom.version} + org.testcontainers + postgresql + ${testcontainers.version} org.testcontainers - junit-jupiter + rabbitmq ${testcontainers.version} org.testcontainers - kafka + toxiproxy ${testcontainers.version} + test - org.springframework.boot - spring-boot-dependencies - ${springboot.version} - pom - import + org.testcontainers + mysql + ${testcontainers.version} + test - io.opentelemetry - opentelemetry-bom - ${opentelemetry.version} - pom - import + org.slf4j + slf4j-api + ${slf4j.version} - com.fasterxml.jackson - jackson-bom - ${jackson.version} - pom - import + org.mockito + mockito-core + ${mockito.version} - org.testcontainers - testcontainers-bom - ${testcontainers.version} - pom - import + io.projectreactor + reactor-test + ${reactor.version} + test - com.google.protobuf - protobuf-bom - ${protobuf.version} - pom - import + org.jetbrains.kotlin + kotlin-stdlib + ${kotlin.version} - org.testcontainers - postgresql - ${testcontainers.version} + org.yaml + snakeyaml + ${snakeyaml.version} - org.testcontainers - rabbitmq - ${testcontainers.version} + io.dapr + dapr-sdk + ${dapr.sdk.version} io.rest-assured @@ -256,58 +247,11 @@ dapr-spring-boot-starter-test ${dapr.sdk.version} - - org.springframework.boot - spring-boot-starter-web - ${springboot.version} - - - org.springframework.boot - spring-boot-starter-actuator - ${springboot.version} - - - org.springframework.boot - spring-boot-starter-test - ${springboot.version} - - - org.springframework.boot - spring-boot-autoconfigure - ${springboot.version} - - - org.springframework.data - spring-data-keyvalue - ${springboot.version} - - - org.springframework.data - spring-data-commons - ${springboot.version} - - - org.springframework.boot - spring-boot-testcontainers - ${springboot.version} - io.dapr testcontainers-dapr ${dapr.sdk.version} - - org.testcontainers - toxiproxy - ${testcontainers.version} - test - - - org.testcontainers - mysql - ${testcontainers.version} - test - jakarta.annotation jakarta.annotation-api @@ -328,81 +272,11 @@ commons-cli ${commons-cli.version} - - io.grpc - grpc-protobuf - ${grpc.version} - - - io.grpc - grpc-stub - ${grpc.version} - - - io.grpc - grpc-netty - ${grpc.version} - - - com.google.protobuf - protobuf-java - ${protobuf.version} - - - com.google.protobuf - protobuf-java-util - ${protobuf.version} - commons-io commons-io ${commons-io.version} - - io.opentelemetry - opentelemetry-sdk - 
${opentelemetry.version} - - - io.opentelemetry - opentelemetry-api - ${opentelemetry.version} - - - io.opentelemetry - opentelemetry-context - ${opentelemetry.version} - - - io.opentelemetry - opentelemetry-sdk-common - ${opentelemetry.version} - - - io.opentelemetry - opentelemetry-sdk-trace - ${opentelemetry.version} - - - io.opentelemetry - opentelemetry-sdk-metrics - ${opentelemetry.version} - - - io.opentelemetry - opentelemetry-exporter-common - ${opentelemetry.version} - - - io.opentelemetry - opentelemetry-exporter-logging - ${opentelemetry.version} - - - io.opentelemetry - opentelemetry-exporter-zipkin - ${opentelemetry.version} - io.zipkin.reporter2 @@ -464,40 +338,31 @@ mockito-inline ${mockito-inline.version} - - org.junit.vintage - junit-vintage-engine - ${junit-vintage-engine.version} - org.junit.platform junit-platform-console-standalone ${junit-platform-console.version} + - com.fasterxml.jackson.core - jackson-databind - ${jackson.version} - - - com.fasterxml.jackson.dataformat - jackson-dataformat-xml - ${jackson.version} + org.junit.jupiter + junit-jupiter + ${junit-bom.version} - com.fasterxml.jackson.datatype - jackson-datatype-jsr310 - ${jackson.version} + org.junit.jupiter + junit-jupiter-api + ${junit-bom.version} - com.fasterxml.jackson.core - jackson-core - ${jackson.version} + org.junit.jupiter + junit-jupiter-engine + ${junit-bom.version} - com.fasterxml.jackson.core - jackson-annotations - ${jackson.version} + org.junit.jupiter + junit-jupiter-params + ${junit-bom.version} io.projectreactor @@ -532,7 +397,7 @@ kr.motd.maven os-maven-plugin - 1.7.1 + ${os-maven-plugin.version} @@ -547,6 +412,14 @@ org.apache.maven.plugins maven-source-plugin ${maven-sources-plugin.version} + + + attach-sources + + jar-no-fork + + + org.apache.maven.plugins @@ -639,6 +512,32 @@ + + org.apache.maven.plugins + maven-javadoc-plugin + ${maven-javadoc-plugin.version} + + true + + + + attach-javadocs + + jar + + + + + + org.xolstice.maven.plugins + protobuf-maven-plugin + ${protobuf-maven-plugin.version} + + + com.googlecode.maven-download-plugin + download-maven-plugin + ${download-maven-plugin.version} + @@ -647,22 +546,10 @@ nexus-staging-maven-plugin false - - org.jacoco - jacoco-maven-plugin - 0.8.11 - - - - prepare-agent - - - - org.apache.maven.plugins maven-gpg-plugin - 3.1.0 + ${maven-gpg-plugin.version} sign-artifacts @@ -683,7 +570,7 @@ org.apache.maven.plugins maven-checkstyle-plugin - 3.4.0 + ${maven-checkstyle-plugin.version} checkstyle.xml .java_header @@ -709,14 +596,14 @@ com.puppycrawl.tools checkstyle - 10.17.0 + ${checkstyle.version} com.github.spotbugs spotbugs-maven-plugin - 4.8.2.0 + ${spotbugs-maven-plugin.version} ${spotbugs.exclude.filter.file} ${spotbugs.fail} @@ -735,7 +622,7 @@ org.apache.maven.plugins maven-site-plugin - 3.8.2 + ${maven-site-plugin.version} @@ -745,7 +632,7 @@ org.apache.maven.plugins maven-project-info-reports-plugin - 2.9 + ${maven-project-info-reports-plugin.version} @@ -756,7 +643,7 @@ org.apache.maven.plugins maven-javadoc-plugin - 3.2.0 + ${maven-javadoc-plugin.version} aggregate diff --git a/sdk-actors/pom.xml b/sdk-actors/pom.xml index 0ee3a827a3..9adba4f25f 100644 --- a/sdk-actors/pom.xml +++ b/sdk-actors/pom.xml @@ -8,6 +8,7 @@ io.dapr dapr-sdk-parent 1.17.0-SNAPSHOT + ../pom.xml dapr-sdk-actors @@ -66,76 +67,14 @@ org.apache.maven.plugins maven-source-plugin - 3.2.1 - - - attach-sources - - jar-no-fork - - - - org.apache.maven.plugins maven-javadoc-plugin - 3.2.0 - - true - - - - attach-javadocs - - jar - - - org.jacoco 
jacoco-maven-plugin - 0.8.11 - - - default-prepare-agent - - prepare-agent - - - - report - test - - report - - - target/jacoco-report/ - - - - check - - check - - - - - BUNDLE - - - LINE - COVEREDRATIO - 80% - - - - - - - - diff --git a/sdk-autogen/pom.xml b/sdk-autogen/pom.xml index cd37138cc8..b64287d811 100644 --- a/sdk-autogen/pom.xml +++ b/sdk-autogen/pom.xml @@ -8,6 +8,7 @@ io.dapr dapr-sdk-parent 1.17.0-SNAPSHOT + ../pom.xml dapr-sdk-autogen @@ -56,7 +57,7 @@ com.googlecode.maven-download-plugin download-maven-plugin - 1.6.0 + ${download-maven-plugin.version} getDaprProto @@ -102,7 +103,7 @@ org.xolstice.maven.plugins protobuf-maven-plugin - 0.6.1 + ${protobuf-maven-plugin.version} com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier} grpc-java @@ -121,7 +122,7 @@ org.apache.maven.plugins maven-source-plugin - 3.2.1 + ${maven-sources-plugin.version} attach-sources @@ -134,7 +135,7 @@ org.apache.maven.plugins maven-javadoc-plugin - 3.2.0 + ${maven-javadoc-plugin.version} true diff --git a/sdk-springboot/pom.xml b/sdk-springboot/pom.xml index 8e9b4bb852..a3db059f9f 100644 --- a/sdk-springboot/pom.xml +++ b/sdk-springboot/pom.xml @@ -8,6 +8,7 @@ io.dapr dapr-sdk-parent 1.17.0-SNAPSHOT + ../pom.xml dapr-sdk-springboot @@ -69,55 +70,15 @@ org.apache.maven.plugins maven-source-plugin - 3.3.1 - - - attach-sources - - jar-no-fork - - - - org.apache.maven.plugins maven-javadoc-plugin - 3.7.0 - - true - - - - attach-javadocs - - jar - - - - org.jacoco jacoco-maven-plugin - 0.8.12 - - default-prepare-agent - - prepare-agent - - - - report - test - - report - - - target/jacoco-report/ - - check @@ -134,7 +95,7 @@ LINE COVEREDRATIO - 80% + ${jacoco-maven-plugin.coverage-ratio} diff --git a/sdk-tests/pom.xml b/sdk-tests/pom.xml index 1b66f08071..29d0511358 100644 --- a/sdk-tests/pom.xml +++ b/sdk-tests/pom.xml @@ -8,6 +8,7 @@ io.dapr dapr-sdk-parent 1.17.0-SNAPSHOT + ../pom.xml dapr-sdk-tests @@ -256,7 +257,7 @@ org.xolstice.maven.plugins protobuf-maven-plugin - 0.6.1 + ${protobuf-maven-plugin.version} com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier} grpc-java @@ -275,7 +276,7 @@ org.apache.maven.plugins maven-jar-plugin - 3.2.0 + ${maven-jar-plugin.version} @@ -287,7 +288,7 @@ org.apache.maven.plugins maven-failsafe-plugin - 3.2.2 + ${failsafe.version} diff --git a/sdk-workflows/pom.xml b/sdk-workflows/pom.xml index 3afb38c40f..1773d7fa04 100644 --- a/sdk-workflows/pom.xml +++ b/sdk-workflows/pom.xml @@ -8,6 +8,7 @@ io.dapr dapr-sdk-parent 1.17.0-SNAPSHOT + ../pom.xml dapr-sdk-workflows @@ -77,76 +78,14 @@ org.apache.maven.plugins maven-source-plugin - 3.2.1 - - - attach-sources - - jar-no-fork - - - - org.apache.maven.plugins maven-javadoc-plugin - 3.2.0 - - true - - - - attach-javadocs - - jar - - - org.jacoco jacoco-maven-plugin - 0.8.11 - - - default-prepare-agent - - prepare-agent - - - - report - test - - report - - - target/jacoco-report/ - - - - check - - check - - - - - BUNDLE - - - LINE - COVEREDRATIO - 80% - - - - - - - - diff --git a/sdk/pom.xml b/sdk/pom.xml index f77de232dd..5011604124 100644 --- a/sdk/pom.xml +++ b/sdk/pom.xml @@ -8,6 +8,7 @@ io.dapr dapr-sdk-parent 1.17.0-SNAPSHOT + ../pom.xml dapr-sdk @@ -155,53 +156,15 @@ org.apache.maven.plugins maven-source-plugin - 3.2.1 - - - attach-sources - - jar-no-fork - - - org.apache.maven.plugins maven-javadoc-plugin - 3.2.0 - - true - - - - attach-javadocs - - jar - - - org.jacoco jacoco-maven-plugin - 0.8.11 - - default-prepare-agent - - prepare-agent - - - - report - test - - 
report - - - target/jacoco-report/ - - check @@ -215,7 +178,7 @@ LINE COVEREDRATIO - 80% + ${jacoco-maven-plugin.coverage-ratio} @@ -225,7 +188,6 @@ - diff --git a/spring-boot-examples/consumer-app/pom.xml b/spring-boot-examples/consumer-app/pom.xml index d39a50cf52..78cb375066 100644 --- a/spring-boot-examples/consumer-app/pom.xml +++ b/spring-boot-examples/consumer-app/pom.xml @@ -6,6 +6,7 @@ io.dapr spring-boot-examples 1.17.0-SNAPSHOT + ../pom.xml consumer-app diff --git a/spring-boot-examples/pom.xml b/spring-boot-examples/pom.xml index 928edb1df2..c7efeecf51 100644 --- a/spring-boot-examples/pom.xml +++ b/spring-boot-examples/pom.xml @@ -7,6 +7,7 @@ io.dapr dapr-sdk-parent 1.17.0-SNAPSHOT + ../pom.xml spring-boot-examples diff --git a/spring-boot-examples/producer-app/pom.xml b/spring-boot-examples/producer-app/pom.xml index 467d48c2d8..ba6dbb6b46 100644 --- a/spring-boot-examples/producer-app/pom.xml +++ b/spring-boot-examples/producer-app/pom.xml @@ -7,6 +7,7 @@ io.dapr spring-boot-examples 1.17.0-SNAPSHOT + ../pom.xml producer-app diff --git a/spring-boot-examples/workflows/multi-app/orchestrator/pom.xml b/spring-boot-examples/workflows/multi-app/orchestrator/pom.xml index c4f8a515f0..91aa23c564 100644 --- a/spring-boot-examples/workflows/multi-app/orchestrator/pom.xml +++ b/spring-boot-examples/workflows/multi-app/orchestrator/pom.xml @@ -7,6 +7,7 @@ io.dapr multi-app 1.17.0-SNAPSHOT + ../pom.xml orchestrator diff --git a/spring-boot-examples/workflows/multi-app/pom.xml b/spring-boot-examples/workflows/multi-app/pom.xml index f6798d7437..2f9570cf25 100644 --- a/spring-boot-examples/workflows/multi-app/pom.xml +++ b/spring-boot-examples/workflows/multi-app/pom.xml @@ -7,6 +7,7 @@ io.dapr workflows 1.17.0-SNAPSHOT + ../pom.xml multi-app diff --git a/spring-boot-examples/workflows/multi-app/worker-one/pom.xml b/spring-boot-examples/workflows/multi-app/worker-one/pom.xml index e2447be6a7..ac1f060a75 100644 --- a/spring-boot-examples/workflows/multi-app/worker-one/pom.xml +++ b/spring-boot-examples/workflows/multi-app/worker-one/pom.xml @@ -7,6 +7,7 @@ io.dapr multi-app 1.17.0-SNAPSHOT + ../pom.xml worker-one diff --git a/spring-boot-examples/workflows/multi-app/worker-two/pom.xml b/spring-boot-examples/workflows/multi-app/worker-two/pom.xml index 9584fabf30..e44c0f197e 100644 --- a/spring-boot-examples/workflows/multi-app/worker-two/pom.xml +++ b/spring-boot-examples/workflows/multi-app/worker-two/pom.xml @@ -7,6 +7,7 @@ io.dapr multi-app 1.17.0-SNAPSHOT + ../pom.xml worker-two diff --git a/spring-boot-examples/workflows/patterns/pom.xml b/spring-boot-examples/workflows/patterns/pom.xml index a737ec5dc6..5aa157ed37 100644 --- a/spring-boot-examples/workflows/patterns/pom.xml +++ b/spring-boot-examples/workflows/patterns/pom.xml @@ -7,6 +7,7 @@ io.dapr workflows 1.17.0-SNAPSHOT + ../pom.xml patterns diff --git a/spring-boot-examples/workflows/pom.xml b/spring-boot-examples/workflows/pom.xml index 1c711fbc12..d6f8a8eaee 100644 --- a/spring-boot-examples/workflows/pom.xml +++ b/spring-boot-examples/workflows/pom.xml @@ -7,6 +7,7 @@ io.dapr spring-boot-examples 1.17.0-SNAPSHOT + ../pom.xml workflows diff --git a/testcontainers-dapr/pom.xml b/testcontainers-dapr/pom.xml index 9b78aeb9e9..786ec56a96 100644 --- a/testcontainers-dapr/pom.xml +++ b/testcontainers-dapr/pom.xml @@ -6,6 +6,7 @@ io.dapr dapr-sdk-parent 1.17.0-SNAPSHOT + ../pom.xml testcontainers-dapr @@ -43,29 +44,10 @@ org.apache.maven.plugins maven-source-plugin - - - attach-sources - - jar-no-fork - - - 
org.apache.maven.plugins maven-javadoc-plugin - - true - - - - attach-javadocs - - jar - - - org.jacoco From 74e0e6a4861018cd5d6a808260638d8f926f5fcd Mon Sep 17 00:00:00 2001 From: artur-ciocanu Date: Fri, 5 Dec 2025 10:38:40 +0200 Subject: [PATCH 11/17] Adding a Flux based subscribeToEvents method (#1598) * Adding a Flux based subscribeToEvents method Signed-off-by: Artur Ciocanu * Simplify GRPC stream handling Signed-off-by: Artur Ciocanu * Simplify Javadoc Signed-off-by: Artur Ciocanu * Fix unit tests and simplify implementation Signed-off-by: Artur Ciocanu * Adding event subscriber stream observer to simplify subscription logic Signed-off-by: Artur Ciocanu * Use start() method to start stream subscription Signed-off-by: Artur Ciocanu * Add unit test for event suscriber observer Signed-off-by: Artur Ciocanu * Improve the tests a little bit Signed-off-by: Artur Ciocanu * Remove the unnecessary method Signed-off-by: Artur Ciocanu * Improve error handling and use CloudEvent wrapper Signed-off-by: Artur Ciocanu * Fix unit tests asserts Signed-off-by: Artur Ciocanu * Adjust Java examples for Subscriber Signed-off-by: Artur Ciocanu --------- Signed-off-by: Artur Ciocanu Signed-off-by: salaboy --- .../io/dapr/examples/pubsub/stream/README.md | 30 +- .../examples/pubsub/stream/Subscriber.java | 31 +- .../java/io/dapr/client/DaprClientImpl.java | 37 ++ .../io/dapr/client/DaprPreviewClient.java | 16 +- .../java/io/dapr/client/Subscription.java | 1 + .../io/dapr/client/SubscriptionListener.java | 1 + .../EventSubscriberStreamObserver.java | 223 ++++++++ .../client/DaprPreviewClientGrpcTest.java | 81 +++ .../EventSubscriberStreamObserverTest.java | 506 ++++++++++++++++++ 9 files changed, 887 insertions(+), 39 deletions(-) create mode 100644 sdk/src/main/java/io/dapr/internal/subscription/EventSubscriberStreamObserver.java create mode 100644 sdk/src/test/java/io/dapr/internal/subscription/EventSubscriberStreamObserverTest.java diff --git a/examples/src/main/java/io/dapr/examples/pubsub/stream/README.md b/examples/src/main/java/io/dapr/examples/pubsub/stream/README.md index d9d41b3759..da3e4e2482 100644 --- a/examples/src/main/java/io/dapr/examples/pubsub/stream/README.md +++ b/examples/src/main/java/io/dapr/examples/pubsub/stream/README.md @@ -49,7 +49,7 @@ The subscriber uses the `DaprPreviewClient` interface to use a new feature where The publisher is a simple Java application with a main method that uses the Dapr gRPC Client to publish 10 messages to a specific topic. -In the `Subscriber.java` file, you will find the `Subscriber` class, containing the main method. The main method declares a `DaprPreviewClient` using the `DaprClientBuilder` class. When invoking `subscribeToEvents`, the subscriber provides an implementation of the `SubscriptionListener` interface, receiving a `Subscription` object. The `Subscription` object implements the `Closeable` interface and the `close()` method must be used to stop the subscription. +In the `Subscriber.java` file, you will find the `Subscriber` class, containing the main method. The main method declares a `DaprPreviewClient` using the `DaprClientBuilder` class. When invoking `subscribeToEvents`, the method returns a `Flux>` that can be processed using reactive operators like `doOnNext()` for event handling and `doOnError()` for error handling. The example uses `blockLast()` to keep the subscriber running indefinitely. 
For production use cases requiring explicit subscription lifecycle control, you can use `.subscribe()` which returns a `Disposable` that can be disposed via `disposable.dispose()`. ```java public class Subscriber { @@ -59,25 +59,19 @@ public class Subscriber { public static void main(String[] args) throws Exception { String topicName = getTopicName(args); try (var client = new DaprClientBuilder().buildPreviewClient()) { - var subscription = client.subscribeToEvents( + // Subscribe to events using the Flux-based reactive API + // The stream will emit CloudEvent objects as they arrive + client.subscribeToEvents( PUBSUB_NAME, topicName, - new SubscriptionListener<>() { - - @Override - public Mono onEvent(CloudEvent event) { - System.out.println("Subscriber got: " + event.getData()); - return Mono.just(Status.SUCCESS); - } - - @Override - public void onError(RuntimeException exception) { - System.out.println("Subscriber got exception: " + exception.getMessage()); - } - }, - TypeRef.STRING); - - subscription.awaitTermination(); + TypeRef.STRING) + .doOnNext(event -> { + System.out.println("Subscriber got: " + event.getData()); + }) + .doOnError(throwable -> { + System.out.println("Subscriber got exception: " + throwable.getMessage()); + }) + .blockLast(); // Blocks indefinitely until the stream completes (keeps the subscriber running) } } diff --git a/examples/src/main/java/io/dapr/examples/pubsub/stream/Subscriber.java b/examples/src/main/java/io/dapr/examples/pubsub/stream/Subscriber.java index 31678dce08..763bb436ce 100644 --- a/examples/src/main/java/io/dapr/examples/pubsub/stream/Subscriber.java +++ b/examples/src/main/java/io/dapr/examples/pubsub/stream/Subscriber.java @@ -14,10 +14,7 @@ package io.dapr.examples.pubsub.stream; import io.dapr.client.DaprClientBuilder; -import io.dapr.client.SubscriptionListener; -import io.dapr.client.domain.CloudEvent; import io.dapr.utils.TypeRef; -import reactor.core.publisher.Mono; /** * Subscriber using bi-directional gRPC streaming, which does not require an app port. 
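The README change above mentions using `.subscribe()` and a `Disposable` for explicit lifecycle control instead of `blockLast()`. A minimal sketch of that variant, using the Flux-based `subscribeToEvents` API introduced in this patch; the pubsub name and topic are placeholders, and the sleep only stands in for real application work:

```java
import io.dapr.client.DaprClientBuilder;
import io.dapr.utils.TypeRef;
import reactor.core.Disposable;

public class DisposableSubscriberSketch {

  public static void main(String[] args) throws Exception {
    try (var client = new DaprClientBuilder().buildPreviewClient()) {
      // Start the streaming subscription without blocking the calling thread.
      Disposable subscription = client.subscribeToEvents("messagebus", "testingtopic", TypeRef.STRING)
          .doOnNext(event -> System.out.println("Subscriber got: " + event.getData()))
          .doOnError(throwable -> System.out.println("Subscriber got exception: " + throwable.getMessage()))
          .subscribe();

      // ... do other work while events keep arriving ...
      Thread.sleep(10_000);

      // Stop the subscription explicitly when it is no longer needed.
      subscription.dispose();
    }
  }
}
```

Disposing the subscription completes the underlying gRPC request stream (the implementation wires `sink.onDispose` to `onCompleted()`), so this is the clean way to stop receiving events before the client itself is closed.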
@@ -44,25 +41,19 @@ public class Subscriber { public static void main(String[] args) throws Exception { String topicName = getTopicName(args); try (var client = new DaprClientBuilder().buildPreviewClient()) { - var subscription = client.subscribeToEvents( + // Subscribe to events using the Flux-based reactive API + // The stream will emit CloudEvent objects as they arrive + client.subscribeToEvents( PUBSUB_NAME, topicName, - new SubscriptionListener<>() { - - @Override - public Mono onEvent(CloudEvent event) { - System.out.println("Subscriber got: " + event.getData()); - return Mono.just(Status.SUCCESS); - } - - @Override - public void onError(RuntimeException exception) { - System.out.println("Subscriber got exception: " + exception.getMessage()); - } - }, - TypeRef.STRING); - - subscription.awaitTermination(); + TypeRef.STRING) + .doOnNext(event -> { + System.out.println("Subscriber got: " + event.getData()); + }) + .doOnError(throwable -> { + System.out.println("Subscriber got exception: " + throwable.getMessage()); + }) + .blockLast(); // Blocks indefinitely until the stream completes (keeps the subscriber running) } } diff --git a/sdk/src/main/java/io/dapr/client/DaprClientImpl.java b/sdk/src/main/java/io/dapr/client/DaprClientImpl.java index 012921a89e..0dfb1b644b 100644 --- a/sdk/src/main/java/io/dapr/client/DaprClientImpl.java +++ b/sdk/src/main/java/io/dapr/client/DaprClientImpl.java @@ -91,6 +91,7 @@ import io.dapr.internal.grpc.DaprClientGrpcInterceptors; import io.dapr.internal.resiliency.RetryPolicy; import io.dapr.internal.resiliency.TimeoutPolicy; +import io.dapr.internal.subscription.EventSubscriberStreamObserver; import io.dapr.serializer.DaprObjectSerializer; import io.dapr.serializer.DefaultObjectSerializer; import io.dapr.utils.DefaultContentTypeConverter; @@ -475,6 +476,42 @@ public Subscription subscribeToEvents( return buildSubscription(listener, type, request); } + /** + * {@inheritDoc} + */ + @Override + public Flux> subscribeToEvents(String pubsubName, String topic, TypeRef type) { + DaprProtos.SubscribeTopicEventsRequestInitialAlpha1 initialRequest = + DaprProtos.SubscribeTopicEventsRequestInitialAlpha1.newBuilder() + .setTopic(topic) + .setPubsubName(pubsubName) + .build(); + DaprProtos.SubscribeTopicEventsRequestAlpha1 request = + DaprProtos.SubscribeTopicEventsRequestAlpha1.newBuilder() + .setInitialRequest(initialRequest) + .build(); + + return Flux.create(sink -> { + DaprGrpc.DaprStub interceptedStub = this.grpcInterceptors.intercept(this.asyncStub); + EventSubscriberStreamObserver eventSubscriber = new EventSubscriberStreamObserver<>( + interceptedStub, + sink, + type, + this.objectSerializer + ); + StreamObserver requestStream = eventSubscriber.start(request); + + // Cleanup when Flux is cancelled or completed + sink.onDispose(() -> { + try { + requestStream.onCompleted(); + } catch (Exception e) { + logger.debug("Completing the subscription stream resulted in an error: {}", e.getMessage()); + } + }); + }, FluxSink.OverflowStrategy.BUFFER); + } + @Nonnull private Subscription buildSubscription( SubscriptionListener listener, diff --git a/sdk/src/main/java/io/dapr/client/DaprPreviewClient.java b/sdk/src/main/java/io/dapr/client/DaprPreviewClient.java index 92c6a61c3e..545b8e5dc5 100644 --- a/sdk/src/main/java/io/dapr/client/DaprPreviewClient.java +++ b/sdk/src/main/java/io/dapr/client/DaprPreviewClient.java @@ -17,6 +17,7 @@ import io.dapr.client.domain.BulkPublishRequest; import io.dapr.client.domain.BulkPublishResponse; import 
io.dapr.client.domain.BulkPublishResponseFailedEntry; +import io.dapr.client.domain.CloudEvent; import io.dapr.client.domain.ConversationRequest; import io.dapr.client.domain.ConversationRequestAlpha2; import io.dapr.client.domain.ConversationResponse; @@ -32,6 +33,7 @@ import io.dapr.client.domain.UnlockResponseStatus; import io.dapr.client.domain.query.Query; import io.dapr.utils.TypeRef; +import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; import java.util.List; @@ -271,12 +273,24 @@ Mono> publishEvents(String pubsubName, String topicNa * @param topic Name of the topic to subscribe to. * @param listener Callback methods to process events. * @param type Type for object deserialization. - * @return An active subscription. * @param Type of object deserialization. + * @return An active subscription. + * @deprecated Use {@link #subscribeToEvents(String, String, TypeRef)} instead for a more reactive approach. */ + @Deprecated Subscription subscribeToEvents( String pubsubName, String topic, SubscriptionListener listener, TypeRef type); + /** + * Subscribe to pubsub events via streaming using Project Reactor Flux. + * @param pubsubName Name of the pubsub component. + * @param topic Name of the topic to subscribe to. + * @param type Type for object deserialization. + * @return A Flux of CloudEvents containing deserialized event payloads and metadata. + * @param Type of the event payload. + */ + Flux> subscribeToEvents(String pubsubName, String topic, TypeRef type); + /** * Schedules a job using the provided job request details. * diff --git a/sdk/src/main/java/io/dapr/client/Subscription.java b/sdk/src/main/java/io/dapr/client/Subscription.java index 53e89e8456..2cbd1e9b30 100644 --- a/sdk/src/main/java/io/dapr/client/Subscription.java +++ b/sdk/src/main/java/io/dapr/client/Subscription.java @@ -35,6 +35,7 @@ * Streaming subscription of events for Dapr's pubsub. * @param Application's object type. */ +@Deprecated public class Subscription implements Closeable { private final BlockingQueue ackQueue = new LinkedBlockingQueue<>(50); diff --git a/sdk/src/main/java/io/dapr/client/SubscriptionListener.java b/sdk/src/main/java/io/dapr/client/SubscriptionListener.java index 5a467d69f4..c5420af602 100644 --- a/sdk/src/main/java/io/dapr/client/SubscriptionListener.java +++ b/sdk/src/main/java/io/dapr/client/SubscriptionListener.java @@ -20,6 +20,7 @@ * Callback interface to receive events from a streaming subscription of events. * @param Object type for deserialization. */ +@Deprecated public interface SubscriptionListener { /** diff --git a/sdk/src/main/java/io/dapr/internal/subscription/EventSubscriberStreamObserver.java b/sdk/src/main/java/io/dapr/internal/subscription/EventSubscriberStreamObserver.java new file mode 100644 index 0000000000..56131882b8 --- /dev/null +++ b/sdk/src/main/java/io/dapr/internal/subscription/EventSubscriberStreamObserver.java @@ -0,0 +1,223 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package io.dapr.internal.subscription; + +import io.dapr.client.domain.CloudEvent; +import io.dapr.exceptions.DaprException; +import io.dapr.serializer.DaprObjectSerializer; +import io.dapr.utils.TypeRef; +import io.dapr.v1.DaprAppCallbackProtos; +import io.dapr.v1.DaprGrpc; +import io.dapr.v1.DaprProtos; +import io.grpc.stub.StreamObserver; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import reactor.core.publisher.FluxSink; + +import java.io.IOException; + +/** + * StreamObserver implementation for subscribing to Dapr pub/sub events. + * Thread Safety: This class relies on gRPC's StreamObserver contract, which guarantees that + * onNext(), onError(), and onCompleted() are never called concurrently and always from the + * same thread. Therefore, no additional synchronization is needed. + * + * @param The type of the event payload + */ +public class EventSubscriberStreamObserver implements StreamObserver { + + private static final Logger logger = LoggerFactory.getLogger(EventSubscriberStreamObserver.class); + + private final DaprGrpc.DaprStub stub; + private final FluxSink> sink; + private final TypeRef type; + private final DaprObjectSerializer objectSerializer; + + private StreamObserver requestStream; + + /** + * Creates a new EventSubscriberStreamObserver. + * + * @param stub The gRPC stub for making Dapr service calls + * @param sink The FluxSink to emit CloudEvents to + * @param type The TypeRef for deserializing event payloads + * @param objectSerializer The serializer to use for deserialization + */ + public EventSubscriberStreamObserver( + DaprGrpc.DaprStub stub, + FluxSink> sink, + TypeRef type, + DaprObjectSerializer objectSerializer) { + this.stub = stub; + this.sink = sink; + this.type = type; + this.objectSerializer = objectSerializer; + } + + /** Starts the subscription by sending the initial request. 
+ * + * @param request The subscription request + * @return The StreamObserver to send further requests (acknowledgments) + */ + public StreamObserver start( + DaprProtos.SubscribeTopicEventsRequestAlpha1 request + ) { + requestStream = stub.subscribeTopicEventsAlpha1(this); + + requestStream.onNext(request); + + return requestStream; + } + + @Override + public void onNext(DaprProtos.SubscribeTopicEventsResponseAlpha1 response) { + if (!isValidEventMessage(response)) { + return; + } + + DaprAppCallbackProtos.TopicEventRequest message = response.getEventMessage(); + String eventId = message.getId(); + + try { + T data = deserializeEventData(message); + CloudEvent cloudEvent = buildCloudEvent(message, data); + emitEventAndAcknowledge(cloudEvent, eventId); + } catch (IOException e) { + // Deserialization failure - send DROP ack + handleDeserializationError(eventId, e); + } catch (Exception e) { + // Processing failure - send RETRY ack + handleProcessingError(eventId, e); + } + } + + @Override + public void onError(Throwable throwable) { + sink.error(DaprException.propagate(throwable)); + } + + @Override + public void onCompleted() { + sink.complete(); + } + + private boolean isValidEventMessage(DaprProtos.SubscribeTopicEventsResponseAlpha1 response) { + if (response.getEventMessage() == null) { + logger.debug("Received response with null event message, skipping"); + return false; + } + + DaprAppCallbackProtos.TopicEventRequest message = response.getEventMessage(); + + if (message.getPubsubName() == null || message.getPubsubName().isEmpty()) { + logger.debug("Received event with empty pubsub name, skipping"); + return false; + } + + if (message.getId() == null || message.getId().isEmpty()) { + logger.debug("Received event with empty ID, skipping"); + return false; + } + + return true; + } + + private T deserializeEventData(DaprAppCallbackProtos.TopicEventRequest message) throws IOException { + if (type == null) { + logger.debug("Type is null, skipping deserialization for event ID: {}", message.getId()); + return null; + } + + return objectSerializer.deserialize(message.getData().toByteArray(), type); + } + + private CloudEvent buildCloudEvent(DaprAppCallbackProtos.TopicEventRequest message, T data) { + CloudEvent cloudEvent = new CloudEvent<>(); + + cloudEvent.setId(message.getId()); + cloudEvent.setType(message.getType()); + cloudEvent.setSpecversion(message.getSpecVersion()); + cloudEvent.setDatacontenttype(message.getDataContentType()); + cloudEvent.setTopic(message.getTopic()); + cloudEvent.setPubsubName(message.getPubsubName()); + cloudEvent.setData(data); + + return cloudEvent; + } + + private void emitEventAndAcknowledge(CloudEvent cloudEvent, String eventId) { + sink.next(cloudEvent); + + // Send SUCCESS acknowledgment + requestStream.onNext(buildSuccessAck(eventId)); + } + + private void handleDeserializationError(String eventId, IOException cause) { + logger.error("Deserialization failed for event ID: {}, sending DROP ack", eventId, cause); + + // Send DROP ack - cannot process malformed data + requestStream.onNext(buildDropAck(eventId)); + + // Propagate error to sink + sink.error(new DaprException("DESERIALIZATION_ERROR", + "Failed to deserialize event with ID: " + eventId, cause)); + } + + private void handleProcessingError(String eventId, Exception cause) { + logger.error("Processing error for event ID: {}, attempting to send RETRY ack", eventId, cause); + + try { + // Try to send RETRY acknowledgment + requestStream.onNext(buildRetryAck(eventId)); + } catch (Exception 
ackException) { + // Failed to send ack - this is critical + logger.error("Failed to send RETRY ack for event ID: {}", eventId, ackException); + sink.error(DaprException.propagate(ackException)); + + return; + } + + // Propagate the original processing error + sink.error(DaprException.propagate(cause)); + } + + private static DaprProtos.SubscribeTopicEventsRequestAlpha1 buildSuccessAck(String eventId) { + return buildAckRequest(eventId, DaprAppCallbackProtos.TopicEventResponse.TopicEventResponseStatus.SUCCESS); + } + + private static DaprProtos.SubscribeTopicEventsRequestAlpha1 buildRetryAck(String eventId) { + return buildAckRequest(eventId, DaprAppCallbackProtos.TopicEventResponse.TopicEventResponseStatus.RETRY); + } + + private static DaprProtos.SubscribeTopicEventsRequestAlpha1 buildDropAck(String eventId) { + return buildAckRequest(eventId, DaprAppCallbackProtos.TopicEventResponse.TopicEventResponseStatus.DROP); + } + + private static DaprProtos.SubscribeTopicEventsRequestAlpha1 buildAckRequest( + String eventId, + DaprAppCallbackProtos.TopicEventResponse.TopicEventResponseStatus status) { + DaprProtos.SubscribeTopicEventsRequestProcessedAlpha1 eventProcessed = + DaprProtos.SubscribeTopicEventsRequestProcessedAlpha1.newBuilder() + .setId(eventId) + .setStatus( + DaprAppCallbackProtos.TopicEventResponse.newBuilder() + .setStatus(status) + .build()) + .build(); + + return DaprProtos.SubscribeTopicEventsRequestAlpha1.newBuilder() + .setEventProcessed(eventProcessed) + .build(); + } +} diff --git a/sdk/src/test/java/io/dapr/client/DaprPreviewClientGrpcTest.java b/sdk/src/test/java/io/dapr/client/DaprPreviewClientGrpcTest.java index f7b5584cc7..a42c4f946c 100644 --- a/sdk/src/test/java/io/dapr/client/DaprPreviewClientGrpcTest.java +++ b/sdk/src/test/java/io/dapr/client/DaprPreviewClientGrpcTest.java @@ -586,6 +586,87 @@ public void onError(RuntimeException exception) { assertEquals(numErrors, errors.size()); } + @Test + public void subscribeEventFluxTest() throws Exception { + var numEvents = 100; + var pubsubName = "pubsubName"; + var topicName = "topicName"; + var data = "my message"; + var started = new Semaphore(0); + + doAnswer((Answer>) invocation -> { + StreamObserver observer = + (StreamObserver) invocation.getArguments()[0]; + + var emitterThread = new Thread(() -> { + try { + started.acquire(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + + observer.onNext(DaprProtos.SubscribeTopicEventsResponseAlpha1.getDefaultInstance()); + + for (int i = 0; i < numEvents; i++) { + DaprProtos.SubscribeTopicEventsResponseAlpha1 reponse = + DaprProtos.SubscribeTopicEventsResponseAlpha1.newBuilder() + .setEventMessage(DaprAppCallbackProtos.TopicEventRequest.newBuilder() + .setId(Integer.toString(i)) + .setPubsubName(pubsubName) + .setTopic(topicName) + .setData(ByteString.copyFromUtf8("\"" + data + "\"")) + .setDataContentType("application/json") + .build()) + .build(); + observer.onNext(reponse); + } + + observer.onCompleted(); + }); + + emitterThread.start(); + + return new StreamObserver<>() { + @Override + public void onNext(DaprProtos.SubscribeTopicEventsRequestAlpha1 subscribeTopicEventsRequestAlpha1) { + started.release(); + } + + @Override + public void onError(Throwable throwable) { + // No-op + } + + @Override + public void onCompleted() { + // No-op + } + }; + }).when(daprStub).subscribeTopicEventsAlpha1(any(StreamObserver.class)); + + final AtomicInteger eventCount = new AtomicInteger(0); + final Semaphore gotAll = new Semaphore(0); + var disposable = 
previewClient.subscribeToEvents(pubsubName, topicName, TypeRef.STRING) + .doOnNext(cloudEvent -> { + assertEquals(data, cloudEvent.getData()); + assertEquals(pubsubName, cloudEvent.getPubsubName()); + assertEquals(topicName, cloudEvent.getTopic()); + assertNotNull(cloudEvent.getId()); + + int count = eventCount.incrementAndGet(); + + if (count >= numEvents) { + gotAll.release(); + } + }) + .subscribe(); + + gotAll.acquire(); + disposable.dispose(); + + assertEquals(numEvents, eventCount.get()); + } + @Test public void converseShouldThrowIllegalArgumentExceptionWhenComponentNameIsNull() throws Exception { List inputs = new ArrayList<>(); diff --git a/sdk/src/test/java/io/dapr/internal/subscription/EventSubscriberStreamObserverTest.java b/sdk/src/test/java/io/dapr/internal/subscription/EventSubscriberStreamObserverTest.java new file mode 100644 index 0000000000..7328f79e51 --- /dev/null +++ b/sdk/src/test/java/io/dapr/internal/subscription/EventSubscriberStreamObserverTest.java @@ -0,0 +1,506 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. +*/ + +package io.dapr.internal.subscription; + +import com.google.protobuf.ByteString; +import io.dapr.client.domain.CloudEvent; +import io.dapr.exceptions.DaprException; +import io.dapr.serializer.DaprObjectSerializer; +import io.dapr.serializer.DefaultObjectSerializer; +import io.dapr.utils.TypeRef; +import io.dapr.v1.DaprAppCallbackProtos; +import io.dapr.v1.DaprGrpc; +import io.dapr.v1.DaprProtos; +import io.grpc.stub.StreamObserver; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.mockito.ArgumentCaptor; +import reactor.core.publisher.Flux; +import reactor.test.StepVerifier; + +import java.io.IOException; +import java.util.List; +import java.util.stream.Collectors; + +import static org.junit.jupiter.api.Assertions.*; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.*; + +/** + * Unit tests for EventSubscriberStreamObserver. 
+ */ +class EventSubscriberStreamObserverTest { + + public static final String PUBSUB_NAME = "pubsub"; + public static final String TOPIC_NAME = "topic"; + private DaprGrpc.DaprStub mockStub; + private DaprObjectSerializer objectSerializer; + private StreamObserver mockRequestStream; + + @BeforeEach + @SuppressWarnings("unchecked") + void setUp() { + mockStub = mock(DaprGrpc.DaprStub.class); + objectSerializer = new DefaultObjectSerializer(); + mockRequestStream = mock(StreamObserver.class); + + when(mockStub.subscribeTopicEventsAlpha1(any())).thenReturn(mockRequestStream); + } + + @Test + @DisplayName("Should successfully process events and send SUCCESS acks") + void testSuccessfulEventProcessing() { + Flux> flux = Flux.create(sink -> { + EventSubscriberStreamObserver observer = new EventSubscriberStreamObserver<>( + mockStub, + sink, + TypeRef.STRING, + objectSerializer + ); + + // Start the subscription + DaprProtos.SubscribeTopicEventsRequestAlpha1 initialRequest = buildInitialRequest( + ); + observer.start(initialRequest); + + // Simulate receiving an event + DaprProtos.SubscribeTopicEventsResponseAlpha1 response = buildEventResponse( + "event-1", + "Hello World" + ); + observer.onNext(response); + + // Complete the stream + observer.onCompleted(); + }); + + StepVerifier.create(flux) + .assertNext(cloudEvent -> { + assertEquals("Hello World", cloudEvent.getData()); + assertEquals("event-1", cloudEvent.getId()); + assertEquals(PUBSUB_NAME, cloudEvent.getPubsubName()); + assertEquals(TOPIC_NAME, cloudEvent.getTopic()); + }) + .verifyComplete(); + + ArgumentCaptor requestCaptor = + ArgumentCaptor.forClass(DaprProtos.SubscribeTopicEventsRequestAlpha1.class); + + verify(mockRequestStream, times(2)).onNext(requestCaptor.capture()); + + List requests = requestCaptor.getAllValues(); + + assertEquals(2, requests.size()); + assertTrue(requests.get(0).hasInitialRequest()); + assertTrue(requests.get(1).hasEventProcessed()); + assertEquals("event-1", requests.get(1).getEventProcessed().getId()); + assertEquals( + DaprAppCallbackProtos.TopicEventResponse.TopicEventResponseStatus.SUCCESS, + requests.get(1).getEventProcessed().getStatus().getStatus() + ); + } + + @Test + @DisplayName("Should handle multiple consecutive events correctly") + void testMultipleEvents() { + Flux> flux = Flux.create(sink -> { + EventSubscriberStreamObserver observer = new EventSubscriberStreamObserver<>( + mockStub, + sink, + TypeRef.STRING, + objectSerializer + ); + + DaprProtos.SubscribeTopicEventsRequestAlpha1 initialRequest = buildInitialRequest( + ); + observer.start(initialRequest); + + observer.onNext(buildEventResponse("event-1", "Message 1")); + observer.onNext(buildEventResponse("event-2", "Message 2")); + observer.onNext(buildEventResponse("event-3", "Message 3")); + + observer.onCompleted(); + }); + + StepVerifier.create(flux) + .assertNext(cloudEvent -> { + assertEquals("Message 1", cloudEvent.getData()); + assertEquals("event-1", cloudEvent.getId()); + }) + .assertNext(cloudEvent -> { + assertEquals("Message 2", cloudEvent.getData()); + assertEquals("event-2", cloudEvent.getId()); + }) + .assertNext(cloudEvent -> { + assertEquals("Message 3", cloudEvent.getData()); + assertEquals("event-3", cloudEvent.getId()); + }) + .verifyComplete(); + + verify(mockRequestStream, times(4)).onNext(any()); + } + + @Test + @DisplayName("Should send DROP ack when deserialization fails") + void testDeserializationError() { + Flux> flux = Flux.create(sink -> { + EventSubscriberStreamObserver observer = new 
EventSubscriberStreamObserver<>( + mockStub, + sink, + TypeRef.STRING, + objectSerializer + ); + + DaprProtos.SubscribeTopicEventsRequestAlpha1 initialRequest = buildInitialRequest( + ); + observer.start(initialRequest); + + // Send an event with invalid data (can't deserialize to String) + DaprProtos.SubscribeTopicEventsResponseAlpha1 response = DaprProtos.SubscribeTopicEventsResponseAlpha1.newBuilder() + .setEventMessage( + DaprAppCallbackProtos.TopicEventRequest.newBuilder() + .setId("event-1") + .setPubsubName(PUBSUB_NAME) + .setTopic(TOPIC_NAME) + .setData(ByteString.copyFrom(new byte[]{(byte) 0xFF, (byte) 0xFE})) // Invalid UTF-8 + .build() + ) + .build(); + + observer.onNext(response); + }); + + StepVerifier.create(flux) + .expectErrorMatches(error -> + error instanceof DaprException + && error.getMessage().contains("DESERIALIZATION_ERROR") + && error.getMessage().contains("event-1")) + .verify(); + + ArgumentCaptor requestCaptor = + ArgumentCaptor.forClass(DaprProtos.SubscribeTopicEventsRequestAlpha1.class); + + verify(mockRequestStream, atLeast(2)).onNext(requestCaptor.capture()); + + List ackRequests = requestCaptor.getAllValues().stream() + .filter(DaprProtos.SubscribeTopicEventsRequestAlpha1::hasEventProcessed) + .collect(Collectors.toList()); + + assertEquals(1, ackRequests.size()); + assertEquals("event-1", ackRequests.get(0).getEventProcessed().getId()); + assertEquals( + DaprAppCallbackProtos.TopicEventResponse.TopicEventResponseStatus.DROP, + ackRequests.get(0).getEventProcessed().getStatus().getStatus() + ); + } + + @Test + @DisplayName("Should send RETRY ack when non-deserialization error occurs") + void testProcessingError() { + Flux> flux = Flux.create(sink -> { + EventSubscriberStreamObserver observer = new EventSubscriberStreamObserver<>( + mockStub, + sink, + TypeRef.STRING, + objectSerializer + ); + + DaprProtos.SubscribeTopicEventsRequestAlpha1 initialRequest = buildInitialRequest(); + observer.start(initialRequest); + + // Simulate a processing error by throwing during sink.next() + sink.onRequest(n -> { + throw new RuntimeException("Processing error"); + }); + + observer.onNext(buildEventResponse("event-1", "Hello")); + }); + + StepVerifier.create(flux) + .expectError(RuntimeException.class) + .verify(); + + // Note: When error occurs in onRequest callback (before processing), + // no ack is sent as the error happens before we can handle the event + verify(mockRequestStream, times(1)).onNext(any()); // Only initial request sent + } + + @Test + @DisplayName("Should propagate gRPC errors as DaprException") + void testGrpcError() { + Flux> flux = Flux.create(sink -> { + EventSubscriberStreamObserver observer = new EventSubscriberStreamObserver<>( + mockStub, + sink, + TypeRef.STRING, + objectSerializer + ); + + DaprProtos.SubscribeTopicEventsRequestAlpha1 initialRequest = buildInitialRequest(); + observer.start(initialRequest); + + // Simulate gRPC error + observer.onError(new RuntimeException("gRPC connection failed")); + }); + + StepVerifier.create(flux) + .expectError(DaprException.class) + .verify(); + } + + @Test + @DisplayName("Should handle null event messages gracefully without emitting events") + void testNullEventMessage() { + Flux> flux = Flux.create(sink -> { + EventSubscriberStreamObserver observer = new EventSubscriberStreamObserver<>( + mockStub, + sink, + TypeRef.STRING, + objectSerializer + ); + + DaprProtos.SubscribeTopicEventsRequestAlpha1 initialRequest = buildInitialRequest( + ); + observer.start(initialRequest); + + 
DaprProtos.SubscribeTopicEventsResponseAlpha1 response = DaprProtos.SubscribeTopicEventsResponseAlpha1.newBuilder() + .build(); + + observer.onNext(response); + observer.onCompleted(); + }); + + StepVerifier.create(flux) + .verifyComplete(); + + verify(mockRequestStream, times(1)).onNext(any()); + } + + @Test + @DisplayName("Should skip events with empty pubsub name") + void testEmptyPubsubName() { + Flux> flux = Flux.create(sink -> { + EventSubscriberStreamObserver observer = new EventSubscriberStreamObserver<>( + mockStub, + sink, + TypeRef.STRING, + objectSerializer + ); + + DaprProtos.SubscribeTopicEventsRequestAlpha1 initialRequest = buildInitialRequest( + ); + observer.start(initialRequest); + + DaprProtos.SubscribeTopicEventsResponseAlpha1 response = DaprProtos.SubscribeTopicEventsResponseAlpha1.newBuilder() + .setEventMessage( + DaprAppCallbackProtos.TopicEventRequest.newBuilder() + .setId("event-1") + .setPubsubName("") + .setTopic(TOPIC_NAME) + .setData(ByteString.copyFromUtf8("\"Hello\"")) + .build() + ) + .build(); + + observer.onNext(response); + observer.onCompleted(); + }); + + StepVerifier.create(flux) + .verifyComplete(); + + verify(mockRequestStream, times(1)).onNext(any()); + } + + @Test + @DisplayName("Should skip events with empty event ID") + void testEmptyEventId() { + Flux> flux = Flux.create(sink -> { + EventSubscriberStreamObserver observer = new EventSubscriberStreamObserver<>( + mockStub, + sink, + TypeRef.STRING, + objectSerializer + ); + + DaprProtos.SubscribeTopicEventsRequestAlpha1 initialRequest = buildInitialRequest( + ); + observer.start(initialRequest); + + DaprProtos.SubscribeTopicEventsResponseAlpha1 response = DaprProtos.SubscribeTopicEventsResponseAlpha1.newBuilder() + .setEventMessage( + DaprAppCallbackProtos.TopicEventRequest.newBuilder() + .setId("") + .setPubsubName(PUBSUB_NAME) + .setTopic(TOPIC_NAME) + .setData(ByteString.copyFromUtf8("\"Hello\"")) + .build() + ) + .build(); + + observer.onNext(response); + observer.onCompleted(); + }); + + StepVerifier.create(flux) + .verifyComplete(); + + verify(mockRequestStream, times(1)).onNext(any()); + } + + @Test + @DisplayName("Should handle null type parameter and emit CloudEvent with null data") + void testNullData() { + Flux> flux = Flux.create(sink -> { + EventSubscriberStreamObserver observer = new EventSubscriberStreamObserver<>( + mockStub, + sink, + null, // null type + objectSerializer + ); + + DaprProtos.SubscribeTopicEventsRequestAlpha1 initialRequest = buildInitialRequest( + ); + + observer.start(initialRequest); + observer.onNext(buildEventResponse("event-1", "Hello")); + observer.onCompleted(); + }); + + StepVerifier.create(flux) + .assertNext(cloudEvent -> { + assertNull(cloudEvent.getData()); + assertEquals("event-1", cloudEvent.getId()); + assertEquals(PUBSUB_NAME, cloudEvent.getPubsubName()); + assertEquals(TOPIC_NAME, cloudEvent.getTopic()); + }) + .verifyComplete(); + + verify(mockRequestStream, times(2)).onNext(any()); + } + + @Test + @DisplayName("Should deserialize and emit complex objects correctly") + void testComplexObjectSerialization() throws IOException { + TestEvent testEvent = new TestEvent("test-name", 42); + byte[] serializedEvent = objectSerializer.serialize(testEvent); + + Flux> flux = Flux.create(sink -> { + EventSubscriberStreamObserver observer = new EventSubscriberStreamObserver<>( + mockStub, + sink, + TypeRef.get(TestEvent.class), + objectSerializer + ); + + DaprProtos.SubscribeTopicEventsRequestAlpha1 initialRequest = buildInitialRequest( + ); + 
observer.start(initialRequest); + + DaprProtos.SubscribeTopicEventsResponseAlpha1 response = DaprProtos.SubscribeTopicEventsResponseAlpha1.newBuilder() + .setEventMessage( + DaprAppCallbackProtos.TopicEventRequest.newBuilder() + .setId("event-1") + .setPubsubName(PUBSUB_NAME) + .setTopic(TOPIC_NAME) + .setData(ByteString.copyFrom(serializedEvent)) + .build() + ) + .build(); + + observer.onNext(response); + observer.onCompleted(); + }); + + StepVerifier.create(flux) + .assertNext(cloudEvent -> { + TestEvent event = cloudEvent.getData(); + assertEquals("test-name", event.name); + assertEquals(42, event.value); + assertEquals("event-1", cloudEvent.getId()); + }) + .verifyComplete(); + } + + @Test + @DisplayName("Should propagate errors when ack sending fails") + void testErrorDuringSendingAck() { + doThrow(new RuntimeException("Failed to send ack")) + .when(mockRequestStream) + .onNext(argThat(DaprProtos.SubscribeTopicEventsRequestAlpha1::hasEventProcessed)); + + Flux> flux = Flux.create(sink -> { + EventSubscriberStreamObserver observer = new EventSubscriberStreamObserver<>( + mockStub, + sink, + TypeRef.STRING, + objectSerializer + ); + + DaprProtos.SubscribeTopicEventsRequestAlpha1 initialRequest = buildInitialRequest(); + observer.start(initialRequest); + + observer.onNext(buildEventResponse("event-1", "Hello")); + }); + + StepVerifier.create(flux) + .assertNext(cloudEvent -> assertEquals("Hello", cloudEvent.getData())) // Event is emitted before ack + .expectError(DaprException.class) // Then error when sending ack + .verify(); + } + + private DaprProtos.SubscribeTopicEventsRequestAlpha1 buildInitialRequest() { + return DaprProtos.SubscribeTopicEventsRequestAlpha1.newBuilder() + .setInitialRequest( + DaprProtos.SubscribeTopicEventsRequestInitialAlpha1.newBuilder() + .setPubsubName(PUBSUB_NAME) + .setTopic(TOPIC_NAME) + .build() + ) + .build(); + } + + private DaprProtos.SubscribeTopicEventsResponseAlpha1 buildEventResponse(String eventId, String data) { + + try { + byte[] serializedData = objectSerializer.serialize(data); + return DaprProtos.SubscribeTopicEventsResponseAlpha1.newBuilder() + .setEventMessage( + DaprAppCallbackProtos.TopicEventRequest.newBuilder() + .setId(eventId) + .setPubsubName(PUBSUB_NAME) + .setTopic(TOPIC_NAME) + .setData(ByteString.copyFrom(serializedData)) + .build() + ) + .build(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + public static class TestEvent { + public String name; + public int value; + + public TestEvent() { + } + + public TestEvent(String name, int value) { + this.name = name; + this.value = value; + } + } +} From 09298eb45486d04b9d88214248a843eec900e68b Mon Sep 17 00:00:00 2001 From: Marc Duiker Date: Fri, 5 Dec 2025 15:07:50 +0100 Subject: [PATCH 12/17] Remove SDK docs due to migration to main Docs repo (#1593) * Remove SDK docs due to migration to main Docs repo Signed-off-by: Marc Duiker * Remove sed lines related to sdk docs Signed-off-by: Marc Duiker --------- Signed-off-by: Marc Duiker Co-authored-by: salaboy Signed-off-by: salaboy --- .github/scripts/update_docs.sh | 9 - daprdocs/README.md | 25 - .../java-contributing.md | 27 - daprdocs/content/en/java-sdk-docs/_index.md | 145 ---- .../en/java-sdk-docs/java-ai/_index.md | 7 - .../en/java-sdk-docs/java-ai/java-ai-howto.md | 105 --- .../en/java-sdk-docs/java-client/_index.md | 756 ------------------ .../java-sdk-docs/java-client/properties.md | 211 ----- .../en/java-sdk-docs/java-jobs/_index.md | 7 - .../java-jobs/java-jobs-howto.md | 164 ---- 
.../en/java-sdk-docs/java-workflow/_index.md | 7 - .../java-workflow/java-workflow-howto.md | 284 ------- .../en/java-sdk-docs/spring-boot/_index.md | 347 -------- 13 files changed, 2094 deletions(-) delete mode 100644 daprdocs/README.md delete mode 100644 daprdocs/content/en/java-sdk-contributing/java-contributing.md delete mode 100644 daprdocs/content/en/java-sdk-docs/_index.md delete mode 100644 daprdocs/content/en/java-sdk-docs/java-ai/_index.md delete mode 100644 daprdocs/content/en/java-sdk-docs/java-ai/java-ai-howto.md delete mode 100644 daprdocs/content/en/java-sdk-docs/java-client/_index.md delete mode 100644 daprdocs/content/en/java-sdk-docs/java-client/properties.md delete mode 100644 daprdocs/content/en/java-sdk-docs/java-jobs/_index.md delete mode 100644 daprdocs/content/en/java-sdk-docs/java-jobs/java-jobs-howto.md delete mode 100644 daprdocs/content/en/java-sdk-docs/java-workflow/_index.md delete mode 100644 daprdocs/content/en/java-sdk-docs/java-workflow/java-workflow-howto.md delete mode 100644 daprdocs/content/en/java-sdk-docs/spring-boot/_index.md diff --git a/.github/scripts/update_docs.sh b/.github/scripts/update_docs.sh index 21c6bddc71..0116765080 100755 --- a/.github/scripts/update_docs.sh +++ b/.github/scripts/update_docs.sh @@ -11,21 +11,12 @@ DAPR_JAVA_SDK_ALPHA_VERSION=`echo $DAPR_JAVA_SDK_VERSION | sed 's/^[0-9]*\./0./' if [[ "$OSTYPE" == "darwin"* ]]; then sed -i bak "s/.*<\/version>\$/${DAPR_JAVA_SDK_VERSION}<\/version>/g" README.md sed -i bak "s/compile('io.dapr:\(.*\):.*')/compile('io.dapr:\\1:${DAPR_JAVA_SDK_VERSION}')/g" README.md - sed -i bak "s/.*<\/version>\$/${DAPR_JAVA_SDK_VERSION}<\/version>/g" daprdocs/content/en/java-sdk-docs/_index.md - sed -i bak "s/compile('io.dapr:\(.*\):.*')/compile('io.dapr:\\1:${DAPR_JAVA_SDK_VERSION}')/g" daprdocs/content/en/java-sdk-docs/_index.md - sed -i bak "s/.*<\/version>\$/${DAPR_JAVA_SDK_ALPHA_VERSION}<\/version>/g" daprdocs/content/en/java-sdk-docs/spring-boot/_index.md rm README.mdbak else sed -i "s/.*<\/version>\$/${DAPR_JAVA_SDK_VERSION}<\/version>/g" README.md sed -i "s/compile('io.dapr:\(.*\):.*')/compile('io.dapr:\\1:${DAPR_JAVA_SDK_VERSION}')/g" README.md - sed -i "s/.*<\/version>\$/${DAPR_JAVA_SDK_VERSION}<\/version>/g" daprdocs/content/en/java-sdk-docs/_index.md - sed -i "s/compile('io.dapr:\(.*\):.*')/compile('io.dapr:\\1:${DAPR_JAVA_SDK_VERSION}')/g" daprdocs/content/en/java-sdk-docs/_index.md - sed -i "s/.*<\/version>\$/${DAPR_JAVA_SDK_ALPHA_VERSION}<\/version>/g" daprdocs/content/en/java-sdk-docs/spring-boot/_index.md fi -rm -f daprdocs/content/en/java-sdk-docs/_index.mdbak || echo -rm -f daprdocs/content/en/java-sdk-docs/spring-boot/_index.md/_index.mdbak || echo - rm -rf docs ./mvnw -Dmaven.test.skip=true -Djacoco.skip=true clean install ./mvnw -Dmaven.test.skip=true -Djacoco.skip=true site-deploy diff --git a/daprdocs/README.md b/daprdocs/README.md deleted file mode 100644 index 1fe0b1234b..0000000000 --- a/daprdocs/README.md +++ /dev/null @@ -1,25 +0,0 @@ -# Dapr Java SDK documentation - -This page covers how the documentation is structured for the Dapr Java SDK - -## Dapr Docs - -All Dapr documentation is hosted at [docs.dapr.io](https://docs.dapr.io), including the docs for the [Java SDK](https://docs.dapr.io/developing-applications/sdks/java/). Head over there if you want to read the docs. 
- -### Java SDK docs source - -Although the docs site code and content is in the [docs repo](https://github.com/dapr/docs), the Java SDK content and images are within the `content` and `static` directories, respectively. - -This allows separation of roles and expertise between maintainers, and makes it easy to find the docs files you are looking for. - -## Writing Java SDK docs - -To get up and running to write Java SDK docs, visit the [docs repo](https://github.com/dapr/docs) to initialize your environment. It will clone both the docs repo and this repo, so you can make changes and see it rendered within the site instantly, as well as commit and PR into this repo. - -Make sure to read the [docs contributing guide](https://docs.dapr.io/contributing/contributing-docs/) for information on style/semantics/etc. - -## Docs architecture - -The docs site is built on [Hugo](https://gohugo.io), which lives in the docs repo. This repo is setup as a git submodule so that when the repo is cloned and initialized, the java-sdk repo, along with the docs, are cloned as well. - -Then, in the Hugo configuration file, the `daprdocs/content` and `daprdocs/static` directories are redirected to the `daprdocs/developing-applications/sdks/java` and `static/java` directories, respectively. Thus, all the content within this repo is folded into the main docs site. \ No newline at end of file diff --git a/daprdocs/content/en/java-sdk-contributing/java-contributing.md b/daprdocs/content/en/java-sdk-contributing/java-contributing.md deleted file mode 100644 index 03ba6d4e51..0000000000 --- a/daprdocs/content/en/java-sdk-contributing/java-contributing.md +++ /dev/null @@ -1,27 +0,0 @@ ---- -type: docs -title: "Contributing to the Java SDK" -linkTitle: "Java SDK" -weight: 3000 -description: Guidelines for contributing to the Dapr Java SDK ---- - -When contributing to the [Java SDK](https://github.com/dapr/java-sdk) the following rules and best-practices should be followed. - -## Examples - -The `examples` directory contains code samples for users to run to try out specific functionality of the various Java SDK packages and extensions. When writing new and updated samples keep in mind: - -- All examples should be runnable on Windows, Linux, and MacOS. While Java code is consistent among operating systems, any pre/post example commands should provide options through [tabpane]({{% ref "contributing-docs.md#tabbed-content" %}}) -- Contain steps to download/install any required pre-requisites. Someone coming in with a fresh OS install should be able to start on the example and complete it without an error. Links to external download pages are fine. - -## Docs - -The `daprdocs` directory contains the markdown files that are rendered into the [Dapr Docs](https://docs.dapr.io) website. When the documentation website is built, this repo is cloned and configured so that its contents are rendered with the docs content. When writing docs, keep in mind: - - - All rules in the [docs guide]({{% ref contributing-docs.md %}}) should be followed in addition to these. - - All files and directories should be prefixed with `java-` to ensure all file/directory names are globally unique across all Dapr documentation. - -## Github Dapr Bot Commands - -Checkout the [daprbot documentation](https://docs.dapr.io/contributing/daprbot/) for Github commands you can run in this repo for common tasks. For example, you can run the `/assign` (as a comment on an issue) to assign the issue to yourself. 
diff --git a/daprdocs/content/en/java-sdk-docs/_index.md b/daprdocs/content/en/java-sdk-docs/_index.md deleted file mode 100644 index d640101bc1..0000000000 --- a/daprdocs/content/en/java-sdk-docs/_index.md +++ /dev/null @@ -1,145 +0,0 @@ ---- -type: docs -title: "Dapr Java SDK" -linkTitle: "Java" -weight: 1000 -description: Java SDK packages for developing Dapr applications -cascade: - github_repo: https://github.com/dapr/java-sdk - github_subdir: daprdocs/content/en/java-sdk-docs - path_base_for_github_subdir: content/en/developing-applications/sdks/java/ - github_branch: master ---- - -Dapr offers a variety of packages to help with the development of Java applications. Using them you can create Java clients, servers, and virtual actors with Dapr. - -## Prerequisites - -- [Dapr CLI]({{% ref install-dapr-cli.md %}}) installed -- Initialized [Dapr environment]({{% ref install-dapr-selfhost.md %}}) -- JDK 11 or above - the published jars are compatible with Java 8: - - [AdoptOpenJDK 11 - LTS](https://adoptopenjdk.net/) - - [Oracle's JDK 15](https://www.oracle.com/java/technologies/javase-downloads.html) - - [Oracle's JDK 11 - LTS](https://www.oracle.com/java/technologies/javase-jdk11-downloads.html) - - [OpenJDK](https://openjdk.java.net/) -- Install one of the following build tools for Java: - - [Maven 3.x](https://maven.apache.org/install.html) - - [Gradle 6.x](https://gradle.org/install/) - -## Import Dapr's Java SDK - -Next, import the Java SDK packages to get started. Select your preferred build tool to learn how to import. - -{{< tabpane text=true >}} - -{{% tab header="Maven" %}} - - -For a Maven project, add the following to your `pom.xml` file: - -```xml - - ... - - ... - - - io.dapr - dapr-sdk - 1.16.0 - - - - io.dapr - dapr-sdk-actors - 1.16.0 - - - - io.dapr - dapr-sdk-springboot - 1.16.0 - - ... - - ... - -``` -{{% /tab %}} - -{{% tab header="Gradle" %}} - - -For a Gradle project, add the following to your `build.gradle` file: - -```java -dependencies { -... - // Dapr's core SDK with all features, except Actors. - compile('io.dapr:dapr-sdk:1.16.0') - // Dapr's SDK for Actors (optional). - compile('io.dapr:dapr-sdk-actors:1.16.0') - // Dapr's SDK integration with SpringBoot (optional). - compile('io.dapr:dapr-sdk-springboot:1.16.0') -} -``` - -{{% /tab %}} - -{{< /tabpane >}} - -If you are also using Spring Boot, you may run into a common issue where the `OkHttp` version that the Dapr SDK uses conflicts with the one specified in the Spring Boot _Bill of Materials_. - -You can fix this by specifying a compatible `OkHttp` version in your project to match the version that the Dapr SDK uses: - -```xml - - com.squareup.okhttp3 - okhttp - 1.16.0 - -``` - -## Try it out - -Put the Dapr Java SDK to the test. Walk through the Java quickstarts and tutorials to see Dapr in action: - -| SDK samples | Description | -| ----------- | ----------- | -| [Quickstarts]({{% ref quickstarts %}}) | Experience Dapr's API building blocks in just a few minutes using the Java SDK. | -| [SDK samples](https://github.com/dapr/java-sdk/tree/master/examples) | Clone the SDK repo to try out some examples and get started. 
| - -```java -import io.dapr.client.DaprClient; -import io.dapr.client.DaprClientBuilder; - -try (DaprClient client = (new DaprClientBuilder()).build()) { - // sending a class with message; BINDING_OPERATION="create" - client.invokeBinding(BINDING_NAME, BINDING_OPERATION, myClass).block(); - - // sending a plain string - client.invokeBinding(BINDING_NAME, BINDING_OPERATION, message).block(); -} -``` - -- For a full guide on output bindings visit [How-To: Output bindings]({{% ref howto-bindings.md %}}). -- Visit [Java SDK examples](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/bindings/http) for code samples and instructions to try out output bindings. - -## Available packages - -
-- **Client**: Create Java clients that interact with a Dapr sidecar and other Dapr applications.
-- **Workflow**: Create and manage workflows that work with other Dapr APIs in Java.
diff --git a/daprdocs/content/en/java-sdk-docs/java-ai/_index.md b/daprdocs/content/en/java-sdk-docs/java-ai/_index.md deleted file mode 100644 index 904edfc111..0000000000 --- a/daprdocs/content/en/java-sdk-docs/java-ai/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -type: docs -title: "AI" -linkTitle: "AI" -weight: 3000 -description: With the Dapr Conversation AI package, you can interact with the Dapr AI workloads from a Java application. To get started, walk through the [Dapr AI]({{% ref java-ai-howto.md %}}) how-to guide. ---- \ No newline at end of file diff --git a/daprdocs/content/en/java-sdk-docs/java-ai/java-ai-howto.md b/daprdocs/content/en/java-sdk-docs/java-ai/java-ai-howto.md deleted file mode 100644 index 39970d5218..0000000000 --- a/daprdocs/content/en/java-sdk-docs/java-ai/java-ai-howto.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -type: docs -title: "How to: Author and manage Dapr Conversation AI in the Java SDK" -linkTitle: "How to: Author and manage Conversation AI" -weight: 20000 -description: How to get up and running with Conversation AI using the Dapr Java SDK ---- - -As part of this demonstration, we will look at how to use the Conversation API to converse with a Large Language Model (LLM). The API -will return the response from the LLM for the given prompt. With the [provided conversation ai example](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/conversation), you will: - -- You will provide a prompt using the [Conversation AI example](https://github.com/dapr/java-sdk/blob/master/examples/src/main/java/io/dapr/examples/conversation/DemoConversationAI.java) -- Filter out Personally identifiable information (PII). - -This example uses the default configuration from `dapr init` in [self-hosted mode](https://github.com/dapr/cli#install-dapr-on-your-local-machine-self-hosted). - -## Prerequisites - -- [Dapr CLI and initialized environment](https://docs.dapr.io/getting-started). -- Java JDK 11 (or greater): - - [Oracle JDK](https://www.oracle.com/java/technologies/downloads), or - - OpenJDK -- [Apache Maven](https://maven.apache.org/install.html), version 3.x. -- [Docker Desktop](https://www.docker.com/products/docker-desktop) - -## Set up the environment - -Clone the [Java SDK repo](https://github.com/dapr/java-sdk) and navigate into it. - -```bash -git clone https://github.com/dapr/java-sdk.git -cd java-sdk -``` - -Run the following command to install the requirements for running the Conversation AI example with the Dapr Java SDK. - -```bash -mvn clean install -DskipTests -``` - -From the Java SDK root directory, navigate to the examples' directory. - -```bash -cd examples -``` - -Run the Dapr sidecar. - -```sh -dapr run --app-id conversationapp --dapr-grpc-port 51439 --dapr-http-port 3500 --app-port 8080 -``` - -> Now, Dapr is listening for HTTP requests at `http://localhost:3500` and gRPC requests at `http://localhost:51439`. - -## Send a prompt with Personally identifiable information (PII) to the Conversation AI API - -In the `DemoConversationAI` there are steps to send a prompt using the `converse` method under the `DaprPreviewClient`. - -```java -public class DemoConversationAI { - /** - * The main method to start the client. - * - * @param args Input arguments (unused). - */ - public static void main(String[] args) { - try (DaprPreviewClient client = new DaprClientBuilder().buildPreviewClient()) { - System.out.println("Sending the following input to LLM: Hello How are you? 
This is the my number 672-123-4567"); - - ConversationInput daprConversationInput = new ConversationInput("Hello How are you? " - + "This is the my number 672-123-4567"); - - // Component name is the name provided in the metadata block of the conversation.yaml file. - Mono responseMono = client.converse(new ConversationRequest("echo", - List.of(daprConversationInput)) - .setContextId("contextId") - .setScrubPii(true).setTemperature(1.1d)); - ConversationResponse response = responseMono.block(); - System.out.printf("Conversation output: %s", response.getConversationOutputs().get(0).getResult()); - } catch (Exception e) { - throw new RuntimeException(e); - } - } -} -``` - -Run the `DemoConversationAI` with the following command. - -```sh -java -jar target/dapr-java-sdk-examples-exec.jar io.dapr.examples.conversation.DemoConversationAI -``` - -### Sample output -``` -== APP == Conversation output: Hello How are you? This is the my number -``` - -As shown in the output, the number sent to the API is obfuscated and returned in the form of . -The example above uses an ["echo"](https://docs.dapr.io/developing-applications/building-blocks/conversation/howto-conversation-layer/#set-up-the-conversation-component) -component for testing, which simply returns the input message. -When integrated with LLMs like OpenAI or Claude, you’ll receive meaningful responses instead of echoed input. - -## Next steps -- [Learn more about Conversation AI]({{% ref conversation-overview.md %}}) -- [Conversation AI API reference]({{% ref conversation_api.md %}}) \ No newline at end of file diff --git a/daprdocs/content/en/java-sdk-docs/java-client/_index.md b/daprdocs/content/en/java-sdk-docs/java-client/_index.md deleted file mode 100644 index 5f33eb41e3..0000000000 --- a/daprdocs/content/en/java-sdk-docs/java-client/_index.md +++ /dev/null @@ -1,756 +0,0 @@ ---- -type: docs -title: "Getting started with the Dapr client Java SDK" -linkTitle: "Client" -weight: 3000 -description: How to get up and running with the Dapr Java SDK ---- - -The Dapr client package allows you to interact with other Dapr applications from a Java application. - -{{% alert title="Note" color="primary" %}} -If you haven't already, [try out one of the quickstarts]({{% ref quickstarts %}}) for a quick walk-through on how to use the Dapr Java SDK with an API building block. - -{{% /alert %}} - -## Prerequisites - -[Complete initial setup and import the Java SDK into your project]({{% ref java %}}) - -## Initializing the client -You can initialize a Dapr client as so: - -```java -DaprClient client = new DaprClientBuilder().build() -``` - -This will connect to the default Dapr gRPC endpoint `localhost:50001`. For information about configuring the client using environment variables and system properties, see [Properties]({{% ref properties.md %}}). - -#### Error Handling - -Initially, errors in Dapr followed the Standard gRPC error model. However, to provide more detailed and informative error -messages, in version 1.13 an enhanced error model has been introduced which aligns with the gRPC Richer error model. In -response, the Java SDK extended the DaprException to include the error details that were added in Dapr. - -Example of handling the DaprException and consuming the error details when using the Dapr Java SDK: - -```java -... 
- try { - client.publishEvent("unknown_pubsub", "mytopic", "mydata").block(); - } catch (DaprException exception) { - System.out.println("Dapr exception's error code: " + exception.getErrorCode()); - System.out.println("Dapr exception's message: " + exception.getMessage()); - // DaprException now contains `getStatusDetails()` to include more details about the error from Dapr runtime. - System.out.println("Dapr exception's reason: " + exception.getStatusDetails().get( - DaprErrorDetails.ErrorDetailType.ERROR_INFO, - "reason", - TypeRef.STRING)); - } -... -``` - -## Building blocks - -The Java SDK allows you to interface with all of the [Dapr building blocks]({{% ref building-blocks %}}). - -### Invoke a service - -```java -import io.dapr.client.DaprClient; -import io.dapr.client.DaprClientBuilder; - -try (DaprClient client = (new DaprClientBuilder()).build()) { - // invoke a 'GET' method (HTTP) skipping serialization: \say with a Mono return type - // for gRPC set HttpExtension.NONE parameters below - response = client.invokeMethod(SERVICE_TO_INVOKE, METHOD_TO_INVOKE, "{\"name\":\"World!\"}", HttpExtension.GET, byte[].class).block(); - - // invoke a 'POST' method (HTTP) skipping serialization: to \say with a Mono return type - response = client.invokeMethod(SERVICE_TO_INVOKE, METHOD_TO_INVOKE, "{\"id\":\"100\", \"FirstName\":\"Value\", \"LastName\":\"Value\"}", HttpExtension.POST, byte[].class).block(); - - System.out.println(new String(response)); - - // invoke a 'POST' method (HTTP) with serialization: \employees with a Mono return type - Employee newEmployee = new Employee("Nigel", "Guitarist"); - Employee employeeResponse = client.invokeMethod(SERVICE_TO_INVOKE, "employees", newEmployee, HttpExtension.POST, Employee.class).block(); -} -``` - -- For a full guide on service invocation visit [How-To: Invoke a service]({{% ref howto-invoke-discover-services.md %}}). -- Visit [Java SDK examples](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/invoke) for code samples and instructions to try out service invocation - -### Save & get application state - -```java -import io.dapr.client.DaprClient; -import io.dapr.client.DaprClientBuilder; -import io.dapr.client.domain.State; -import reactor.core.publisher.Mono; - -try (DaprClient client = (new DaprClientBuilder()).build()) { - // Save state - client.saveState(STATE_STORE_NAME, FIRST_KEY_NAME, myClass).block(); - - // Get state - State retrievedMessage = client.getState(STATE_STORE_NAME, FIRST_KEY_NAME, MyClass.class).block(); - - // Delete state - client.deleteState(STATE_STORE_NAME, FIRST_KEY_NAME).block(); -} -``` - -- For a full list of state operations visit [How-To: Get & save state]({{% ref howto-get-save-state.md %}}). 
-- Visit [Java SDK examples](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/state) for code samples and instructions to try out state management - -### Publish & subscribe to messages - -##### Publish messages - -```java -import io.dapr.client.DaprClient; -import io.dapr.client.DaprClientBuilder; -import io.dapr.client.domain.Metadata; -import static java.util.Collections.singletonMap; - -try (DaprClient client = (new DaprClientBuilder()).build()) { - client.publishEvent(PUBSUB_NAME, TOPIC_NAME, message, singletonMap(Metadata.TTL_IN_SECONDS, MESSAGE_TTL_IN_SECONDS)).block(); -} -``` - -##### Subscribe to messages - -```java -import com.fasterxml.jackson.databind.ObjectMapper; -import io.dapr.Topic; -import io.dapr.client.domain.BulkSubscribeAppResponse; -import io.dapr.client.domain.BulkSubscribeAppResponseEntry; -import io.dapr.client.domain.BulkSubscribeAppResponseStatus; -import io.dapr.client.domain.BulkSubscribeMessage; -import io.dapr.client.domain.BulkSubscribeMessageEntry; -import io.dapr.client.domain.CloudEvent; -import io.dapr.springboot.annotations.BulkSubscribe; -import org.springframework.web.bind.annotation.PostMapping; -import org.springframework.web.bind.annotation.RequestBody; -import org.springframework.web.bind.annotation.RestController; -import reactor.core.publisher.Mono; - -@RestController -public class SubscriberController { - - private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper(); - - @Topic(name = "testingtopic", pubsubName = "${myAppProperty:messagebus}") - @PostMapping(path = "/testingtopic") - public Mono handleMessage(@RequestBody(required = false) CloudEvent cloudEvent) { - return Mono.fromRunnable(() -> { - try { - System.out.println("Subscriber got: " + cloudEvent.getData()); - System.out.println("Subscriber got: " + OBJECT_MAPPER.writeValueAsString(cloudEvent)); - } catch (Exception e) { - throw new RuntimeException(e); - } - }); - } - - @Topic(name = "testingtopic", pubsubName = "${myAppProperty:messagebus}", - rule = @Rule(match = "event.type == 'myevent.v2'", priority = 1)) - @PostMapping(path = "/testingtopicV2") - public Mono handleMessageV2(@RequestBody(required = false) CloudEvent envelope) { - return Mono.fromRunnable(() -> { - try { - System.out.println("Subscriber got: " + cloudEvent.getData()); - System.out.println("Subscriber got: " + OBJECT_MAPPER.writeValueAsString(cloudEvent)); - } catch (Exception e) { - throw new RuntimeException(e); - } - }); - } - - @BulkSubscribe() - @Topic(name = "testingtopicbulk", pubsubName = "${myAppProperty:messagebus}") - @PostMapping(path = "/testingtopicbulk") - public Mono handleBulkMessage( - @RequestBody(required = false) BulkSubscribeMessage> bulkMessage) { - return Mono.fromCallable(() -> { - if (bulkMessage.getEntries().size() == 0) { - return new BulkSubscribeAppResponse(new ArrayList()); - } - - System.out.println("Bulk Subscriber received " + bulkMessage.getEntries().size() + " messages."); - - List entries = new ArrayList(); - for (BulkSubscribeMessageEntry entry : bulkMessage.getEntries()) { - try { - System.out.printf("Bulk Subscriber message has entry ID: %s\n", entry.getEntryId()); - CloudEvent cloudEvent = (CloudEvent) entry.getEvent(); - System.out.printf("Bulk Subscriber got: %s\n", cloudEvent.getData()); - entries.add(new BulkSubscribeAppResponseEntry(entry.getEntryId(), BulkSubscribeAppResponseStatus.SUCCESS)); - } catch (Exception e) { - e.printStackTrace(); - entries.add(new BulkSubscribeAppResponseEntry(entry.getEntryId(), 
BulkSubscribeAppResponseStatus.RETRY)); - } - } - return new BulkSubscribeAppResponse(entries); - }); - } -} -``` - -##### Bulk Publish Messages -> Note: API is in Alpha stage - - -```java -import io.dapr.client.DaprClientBuilder; -import io.dapr.client.DaprPreviewClient; -import io.dapr.client.domain.BulkPublishResponse; -import io.dapr.client.domain.BulkPublishResponseFailedEntry; -import java.util.ArrayList; -import java.util.List; -class Solution { - public void publishMessages() { - try (DaprPreviewClient client = (new DaprClientBuilder()).buildPreviewClient()) { - // Create a list of messages to publish - List messages = new ArrayList<>(); - for (int i = 0; i < NUM_MESSAGES; i++) { - String message = String.format("This is message #%d", i); - messages.add(message); - System.out.println("Going to publish message : " + message); - } - - // Publish list of messages using the bulk publish API - BulkPublishResponse res = client.publishEvents(PUBSUB_NAME, TOPIC_NAME, "text/plain", messages).block() - } - } -} -``` - -- For a full guide on publishing messages and subscribing to a topic [How-To: Publish & subscribe]({{% ref howto-publish-subscribe.md %}}). -- Visit [Java SDK examples](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/pubsub/http) for code samples and instructions to try out pub/sub - -### Interact with output bindings - -```java -import io.dapr.client.DaprClient; -import io.dapr.client.DaprClientBuilder; - -try (DaprClient client = (new DaprClientBuilder()).build()) { - // sending a class with message; BINDING_OPERATION="create" - client.invokeBinding(BINDING_NAME, BINDING_OPERATION, myClass).block(); - - // sending a plain string - client.invokeBinding(BINDING_NAME, BINDING_OPERATION, message).block(); -} -``` - -- For a full guide on output bindings visit [How-To: Output bindings]({{% ref howto-bindings.md %}}). -- Visit [Java SDK examples](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/bindings/http) for code samples and instructions to try out output bindings. - -### Interact with input bindings - -```java -import org.springframework.web.bind.annotation.*; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -@RestController -@RequestMapping("/") -public class myClass { - private static final Logger log = LoggerFactory.getLogger(myClass); - @PostMapping(path = "/checkout") - public Mono getCheckout(@RequestBody(required = false) byte[] body) { - return Mono.fromRunnable(() -> - log.info("Received Message: " + new String(body))); - } -} -``` - -- For a full guide on input bindings, visit [How-To: Input bindings]({{% ref howto-triggers %}}). -- Visit [Java SDK examples](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/bindings/http) for code samples and instructions to try out input bindings. - -### Retrieve secrets - -```java -import com.fasterxml.jackson.databind.ObjectMapper; -import io.dapr.client.DaprClient; -import io.dapr.client.DaprClientBuilder; -import java.util.Map; - -try (DaprClient client = (new DaprClientBuilder()).build()) { - Map secret = client.getSecret(SECRET_STORE_NAME, secretKey).block(); - System.out.println(JSON_SERIALIZER.writeValueAsString(secret)); -} -``` - -- For a full guide on secrets visit [How-To: Retrieve secrets]({{% ref howto-secrets.md %}}). 
-- Visit [Java SDK examples](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/secrets) for code samples and instructions to try out retrieving secrets
-
-### Actors
-An actor is an isolated, independent unit of compute and state with single-threaded execution. Dapr provides an actor implementation based on the [Virtual Actor pattern](https://www.microsoft.com/en-us/research/project/orleans-virtual-actors/), which provides a single-threaded programming model and where actors are garbage collected when not in use. With Dapr's implementation, you write your Dapr actors according to the Actor model, and Dapr leverages the scalability and reliability that the underlying platform provides.
-
-```java
-import io.dapr.actors.ActorMethod;
-import io.dapr.actors.ActorType;
-import reactor.core.publisher.Mono;
-
-@ActorType(name = "DemoActor")
-public interface DemoActor {
-
-  void registerReminder();
-
-  @ActorMethod(name = "echo_message")
-  String say(String something);
-
-  void clock(String message);
-
-  @ActorMethod(returns = Integer.class)
-  Mono<Integer> incrementAndGet(int delta);
-}
-```
-
-- For a full guide on actors visit [How-To: Use virtual actors in Dapr]({{% ref howto-actors.md %}}).
-- Visit [Java SDK examples](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/actors) for code samples and instructions to try actors
-
-### Get & Subscribe to application configurations
-
-> Note this is a preview API and thus will only be accessible via the DaprPreviewClient interface and not the normal DaprClient interface
-
-```java
-import io.dapr.client.DaprClientBuilder;
-import io.dapr.client.DaprPreviewClient;
-import io.dapr.client.domain.ConfigurationItem;
-import io.dapr.client.domain.GetConfigurationRequest;
-import io.dapr.client.domain.SubscribeConfigurationRequest;
-import io.dapr.client.domain.SubscribeConfigurationResponse;
-import io.dapr.client.domain.UnsubscribeConfigurationResponse;
-import java.util.List;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.Mono;
-
-try (DaprPreviewClient client = (new DaprClientBuilder()).buildPreviewClient()) {
-  // Get configuration for a single key
-  Mono<ConfigurationItem> item = client.getConfiguration(CONFIG_STORE_NAME, CONFIG_KEY);
-
-  // Get configurations for multiple keys
-  Mono<List<ConfigurationItem>> items =
-      client.getConfiguration(CONFIG_STORE_NAME, CONFIG_KEY_1, CONFIG_KEY_2);
-
-  // Subscribe to configuration changes
-  Flux<SubscribeConfigurationResponse> outFlux = client.subscribeConfiguration(CONFIG_STORE_NAME, CONFIG_KEY_1, CONFIG_KEY_2);
-  outFlux.subscribe(configItems -> configItems.forEach(...));
-
-  // Unsubscribe from configuration changes
-  Mono<UnsubscribeConfigurationResponse> unsubscribe = client.unsubscribeConfiguration(SUBSCRIPTION_ID, CONFIG_STORE_NAME);
-}
-```
-
-- For a full list of configuration operations visit [How-To: Manage configuration from a store]({{% ref howto-manage-configuration.md %}}).
-- Visit [Java SDK examples](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/configuration) for code samples and instructions to try out different configuration operations.
- -### Query saved state - -> Note this is a preview API and thus will only be accessible via the DaprPreviewClient interface and not the normal DaprClient interface - -```java -import io.dapr.client.DaprClient; -import io.dapr.client.DaprClientBuilder; -import io.dapr.client.DaprPreviewClient; -import io.dapr.client.domain.QueryStateItem; -import io.dapr.client.domain.QueryStateRequest; -import io.dapr.client.domain.QueryStateResponse; -import io.dapr.client.domain.query.Query; -import io.dapr.client.domain.query.Sorting; -import io.dapr.client.domain.query.filters.EqFilter; - -try (DaprClient client = builder.build(); DaprPreviewClient previewClient = builder.buildPreviewClient()) { - String searchVal = args.length == 0 ? "searchValue" : args[0]; - - // Create JSON data - Listing first = new Listing(); - first.setPropertyType("apartment"); - first.setId("1000"); - ... - Listing second = new Listing(); - second.setPropertyType("row-house"); - second.setId("1002"); - ... - Listing third = new Listing(); - third.setPropertyType("apartment"); - third.setId("1003"); - ... - Listing fourth = new Listing(); - fourth.setPropertyType("apartment"); - fourth.setId("1001"); - ... - Map meta = new HashMap<>(); - meta.put("contentType", "application/json"); - - // Save state - SaveStateRequest request = new SaveStateRequest(STATE_STORE_NAME).setStates( - new State<>("1", first, null, meta, null), - new State<>("2", second, null, meta, null), - new State<>("3", third, null, meta, null), - new State<>("4", fourth, null, meta, null) - ); - client.saveBulkState(request).block(); - - - // Create query and query state request - - Query query = new Query() - .setFilter(new EqFilter<>("propertyType", "apartment")) - .setSort(Arrays.asList(new Sorting("id", Sorting.Order.DESC))); - QueryStateRequest request = new QueryStateRequest(STATE_STORE_NAME) - .setQuery(query); - - // Use preview client to call query state API - QueryStateResponse result = previewClient.queryState(request, MyData.class).block(); - - // View Query state response - System.out.println("Found " + result.getResults().size() + " items."); - for (QueryStateItem item : result.getResults()) { - System.out.println("Key: " + item.getKey()); - System.out.println("Data: " + item.getValue()); - } -} -``` -- For a full how-to on query state, visit [How-To: Query state]({{% ref howto-state-query-api.md %}}). -- Visit [Java SDK examples](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/querystate) for complete code sample. - -### Distributed lock - -```java -package io.dapr.examples.lock.grpc; - -import io.dapr.client.DaprClientBuilder; -import io.dapr.client.DaprPreviewClient; -import io.dapr.client.domain.LockRequest; -import io.dapr.client.domain.UnlockRequest; -import io.dapr.client.domain.UnlockResponseStatus; -import reactor.core.publisher.Mono; - -public class DistributedLockGrpcClient { - private static final String LOCK_STORE_NAME = "lockstore"; - - /** - * Executes various methods to check the different apis. - * - * @param args arguments - * @throws Exception throws Exception - */ - public static void main(String[] args) throws Exception { - try (DaprPreviewClient client = (new DaprClientBuilder()).buildPreviewClient()) { - System.out.println("Using preview client..."); - tryLock(client); - unlock(client); - } - } - - /** - * Trying to get lock. 
- * - * @param client DaprPreviewClient object - */ - public static void tryLock(DaprPreviewClient client) { - System.out.println("*******trying to get a free distributed lock********"); - try { - LockRequest lockRequest = new LockRequest(LOCK_STORE_NAME, "resouce1", "owner1", 5); - Mono result = client.tryLock(lockRequest); - System.out.println("Lock result -> " + (Boolean.TRUE.equals(result.block()) ? "SUCCESS" : "FAIL")); - } catch (Exception ex) { - System.out.println(ex.getMessage()); - } - } - - /** - * Unlock a lock. - * - * @param client DaprPreviewClient object - */ - public static void unlock(DaprPreviewClient client) { - System.out.println("*******unlock a distributed lock********"); - try { - UnlockRequest unlockRequest = new UnlockRequest(LOCK_STORE_NAME, "resouce1", "owner1"); - Mono result = client.unlock(unlockRequest); - System.out.println("Unlock result ->" + result.block().name()); - } catch (Exception ex) { - System.out.println(ex.getMessage()); - } - } -} -``` - -- For a full how-to on distributed lock, visit [How-To: Use a Lock]({{% ref howto-use-distributed-lock.md %}}) -- Visit [Java SDK examples](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/lock) for complete code sample. - -### Workflow - -```java -package io.dapr.examples.workflows; - -import io.dapr.workflows.client.DaprWorkflowClient; -import io.dapr.workflows.client.WorkflowState; - -import java.time.Duration; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -/** - * For setup instructions, see the README. - */ -public class DemoWorkflowClient { - - /** - * The main method. - * - * @param args Input arguments (unused). - * @throws InterruptedException If program has been interrupted. - */ - public static void main(String[] args) throws InterruptedException { - DaprWorkflowClient client = new DaprWorkflowClient(); - - try (client) { - String separatorStr = "*******"; - System.out.println(separatorStr); - String instanceId = client.scheduleNewWorkflow(DemoWorkflow.class, "input data"); - System.out.printf("Started new workflow instance with random ID: %s%n", instanceId); - - System.out.println(separatorStr); - System.out.println("**GetWorkflowMetadata:Running Workflow**"); - WorkflowState workflowMetadata = client.getWorkflowState(instanceId, true); - System.out.printf("Result: %s%n", workflowMetadata); - - System.out.println(separatorStr); - System.out.println("**WaitForWorkflowStart**"); - try { - WorkflowState waitForWorkflowStartResult = - client.waitForWorkflowStart(instanceId, Duration.ofSeconds(60), true); - System.out.printf("Result: %s%n", waitForWorkflowStartResult); - } catch (TimeoutException ex) { - System.out.printf("waitForWorkflowStart has an exception:%s%n", ex); - } - - System.out.println(separatorStr); - System.out.println("**SendExternalMessage**"); - client.raiseEvent(instanceId, "TestEvent", "TestEventPayload"); - - System.out.println(separatorStr); - System.out.println("** Registering parallel Events to be captured by allOf(t1,t2,t3) **"); - client.raiseEvent(instanceId, "event1", "TestEvent 1 Payload"); - client.raiseEvent(instanceId, "event2", "TestEvent 2 Payload"); - client.raiseEvent(instanceId, "event3", "TestEvent 3 Payload"); - System.out.printf("Events raised for workflow with instanceId: %s\n", instanceId); - - System.out.println(separatorStr); - System.out.println("** Registering Event to be captured by anyOf(t1,t2,t3) **"); - client.raiseEvent(instanceId, "e2", "event 2 Payload"); - 
System.out.printf("Event raised for workflow with instanceId: %s\n", instanceId); - - - System.out.println(separatorStr); - System.out.println("**waitForWorkflowCompletion**"); - try { - WorkflowState waitForWorkflowCompletionResult = - client.waitForWorkflowCompletion(instanceId, Duration.ofSeconds(60), true); - System.out.printf("Result: %s%n", waitForWorkflowCompletionResult); - } catch (TimeoutException ex) { - System.out.printf("waitForWorkflowCompletion has an exception:%s%n", ex); - } - - System.out.println(separatorStr); - System.out.println("**purgeWorkflow**"); - boolean purgeResult = client.purgeWorkflow(instanceId); - System.out.printf("purgeResult: %s%n", purgeResult); - - System.out.println(separatorStr); - System.out.println("**raiseEvent**"); - - String eventInstanceId = client.scheduleNewWorkflow(DemoWorkflow.class); - System.out.printf("Started new workflow instance with random ID: %s%n", eventInstanceId); - client.raiseEvent(eventInstanceId, "TestException", null); - System.out.printf("Event raised for workflow with instanceId: %s\n", eventInstanceId); - - System.out.println(separatorStr); - String instanceToTerminateId = "terminateMe"; - client.scheduleNewWorkflow(DemoWorkflow.class, null, instanceToTerminateId); - System.out.printf("Started new workflow instance with specified ID: %s%n", instanceToTerminateId); - - TimeUnit.SECONDS.sleep(5); - System.out.println("Terminate this workflow instance manually before the timeout is reached"); - client.terminateWorkflow(instanceToTerminateId, null); - System.out.println(separatorStr); - - String restartingInstanceId = "restarting"; - client.scheduleNewWorkflow(DemoWorkflow.class, null, restartingInstanceId); - System.out.printf("Started new workflow instance with ID: %s%n", restartingInstanceId); - System.out.println("Sleeping 30 seconds to restart the workflow"); - TimeUnit.SECONDS.sleep(30); - - System.out.println("**SendExternalMessage: RestartEvent**"); - client.raiseEvent(restartingInstanceId, "RestartEvent", "RestartEventPayload"); - - System.out.println("Sleeping 30 seconds to terminate the eternal workflow"); - TimeUnit.SECONDS.sleep(30); - client.terminateWorkflow(restartingInstanceId, null); - } - - System.out.println("Exiting DemoWorkflowClient."); - System.exit(0); - } -} -``` - -- For a full guide on workflows, visit: - - [How-To: Author workflows]({{% ref howto-author-workflow.md %}}). - - [How-To: Manage workflows]({{% ref howto-manage-workflow.md %}}). -- [Learn more about how to use workflows with the Java SDK]({{% ref java-workflow.md %}}). - -## Sidecar APIs - -#### Wait for sidecar -The `DaprClient` also provides a helper method to wait for the sidecar to become healthy (components only). When using -this method, be sure to specify a timeout in milliseconds and block() to wait for the result of a reactive operation. - -```java -// Wait for the Dapr sidecar to report healthy before attempting to use Dapr components. -try (DaprClient client = new DaprClientBuilder().build()) { - System.out.println("Waiting for Dapr sidecar ..."); - client.waitForSidecar(10000).block(); // Specify the timeout in milliseconds - System.out.println("Dapr sidecar is ready."); - ... -} - -// Perform Dapr component operations here i.e. fetching secrets or saving state. -``` - -### Shutdown the sidecar -```java -try (DaprClient client = new DaprClientBuilder().build()) { - logger.info("Sending shutdown request."); - client.shutdown().block(); - logger.info("Ensuring dapr has stopped."); - ... 
-}
-```
-
-Learn more about the [Dapr Java SDK packages available to add to your Java applications](https://dapr.github.io/java-sdk/).
-
-## Security
-
-### App API Token Authentication
-
-Building blocks like pubsub, input bindings, or jobs require Dapr to make incoming calls to your application. You can secure these requests using [Dapr App API Token Authentication]({{% ref app-api-token.md %}}). This ensures that only Dapr can invoke your application's endpoints.
-
-#### Understanding the two tokens
-
-Dapr uses two different tokens for securing communication. See [Properties]({{% ref properties.md %}}) for detailed information about both tokens:
-
-- **`DAPR_API_TOKEN`** (Your app → Dapr sidecar): Automatically handled by the Java SDK when using `DaprClient`
-- **`APP_API_TOKEN`** (Dapr → Your app): Requires server-side validation in your application
-
-The examples below show how to implement server-side validation for `APP_API_TOKEN`.
-
-#### Implementing server-side token validation
-
-When using the gRPC protocol, implement a server interceptor to capture the metadata.
-
-```java
-import io.grpc.Context;
-import io.grpc.Contexts;
-import io.grpc.Metadata;
-import io.grpc.ServerCall;
-import io.grpc.ServerCallHandler;
-import io.grpc.ServerInterceptor;
-
-public class SubscriberGrpcService extends AppCallbackGrpc.AppCallbackImplBase {
-  public static final Context.Key<Metadata> METADATA_KEY = Context.key("grpc-metadata");
-
-  // gRPC interceptor to capture metadata
-  public static class MetadataInterceptor implements ServerInterceptor {
-    @Override
-    public <ReqT, RespT> ServerCall.Listener<ReqT> interceptCall(
-        ServerCall<ReqT, RespT> call,
-        Metadata headers,
-        ServerCallHandler<ReqT, RespT> next) {
-      Context contextWithMetadata = Context.current().withValue(METADATA_KEY, headers);
-      return Contexts.interceptCall(contextWithMetadata, call, headers, next);
-    }
-  }
-
-  // Your service methods go here...
-}
-```
-
-Register the interceptor when building your gRPC server:
-
-```java
-Server server = ServerBuilder.forPort(port)
-    .intercept(new SubscriberGrpcService.MetadataInterceptor())
-    .addService(new SubscriberGrpcService())
-    .build();
-server.start();
-```
-
-Then, in your service methods, extract the token from metadata:
-
-```java
-@Override
-public void onTopicEvent(DaprAppCallbackProtos.TopicEventRequest request,
-    StreamObserver<DaprAppCallbackProtos.TopicEventResponse> responseObserver) {
-  try {
-    // Extract metadata from context
-    Context context = Context.current();
-    Metadata metadata = METADATA_KEY.get(context);
-
-    if (metadata != null) {
-      String apiToken = metadata.get(
-          Metadata.Key.of("dapr-api-token", Metadata.ASCII_STRING_MARSHALLER));
-
-      // Validate token accordingly
-    }
-
-    // Process the request
-    // ...
- - } catch (Throwable e) { - responseObserver.onError(e); - } -} -``` - -#### Using with HTTP endpoints - -For HTTP-based endpoints, extract the token from the headers: - -```java -@RestController -public class SubscriberController { - - @PostMapping(path = "/endpoint") - public Mono handleRequest( - @RequestBody(required = false) byte[] body, - @RequestHeader Map headers) { - return Mono.fromRunnable(() -> { - try { - // Extract the token from headers - String apiToken = headers.get("dapr-api-token"); - - // Validate token accordingly - - // Process the request - } catch (Exception e) { - throw new RuntimeException(e); - } - }); - } -} -``` - -#### Examples - -For working examples with pubsub, bindings, and jobs: -- [PubSub with App API Token Authentication](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/pubsub#app-api-token-authentication-optional) -- [Bindings with App API Token Authentication](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/bindings/http#app-api-token-authentication-optional) -- [Jobs with App API Token Authentication](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/jobs#app-api-token-authentication-optional) - -## Related links -- [Java SDK examples](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples) - -For a full list of SDK properties and how to configure them, visit [Properties]({{% ref properties.md %}}). diff --git a/daprdocs/content/en/java-sdk-docs/java-client/properties.md b/daprdocs/content/en/java-sdk-docs/java-client/properties.md deleted file mode 100644 index 87eb7a99c4..0000000000 --- a/daprdocs/content/en/java-sdk-docs/java-client/properties.md +++ /dev/null @@ -1,211 +0,0 @@ ---- -type: docs -title: "Properties" -linkTitle: "Properties" -weight: 3001 -description: SDK-wide properties for configuring the Dapr Java SDK using environment variables and system properties ---- - -# Properties - -The Dapr Java SDK provides a set of global properties that control the behavior of the SDK. These properties can be configured using environment variables or system properties. System properties can be set using the `-D` flag when running your Java application. - -These properties affect the entire SDK, including clients and runtime. They control aspects such as: -- Sidecar connectivity (endpoints, ports) -- Security settings (TLS, API tokens) -- Performance tuning (timeouts, connection pools) -- Protocol settings (gRPC, HTTP) -- String encoding - -## Environment Variables - -The following environment variables are available for configuring the Dapr Java SDK: - -### Sidecar Endpoints - -When these variables are set, the client will automatically use them to connect to the Dapr sidecar. 
- -| Environment Variable | Description | Default | -|---------------------|-------------|---------| -| `DAPR_GRPC_ENDPOINT` | The gRPC endpoint for the Dapr sidecar | `localhost:50001` | -| `DAPR_HTTP_ENDPOINT` | The HTTP endpoint for the Dapr sidecar | `localhost:3500` | -| `DAPR_GRPC_PORT` | The gRPC port for the Dapr sidecar (legacy, `DAPR_GRPC_ENDPOINT` takes precedence) | `50001` | -| `DAPR_HTTP_PORT` | The HTTP port for the Dapr sidecar (legacy, `DAPR_HTTP_ENDPOINT` takes precedence) | `3500` | - -### API Tokens - -Dapr supports two types of API tokens for securing communication: - -| Environment Variable | Description | Default | -|---------------------|-------------|---------| -| `DAPR_API_TOKEN` | API token for authenticating requests **from your app to the Dapr sidecar**. The Java SDK automatically includes this token in requests when using `DaprClient`. | `null` | -| `APP_API_TOKEN` | API token for authenticating requests **from Dapr to your app**. When set, Dapr includes this token in the `dapr-api-token` header/metadata when calling your application (for pubsub subscribers, input bindings, or job triggers). Your application must validate this token. | `null` | - -For implementation examples, see [App API Token Authentication]({{% ref java-client#app-api-token-authentication %}}). For more details, see [Dapr API token authentication](https://docs.dapr.io/operations/security/api-token/). - -### gRPC Configuration - -#### TLS Settings -For secure gRPC communication, you can configure TLS settings using the following environment variables: - -| Environment Variable | Description | Default | -|---------------------|-------------|---------| -| `DAPR_GRPC_TLS_INSECURE` | When set to "true", enables insecure TLS mode which still uses TLS but doesn't verify certificates. This uses InsecureTrustManagerFactory to trust all certificates. This should only be used for testing or in secure environments. | `false` | -| `DAPR_GRPC_TLS_CA_PATH` | Path to the CA certificate file. This is used for TLS connections to servers with self-signed certificates. | `null` | -| `DAPR_GRPC_TLS_CERT_PATH` | Path to the TLS certificate file for client authentication. | `null` | -| `DAPR_GRPC_TLS_KEY_PATH` | Path to the TLS private key file for client authentication. | `null` | - -#### Keepalive Settings -Configure gRPC keepalive behavior using these environment variables: - -| Environment Variable | Description | Default | -|---------------------|-------------|---------| -| `DAPR_GRPC_ENABLE_KEEP_ALIVE` | Whether to enable gRPC keepalive | `false` | -| `DAPR_GRPC_KEEP_ALIVE_TIME_SECONDS` | gRPC keepalive time in seconds | `10` | -| `DAPR_GRPC_KEEP_ALIVE_TIMEOUT_SECONDS` | gRPC keepalive timeout in seconds | `5` | -| `DAPR_GRPC_KEEP_ALIVE_WITHOUT_CALLS` | Whether to keep gRPC connection alive without calls | `true` | - -#### Inbound Message Settings -Configure gRPC inbound message settings using these environment variables: - -| Environment Variable | Description | Default | -|---------------------|-------------|---------| -| `DAPR_GRPC_MAX_INBOUND_MESSAGE_SIZE_BYTES` | Dapr's maximum inbound message size for gRPC in bytes. 
This value sets the maximum size of a gRPC message that can be received by the application | `4194304` | -| `DAPR_GRPC_MAX_INBOUND_METADATA_SIZE_BYTES` | Dapr's maximum inbound metadata size for gRPC in bytes | `8192` | - -### HTTP Client Configuration - -These properties control the behavior of the HTTP client used for communication with the Dapr sidecar: - -| Environment Variable | Description | Default | -|---------------------|-------------|---------| -| `DAPR_HTTP_CLIENT_READ_TIMEOUT_SECONDS` | Timeout in seconds for HTTP client read operations. This is the maximum time to wait for a response from the Dapr sidecar. | `60` | -| `DAPR_HTTP_CLIENT_MAX_REQUESTS` | Maximum number of concurrent HTTP requests that can be executed. Above this limit, requests will queue in memory waiting for running calls to complete. | `1024` | -| `DAPR_HTTP_CLIENT_MAX_IDLE_CONNECTIONS` | Maximum number of idle connections in the HTTP connection pool. This is the maximum number of connections that can remain idle in the pool. | `128` | - -### API Configuration - -These properties control the behavior of API calls made through the SDK: - -| Environment Variable | Description | Default | -|---------------------|-------------|---------| -| `DAPR_API_MAX_RETRIES` | Maximum number of retries for retriable exceptions when making API calls to the Dapr sidecar | `0` | -| `DAPR_API_TIMEOUT_MILLISECONDS` | Timeout in milliseconds for API calls to the Dapr sidecar. A value of 0 means no timeout. | `0` | - -### String Encoding - -| Environment Variable | Description | Default | -|---------------------|-------------|---------| -| `DAPR_STRING_CHARSET` | Character set used for string encoding/decoding in the SDK. Must be a valid Java charset name. | `UTF-8` | - -### System Properties - -All environment variables can be set as system properties using the `-D` flag. 
Here is the complete list of available system properties: - -| System Property | Description | Default | -|----------------|-------------|---------| -| `dapr.sidecar.ip` | IP address for the Dapr sidecar | `localhost` | -| `dapr.http.port` | HTTP port for the Dapr sidecar | `3500` | -| `dapr.grpc.port` | gRPC port for the Dapr sidecar | `50001` | -| `dapr.grpc.tls.cert.path` | Path to the gRPC TLS certificate | `null` | -| `dapr.grpc.tls.key.path` | Path to the gRPC TLS key | `null` | -| `dapr.grpc.tls.ca.path` | Path to the gRPC TLS CA certificate | `null` | -| `dapr.grpc.tls.insecure` | Whether to use insecure TLS mode | `false` | -| `dapr.grpc.endpoint` | gRPC endpoint for remote sidecar | `null` | -| `dapr.grpc.enable.keep.alive` | Whether to enable gRPC keepalive | `false` | -| `dapr.grpc.keep.alive.time.seconds` | gRPC keepalive time in seconds | `10` | -| `dapr.grpc.keep.alive.timeout.seconds` | gRPC keepalive timeout in seconds | `5` | -| `dapr.grpc.keep.alive.without.calls` | Whether to keep gRPC connection alive without calls | `true` | -| `dapr.http.endpoint` | HTTP endpoint for remote sidecar | `null` | -| `dapr.api.maxRetries` | Maximum number of retries for API calls | `0` | -| `dapr.api.timeoutMilliseconds` | Timeout for API calls in milliseconds | `0` | -| `dapr.api.token` | API token for authentication | `null` | -| `dapr.string.charset` | String encoding used in the SDK | `UTF-8` | -| `dapr.http.client.readTimeoutSeconds` | Timeout in seconds for HTTP client reads | `60` | -| `dapr.http.client.maxRequests` | Maximum number of concurrent HTTP requests | `1024` | -| `dapr.http.client.maxIdleConnections` | Maximum number of idle HTTP connections | `128` | - -## Property Resolution Order - -Properties are resolved in the following order: -1. Override values (if provided when creating a Properties instance) -2. System properties (set via `-D`) -3. Environment variables -4. Default values - -The SDK checks each source in order. If a value is invalid for the property type (e.g., non-numeric for a numeric property), the SDK will log a warning and try the next source. For example: - -```bash -# Invalid boolean value - will be ignored -java -Ddapr.grpc.enable.keep.alive=not-a-boolean -jar myapp.jar - -# Valid boolean value - will be used -export DAPR_GRPC_ENABLE_KEEP_ALIVE=false -``` - -In this case, the environment variable is used because the system property value is invalid. However, if both values are valid, the system property takes precedence: - -```bash -# Valid boolean value - will be used -java -Ddapr.grpc.enable.keep.alive=true -jar myapp.jar - -# Valid boolean value - will be ignored -export DAPR_GRPC_ENABLE_KEEP_ALIVE=false -``` - -Override values can be set using the `DaprClientBuilder` in two ways: - -1. Using individual property overrides (recommended for most cases): -```java -import io.dapr.config.Properties; - -// Set a single property override -DaprClient client = new DaprClientBuilder() - .withPropertyOverride(Properties.GRPC_ENABLE_KEEP_ALIVE, "true") - .build(); - -// Or set multiple property overrides -DaprClient client = new DaprClientBuilder() - .withPropertyOverride(Properties.GRPC_ENABLE_KEEP_ALIVE, "true") - .withPropertyOverride(Properties.HTTP_CLIENT_READ_TIMEOUT_SECONDS, "120") - .build(); -``` - -2. 
Using a Properties instance (useful when you have many properties to set at once): -```java -// Create a map of property overrides -Map overrides = new HashMap<>(); -overrides.put("dapr.grpc.enable.keep.alive", "true"); -overrides.put("dapr.http.client.readTimeoutSeconds", "120"); - -// Create a Properties instance with overrides -Properties properties = new Properties(overrides); - -// Use these properties when creating a client -DaprClient client = new DaprClientBuilder() - .withProperties(properties) - .build(); -``` - -For most use cases, you'll use system properties or environment variables. Override values are primarily used when you need different property values for different instances of the SDK in the same application. - -## Proxy Configuration - -You can configure proxy settings for your Java application using system properties. These are standard Java system properties that are part of Java's networking layer (`java.net` package), not specific to Dapr. They are used by Java's networking stack, including the HTTP client that Dapr's SDK uses. - -For detailed information about Java's proxy configuration, including all available properties and their usage, see the [Java Networking Properties documentation](https://docs.oracle.com/en/java/javase/11/docs/api/java.base/java/net/doc-files/net-properties.html). - - -For example, here's how to configure a proxy: - -```bash -# Configure HTTP proxy - replace with your actual proxy server details -java -Dhttp.proxyHost=your-proxy-server.com -Dhttp.proxyPort=8080 -jar myapp.jar - -# Configure HTTPS proxy - replace with your actual proxy server details -java -Dhttps.proxyHost=your-proxy-server.com -Dhttps.proxyPort=8443 -jar myapp.jar -``` - -Replace `your-proxy-server.com` with your actual proxy server hostname or IP address, and adjust the port numbers to match your proxy server configuration. - -These proxy settings will affect all HTTP/HTTPS connections made by your Java application, including connections to the Dapr sidecar. \ No newline at end of file diff --git a/daprdocs/content/en/java-sdk-docs/java-jobs/_index.md b/daprdocs/content/en/java-sdk-docs/java-jobs/_index.md deleted file mode 100644 index 9d017f7770..0000000000 --- a/daprdocs/content/en/java-sdk-docs/java-jobs/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -type: docs -title: "Jobs" -linkTitle: "Jobs" -weight: 3000 -description: With the Dapr Jobs package, you can interact with the Dapr Jobs APIs from a Java application to trigger future operations to run according to a predefined schedule with an optional payload. To get started, walk through the [Dapr Jobs]({{% ref java-jobs-howto.md %}}) how-to guide. ---- diff --git a/daprdocs/content/en/java-sdk-docs/java-jobs/java-jobs-howto.md b/daprdocs/content/en/java-sdk-docs/java-jobs/java-jobs-howto.md deleted file mode 100644 index e7c6346289..0000000000 --- a/daprdocs/content/en/java-sdk-docs/java-jobs/java-jobs-howto.md +++ /dev/null @@ -1,164 +0,0 @@ ---- -type: docs -title: "How to: Author and manage Dapr Jobs in the Java SDK" -linkTitle: "How to: Author and manage Jobs" -weight: 20000 -description: How to get up and running with Jobs using the Dapr Java SDK ---- - -As part of this demonstration we will schedule a Dapr Job. The scheduled job will trigger an endpoint registered in the -same app. 
With the [provided jobs example](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/jobs), you will: - -- Schedule a Job [Job scheduling example](https://github.com/dapr/java-sdk/blob/master/examples/src/main/java/io/dapr/examples/jobs/DemoJobsClient.java) -- Register an endpoint for the dapr sidecar to invoke at trigger time [Endpoint Registration](https://github.com/dapr/java-sdk/blob/master/examples/src/main/java/io/dapr/examples/jobs/DemoJobsSpringApplication.java) - -This example uses the default configuration from `dapr init` in [self-hosted mode](https://github.com/dapr/cli#install-dapr-on-your-local-machine-self-hosted). - -## Prerequisites - -- [Dapr CLI and initialized environment](https://docs.dapr.io/getting-started). -- Java JDK 11 (or greater): - - [Oracle JDK](https://www.oracle.com/java/technologies/downloads), or - - OpenJDK -- [Apache Maven](https://maven.apache.org/install.html), version 3.x. -- [Docker Desktop](https://www.docker.com/products/docker-desktop) - -## Set up the environment - -Clone the [Java SDK repo](https://github.com/dapr/java-sdk) and navigate into it. - -```bash -git clone https://github.com/dapr/java-sdk.git -cd java-sdk -``` - -Run the following command to install the requirements for running the jobs example with the Dapr Java SDK. - -```bash -mvn clean install -DskipTests -``` - -From the Java SDK root directory, navigate to the examples' directory. - -```bash -cd examples -``` - -Run the Dapr sidecar. - -```sh -dapr run --app-id jobsapp --dapr-grpc-port 51439 --dapr-http-port 3500 --app-port 8080 -``` - -> Now, Dapr is listening for HTTP requests at `http://localhost:3500` and internal Jobs gRPC requests at `http://localhost:51439`. - -## Schedule and Get a job - -In the `DemoJobsClient` there are steps to schedule a job. Calling `scheduleJob` using the `DaprPreviewClient` -will schedule a job with the Dapr Runtime. - -```java -public class DemoJobsClient { - - /** - * The main method of this app to schedule and get jobs. - */ - public static void main(String[] args) throws Exception { - try (DaprPreviewClient client = new DaprClientBuilder().withPropertyOverrides(overrides).buildPreviewClient()) { - - // Schedule a job. - System.out.println("**** Scheduling a Job with name dapr-jobs-1 *****"); - ScheduleJobRequest scheduleJobRequest = new ScheduleJobRequest("dapr-job-1", - JobSchedule.fromString("* * * * * *")).setData("Hello World!".getBytes()); - client.scheduleJob(scheduleJobRequest).block(); - - System.out.println("**** Scheduling job dapr-jobs-1 completed *****"); - } - } -} -``` - -Call `getJob` to retrieve the job details that were previously created and scheduled. -``` -client.getJob(new GetJobRequest("dapr-job-1")).block() -``` - -Run the `DemoJobsClient` with the following command. - -```sh -java -jar target/dapr-java-sdk-examples-exec.jar io.dapr.examples.jobs.DemoJobsClient -``` - -### Sample output -``` -**** Scheduling a Job with name dapr-jobs-1 ***** -**** Scheduling job dapr-jobs-1 completed ***** -**** Retrieving a Job with name dapr-jobs-1 ***** -``` - -## Set up an endpoint to be invoked when the job is triggered - -The `DemoJobsSpringApplication` class starts a Spring Boot application that registers the endpoints specified in the `JobsController` -This endpoint acts like a callback for the scheduled job requests. - -```java -@RestController -public class JobsController { - - /** - * Handles jobs callback from Dapr. - * - * @param jobName name of the job. 
- * @param payload data from the job if payload exists. - * @return Empty Mono. - */ - @PostMapping("/job/{jobName}") - public Mono handleJob(@PathVariable("jobName") String jobName, - @RequestBody(required = false) byte[] payload) { - System.out.println("Job Name: " + jobName); - System.out.println("Job Payload: " + new String(payload)); - - return Mono.empty(); - } -} -``` - -Parameters: - -* `jobName`: The name of the triggered job. -* `payload`: Optional payload data associated with the job (as a byte array). - -Run the Spring Boot application with the following command. - -```sh -java -jar target/dapr-java-sdk-examples-exec.jar io.dapr.examples.jobs.DemoJobsSpringApplication -``` - -### Sample output -``` -Job Name: dapr-job-1 -Job Payload: Hello World! -``` - -## Delete a scheduled job - -```java -public class DemoJobsClient { - - /** - * The main method of this app deletes a job that was previously scheduled. - */ - public static void main(String[] args) throws Exception { - try (DaprPreviewClient client = new DaprClientBuilder().buildPreviewClient()) { - - // Delete a job. - System.out.println("**** Delete a Job with name dapr-jobs-1 *****"); - client.deleteJob(new DeleteJobRequest("dapr-job-1")).block(); - } - } -} -``` - -## Next steps -- [Learn more about Jobs]({{% ref jobs-overview.md %}}) -- [Jobs API reference]({{% ref jobs_api.md %}}) \ No newline at end of file diff --git a/daprdocs/content/en/java-sdk-docs/java-workflow/_index.md b/daprdocs/content/en/java-sdk-docs/java-workflow/_index.md deleted file mode 100644 index ecfb7adeb6..0000000000 --- a/daprdocs/content/en/java-sdk-docs/java-workflow/_index.md +++ /dev/null @@ -1,7 +0,0 @@ ---- -type: docs -title: "Workflow" -linkTitle: "Workflow" -weight: 3000 -description: How to get up and running with the Dapr Workflow extension ---- diff --git a/daprdocs/content/en/java-sdk-docs/java-workflow/java-workflow-howto.md b/daprdocs/content/en/java-sdk-docs/java-workflow/java-workflow-howto.md deleted file mode 100644 index 79c6e06d0c..0000000000 --- a/daprdocs/content/en/java-sdk-docs/java-workflow/java-workflow-howto.md +++ /dev/null @@ -1,284 +0,0 @@ ---- -type: docs -title: "How to: Author and manage Dapr Workflow in the Java SDK" -linkTitle: "How to: Author and manage workflows" -weight: 20000 -description: How to get up and running with workflows using the Dapr Java SDK ---- - -Let's create a Dapr workflow and invoke it using the console. With the [provided workflow example](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples/workflows), you will: - -- Execute the workflow instance using the [Java workflow worker](https://github.com/dapr/java-sdk/blob/master/examples/src/main/java/io/dapr/examples/workflows/DemoWorkflowWorker.java) -- Utilize the Java workflow client and API calls to [start and terminate workflow instances](https://github.com/dapr/java-sdk/blob/master/examples/src/main/java/io/dapr/examples/workflows/DemoWorkflowClient.java) - -This example uses the default configuration from `dapr init` in [self-hosted mode](https://github.com/dapr/cli#install-dapr-on-your-local-machine-self-hosted). - -## Prerequisites - -- [Dapr CLI and initialized environment](https://docs.dapr.io/getting-started). -- Java JDK 11 (or greater): - - [Oracle JDK](https://www.oracle.com/java/technologies/downloads), or - - OpenJDK -- [Apache Maven](https://maven.apache.org/install.html), version 3.x. 
- -- [Docker Desktop](https://www.docker.com/products/docker-desktop) - -- Verify you're using the latest proto bindings - -## Set up the environment - -Clone the Java SDK repo and navigate into it. - -```bash -git clone https://github.com/dapr/java-sdk.git -cd java-sdk -``` - -Run the following command to install the requirements for running this workflow sample with the Dapr Java SDK. - -```bash -mvn clean install -``` - -From the Java SDK root directory, navigate to the Dapr Workflow example. - -```bash -cd examples -``` - -## Run the `DemoWorkflowWorker` - -The `DemoWorkflowWorker` class registers an implementation of `DemoWorkflow` in Dapr's workflow runtime engine. In the `DemoWorkflowWorker.java` file, you can find the `DemoWorkflowWorker` class and the `main` method: - -```java -public class DemoWorkflowWorker { - - public static void main(String[] args) throws Exception { - // Register the Workflow with the runtime. - WorkflowRuntime.getInstance().registerWorkflow(DemoWorkflow.class); - System.out.println("Start workflow runtime"); - WorkflowRuntime.getInstance().startAndBlock(); - System.exit(0); - } -} -``` - -In the code above: -- `WorkflowRuntime.getInstance().registerWorkflow()` registers `DemoWorkflow` as a workflow in the Dapr Workflow runtime. -- `WorkflowRuntime.getInstance().start()` builds and starts the engine within the Dapr Workflow runtime. - -In the terminal, execute the following command to kick off the `DemoWorkflowWorker`: - -```sh -dapr run --app-id demoworkflowworker --resources-path ./components/workflows --dapr-grpc-port 50001 -- java -jar target/dapr-java-sdk-examples-exec.jar io.dapr.examples.workflows.DemoWorkflowWorker -``` - -**Expected output** - -``` -You're up and running! Both Dapr and your app logs will appear here. - -... - -== APP == Start workflow runtime -== APP == Sep 13, 2023 9:02:03 AM com.microsoft.durabletask.DurableTaskGrpcWorker startAndBlock -== APP == INFO: Durable Task worker is connecting to sidecar at 127.0.0.1:50001. -``` - -## Run the `DemoWorkflowClient` - -The `DemoWorkflowClient` starts instances of workflows that have been registered with Dapr. - -```java -public class DemoWorkflowClient { - - // ... 
- public static void main(String[] args) throws InterruptedException { - DaprWorkflowClient client = new DaprWorkflowClient(); - - try (client) { - String separatorStr = "*******"; - System.out.println(separatorStr); - String instanceId = client.scheduleNewWorkflow(DemoWorkflow.class, "input data"); - System.out.printf("Started new workflow instance with random ID: %s%n", instanceId); - - System.out.println(separatorStr); - System.out.println("**GetInstanceMetadata:Running Workflow**"); - WorkflowState workflowMetadata = client.getWorkflowState(instanceId, true); - System.out.printf("Result: %s%n", workflowMetadata); - - System.out.println(separatorStr); - System.out.println("**WaitForWorkflowStart**"); - try { - WorkflowState waitForWorkflowStartResult = - client.waitForWorkflowStart(instanceId, Duration.ofSeconds(60), true); - System.out.printf("Result: %s%n", waitForWorkflowStartResult); - } catch (TimeoutException ex) { - System.out.printf("waitForWorkflowStart has an exception:%s%n", ex); - } - - System.out.println(separatorStr); - System.out.println("**SendExternalMessage**"); - client.raiseEvent(instanceId, "TestEvent", "TestEventPayload"); - - System.out.println(separatorStr); - System.out.println("** Registering parallel Events to be captured by allOf(t1,t2,t3) **"); - client.raiseEvent(instanceId, "event1", "TestEvent 1 Payload"); - client.raiseEvent(instanceId, "event2", "TestEvent 2 Payload"); - client.raiseEvent(instanceId, "event3", "TestEvent 3 Payload"); - System.out.printf("Events raised for workflow with instanceId: %s\n", instanceId); - - System.out.println(separatorStr); - System.out.println("** Registering Event to be captured by anyOf(t1,t2,t3) **"); - client.raiseEvent(instanceId, "e2", "event 2 Payload"); - System.out.printf("Event raised for workflow with instanceId: %s\n", instanceId); - - - System.out.println(separatorStr); - System.out.println("**waitForWorkflowCompletion**"); - try { - WorkflowState waitForWorkflowCompletionResult = - client.waitForWorkflowCompletion(instanceId, Duration.ofSeconds(60), true); - System.out.printf("Result: %s%n", waitForWorkflowCompletionResult); - } catch (TimeoutException ex) { - System.out.printf("waitForWorkflowCompletion has an exception:%s%n", ex); - } - - System.out.println(separatorStr); - System.out.println("**purgeWorkflow**"); - boolean purgeResult = client.purgeWorkflow(instanceId); - System.out.printf("purgeResult: %s%n", purgeResult); - - System.out.println(separatorStr); - System.out.println("**raiseEvent**"); - - String eventInstanceId = client.scheduleNewWorkflow(DemoWorkflow.class); - System.out.printf("Started new workflow instance with random ID: %s%n", eventInstanceId); - client.raiseEvent(eventInstanceId, "TestException", null); - System.out.printf("Event raised for workflow with instanceId: %s\n", eventInstanceId); - - System.out.println(separatorStr); - String instanceToTerminateId = "terminateMe"; - client.scheduleNewWorkflow(DemoWorkflow.class, null, instanceToTerminateId); - System.out.printf("Started new workflow instance with specified ID: %s%n", instanceToTerminateId); - - TimeUnit.SECONDS.sleep(5); - System.out.println("Terminate this workflow instance manually before the timeout is reached"); - client.terminateWorkflow(instanceToTerminateId, null); - System.out.println(separatorStr); - - String restartingInstanceId = "restarting"; - client.scheduleNewWorkflow(DemoWorkflow.class, null, restartingInstanceId); - System.out.printf("Started new workflow instance with ID: %s%n", restartingInstanceId); - 
System.out.println("Sleeping 30 seconds to restart the workflow"); - TimeUnit.SECONDS.sleep(30); - - System.out.println("**SendExternalMessage: RestartEvent**"); - client.raiseEvent(restartingInstanceId, "RestartEvent", "RestartEventPayload"); - - System.out.println("Sleeping 30 seconds to terminate the eternal workflow"); - TimeUnit.SECONDS.sleep(30); - client.terminateWorkflow(restartingInstanceId, null); - } - - System.out.println("Exiting DemoWorkflowClient."); - System.exit(0); - } -} -``` - -In a second terminal window, start the workflow by running the following command: - -```sh -java -jar target/dapr-java-sdk-examples-exec.jar io.dapr.examples.workflows.DemoWorkflowClient -``` - -**Expected output** - -``` -******* -Started new workflow instance with random ID: 0b4cc0d5-413a-4c1c-816a-a71fa24740d4 -******* -**GetInstanceMetadata:Running Workflow** -Result: [Name: 'io.dapr.examples.workflows.DemoWorkflow', ID: '0b4cc0d5-413a-4c1c-816a-a71fa24740d4', RuntimeStatus: RUNNING, CreatedAt: 2023-09-13T13:02:30.547Z, LastUpdatedAt: 2023-09-13T13:02:30.699Z, Input: '"input data"', Output: ''] -******* -**WaitForWorkflowStart** -Result: [Name: 'io.dapr.examples.workflows.DemoWorkflow', ID: '0b4cc0d5-413a-4c1c-816a-a71fa24740d4', RuntimeStatus: RUNNING, CreatedAt: 2023-09-13T13:02:30.547Z, LastUpdatedAt: 2023-09-13T13:02:30.699Z, Input: '"input data"', Output: ''] -******* -**SendExternalMessage** -******* -** Registering parallel Events to be captured by allOf(t1,t2,t3) ** -Events raised for workflow with instanceId: 0b4cc0d5-413a-4c1c-816a-a71fa24740d4 -******* -** Registering Event to be captured by anyOf(t1,t2,t3) ** -Event raised for workflow with instanceId: 0b4cc0d5-413a-4c1c-816a-a71fa24740d4 -******* -**WaitForWorkflowCompletion** -Result: [Name: 'io.dapr.examples.workflows.DemoWorkflow', ID: '0b4cc0d5-413a-4c1c-816a-a71fa24740d4', RuntimeStatus: FAILED, CreatedAt: 2023-09-13T13:02:30.547Z, LastUpdatedAt: 2023-09-13T13:02:55.054Z, Input: '"input data"', Output: ''] -******* -**purgeWorkflow** -purgeResult: true -******* -**raiseEvent** -Started new workflow instance with random ID: 7707d141-ebd0-4e54-816e-703cb7a52747 -Event raised for workflow with instanceId: 7707d141-ebd0-4e54-816e-703cb7a52747 -******* -Started new workflow instance with specified ID: terminateMe -Terminate this workflow instance manually before the timeout is reached -******* -Started new workflow instance with ID: restarting -Sleeping 30 seconds to restart the workflow -**SendExternalMessage: RestartEvent** -Sleeping 30 seconds to terminate the eternal workflow -Exiting DemoWorkflowClient. -``` - -## What happened? - -1. When you ran `dapr run`, the workflow worker registered the workflow (`DemoWorkflow`) and its actvities to the Dapr Workflow engine. -1. When you ran `java`, the workflow client started the workflow instance with the following activities. You can follow along with the output in the terminal where you ran `dapr run`. - 1. The workflow is started, raises three parallel tasks, and waits for them to complete. - 1. The workflow client calls the activity and sends the "Hello Activity" message to the console. - 1. The workflow times out and is purged. - 1. The workflow client starts a new workflow instance with a random ID, uses another workflow instance called `terminateMe` to terminate it, and restarts it with the workflow called `restarting`. - 1. The worfklow client is then exited. 
- -## Next steps -- [Learn more about Dapr workflow]({{% ref workflow-overview.md %}}) -- [Workflow API reference]({{% ref workflow_api.md %}}) - -## Advanced features - -### Task Execution Keys - -Task execution keys are unique identifiers generated by the durabletask-java library. They are stored in the `WorkflowActivityContext` and can be used to track and manage the execution of workflow activities. They are particularly useful for: - -1. **Idempotency**: Ensuring activities are not executed multiple times for the same task -2. **State Management**: Tracking the state of activity execution -3. **Error Handling**: Managing retries and failures in a controlled manner - -Here's an example of how to use task execution keys in your workflow activities: - -```java -public class TaskExecutionKeyActivity implements WorkflowActivity { - @Override - public Object run(WorkflowActivityContext ctx) { - // Get the task execution key for this activity - String taskExecutionKey = ctx.getTaskExecutionKey(); - - // Use the key to implement idempotency or state management - // For example, check if this task has already been executed - if (isTaskAlreadyExecuted(taskExecutionKey)) { - return getPreviousResult(taskExecutionKey); - } - - // Execute the activity logic - Object result = executeActivityLogic(); - - // Store the result with the task execution key - storeResult(taskExecutionKey, result); - - return result; - } -} -``` diff --git a/daprdocs/content/en/java-sdk-docs/spring-boot/_index.md b/daprdocs/content/en/java-sdk-docs/spring-boot/_index.md deleted file mode 100644 index fcfaacd1a6..0000000000 --- a/daprdocs/content/en/java-sdk-docs/spring-boot/_index.md +++ /dev/null @@ -1,347 +0,0 @@ ---- -type: docs -title: "Getting started with Dapr and Spring Boot" -linkTitle: "Spring Boot Integration" -weight: 4000 -description: How to get started with Dapr and Spring Boot ---- - -By combining Dapr and Spring Boot, we can create infrastructure-independent Java applications that can be deployed across different environments, supporting a wide range of on-premises and cloud provider services. - -First, we will start with a simple integration covering the `DaprClient` and the [Testcontainers](https://testcontainers.com/) integration, to then use Spring and Spring Boot mechanisms and programming model to leverage the Dapr APIs under the hood. This helps teams remove dependencies such as clients and drivers required to connect to environment-specific infrastructure (databases, key-value stores, message brokers, configuration/secret stores, etc.). - -{{% alert title="Note" color="primary" %}} -The Spring Boot integration requires Spring Boot 3.x+ to work. This will not work with Spring Boot 2.x. -The Spring Boot integration remains in alpha. We need your help and feedback to graduate it. -Please join the [#java-sdk discord channel](https://discord.com/channels/778680217417809931/778749797242765342) discussion or open issues in the [dapr/java-sdk](https://github.com/dapr/java-sdk/issues). - -{{% /alert %}} - - -## Adding the Dapr and Spring Boot integration to your project - -If you already have a Spring Boot application, you can directly add the following dependencies to your project: - -```xml -<dependency> -  <groupId>io.dapr.spring</groupId> -  <artifactId>dapr-spring-boot-starter</artifactId> -  <version>0.16.0</version> -</dependency> -<dependency> -  <groupId>io.dapr.spring</groupId> -  <artifactId>dapr-spring-boot-starter-test</artifactId> -  <version>0.16.0</version> -  <scope>test</scope> -</dependency> -``` - -You can find the [latest released version here](https://central.sonatype.com/artifact/io.dapr.spring/dapr-spring-boot-starter).
- -By adding these dependencies, you can: -- Autowire a `DaprClient` to use inside your applications -- Use the Spring Data and Messaging abstractions and programming model that uses the Dapr APIs under the hood -- Improve your inner-development loop by relying on [Testcontainers](https://testcontainers.com/) to bootstrap Dapr Control plane services and default components - -Once these dependencies are in your application, you can rely on Spring Boot autoconfiguration to autowire a `DaprClient` instance: - -```java -@Autowired -private DaprClient daprClient; - -``` - -This will connect to the default Dapr gRPC endpoint `localhost:50001`, requiring you to start Dapr outside of your application. - -{{% alert title="Note" color="primary" %}} -By default, the following properties are preconfigured for `DaprClient` and `DaprWorkflowClient`: -```properties -dapr.client.httpEndpoint=http://localhost -dapr.client.httpPort=3500 -dapr.client.grpcEndpoint=localhost -dapr.client.grpcPort=50001 -dapr.client.apiToken= -``` -These values are used by default, but you can override them in your `application.properties` file to suit your environment. Please note that both kebab case and camel case are supported. -{{% /alert %}} - -You can use the `DaprClient` to interact with the Dapr APIs anywhere in your application, for example from inside a REST endpoint: - -```java -@RestController -public class DemoRestController { - @Autowired - private DaprClient daprClient; - - @PostMapping("/store") - public void storeOrder(@RequestBody Order order){ - daprClient.saveState("kvstore", order.orderId(), order).block(); - } -} - -record Order(String orderId, Integer amount){} -``` - -If you want to avoid managing Dapr outside of your Spring Boot application, you can rely on [Testcontainers](https://testcontainers.com/) to bootstrap Dapr beside your application for development purposes. -To do this we can create a test configuration that uses `Testcontainers` to bootstrap all we need to develop our applications using the Dapr APIs. - -Using [Testcontainers](https://testcontainers.com/) and Dapr integrations, we let the `@TestConfiguration` bootstrap Dapr for our applications. -Notice that for this example, we are configuring Dapr with a Statestore component called `kvstore` that connects to an instance of `PostgreSQL` also bootstrapped by Testcontainers. - -```java -@TestConfiguration(proxyBeanMethods = false) -public class DaprTestContainersConfig { - @Bean - @ServiceConnection - public DaprContainer daprContainer(Network daprNetwork, PostgreSQLContainer postgreSQLContainer){ - - return new DaprContainer("daprio/daprd:1.16.0-rc.5") - .withAppName("producer-app") - .withNetwork(daprNetwork) - .withComponent(new Component("kvstore", "state.postgresql", "v1", STATE_STORE_PROPERTIES)) - .withComponent(new Component("kvbinding", "bindings.postgresql", "v1", BINDING_PROPERTIES)) - .dependsOn(postgreSQLContainer); - } -} -``` - -Inside the test classpath you can add a new Spring Boot Application that uses this configuration for tests: - -```java -@SpringBootApplication -public class TestProducerApplication { - - public static void main(String[] args) { - - SpringApplication - .from(ProducerApplication::main) - .with(DaprTestContainersConfig.class) - .run(args); - } - -} -``` - -Now you can start your application with: -```bash -mvn spring-boot:test-run -``` - -Running this command will start the application, using the provided test configuration that includes the Testcontainers and Dapr integration. 
In the logs you should be able to see that the `daprd` and the `placement` service containers were started for your application. - -Besides the previous configuration (`DaprTestContainersConfig`) your tests shouldn't be testing Dapr itself, just the REST endpoints that your application is exposing. - - -## Leveraging Spring & Spring Boot programming model with Dapr - -The Java SDK allows you to interface with all of the [Dapr building blocks]({{% ref building-blocks %}}). -But if you want to leverage the Spring and Spring Boot programming model you can use the `dapr-spring-boot-starter` integration. -This includes implementations of Spring Data (`KeyValueTemplate` and `CrudRepository`) as well as a `DaprMessagingTemplate` for producing and consuming messages -(similar to [Spring Kafka](https://spring.io/projects/spring-kafka), [Spring Pulsar](https://spring.io/projects/spring-pulsar) and [Spring AMQP for RabbitMQ](https://spring.io/projects/spring-amqp)) and Dapr workflows. - -## Using Spring Data `CrudRepository` and `KeyValueTemplate` - -You can use well-known Spring Data constructs relying on a Dapr-based implementation. -With Dapr, you don't need to add any infrastructure-related driver or client, making your Spring application lighter and decoupled from the environment where it is running. - -Under the hood these implementations use the Dapr Statestore and Binding APIs. - -### Configuration parameters - -With Spring Data abstractions you can configure which statestore and bindings will be used by Dapr to connect to the available infrastructure. -This can be done by setting the following properties: - -```properties -dapr.statestore.name=kvstore -dapr.statestore.binding=kvbinding -``` - -Then you can `@Autowired` a `KeyValueTemplate` or a `CrudRepository` like this: - -```java -@RestController -@EnableDaprRepositories -public class OrdersRestController { - @Autowired - private OrderRepository repository; - - @PostMapping("/orders") - public void storeOrder(@RequestBody Order order){ - repository.save(order); - } - - @GetMapping("/orders") - public Iterable<Order> getAll(){ - return repository.findAll(); - } - - -} -``` - -Where `OrderRepository` is defined in an interface that extends the Spring Data `CrudRepository` interface: - -```java -public interface OrderRepository extends CrudRepository<Order, String> {} -``` - -Notice that the `@EnableDaprRepositories` annotation does all the magic of wiring the Dapr APIs under the `CrudRepository` interface.
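The `KeyValueTemplate` mentioned above can also be used directly instead of a repository. The following is a sketch only (the controller and endpoint names are made up for illustration, and it assumes the `DaprKeyValueTemplate` bean configured in the next snippet is available); `insert` and `findById` come from the Spring Data KeyValue API that `DaprKeyValueTemplate` builds on:

```java
@RestController
public class OrdersKeyValueController {

  @Autowired
  private DaprKeyValueTemplate keyValueTemplate;

  @PostMapping("/kv/orders")
  public Order storeOrder(@RequestBody Order order) {
    // Persists the order through the configured Dapr statestore, keyed by its orderId.
    return keyValueTemplate.insert(order.orderId(), order);
  }

  @GetMapping("/kv/orders/{orderId}")
  public Order getOrder(@PathVariable("orderId") String orderId) {
    // Reads the order back from the statestore; returns null when it is not found.
    return keyValueTemplate.findById(orderId, Order.class).orElse(null);
  }
}
```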
-Because Dapr allows users to interact with different StateStores from the same application, as a user you need to provide the following beans as a Spring Boot `@Configuration`: - -```java -@Configuration -@EnableConfigurationProperties({DaprStateStoreProperties.class}) -public class ProducerAppConfiguration { - - @Bean - public KeyValueAdapterResolver keyValueAdapterResolver(DaprClient daprClient, ObjectMapper mapper, DaprStateStoreProperties daprStatestoreProperties) { - String storeName = daprStatestoreProperties.getName(); - String bindingName = daprStatestoreProperties.getBinding(); - - return new DaprKeyValueAdapterResolver(daprClient, mapper, storeName, bindingName); - } - - @Bean - public DaprKeyValueTemplate daprKeyValueTemplate(KeyValueAdapterResolver keyValueAdapterResolver) { - return new DaprKeyValueTemplate(keyValueAdapterResolver); - } - -} -``` - -## Using Spring Messaging for producing and consuming events - -Similar to Spring Kafka, Spring Pulsar and Spring AMQP you can use the `DaprMessagingTemplate` to publish messages to the configured infrastructure. To consume messages you can use the `@Topic` annotation (soon to be renamed to `@DaprListener`). - -To publish events/messages you can `@Autowired` the `DaprMessagingTemplate` in your Spring application. -For this example we will be publishing `Order` events and we are sending messages to the topic named `topic`. - -```java -@Autowired -private DaprMessagingTemplate<Order> messagingTemplate; - -@PostMapping("/orders") -public void storeOrder(@RequestBody Order order){ - repository.save(order); - messagingTemplate.send("topic", order); -} - -``` - -Similarly to the `CrudRepository`, we need to specify which PubSub broker we want to use to publish and consume our messages. - -```properties -dapr.pubsub.name=pubsub -``` - -Because with Dapr you can connect to multiple PubSub brokers you need to provide the following bean to let Dapr know which PubSub broker your `DaprMessagingTemplate` will use: -```java -@Bean -public DaprMessagingTemplate<Order> messagingTemplate(DaprClient daprClient, - DaprPubSubProperties daprPubSubProperties) { - return new DaprMessagingTemplate<>(daprClient, daprPubSubProperties.getName()); -} -``` - -Finally, because Dapr PubSub requires a bidirectional connection between your application and Dapr you need to expand your Testcontainers configuration with a few parameters: - -```java -@Bean -@ServiceConnection -public DaprContainer daprContainer(Network daprNetwork, PostgreSQLContainer postgreSQLContainer, RabbitMQContainer rabbitMQContainer){ - - return new DaprContainer("daprio/daprd:1.16.0-rc.5") - .withAppName("producer-app") - .withNetwork(daprNetwork) - .withComponent(new Component("kvstore", "state.postgresql", "v1", STATE_STORE_PROPERTIES)) - .withComponent(new Component("kvbinding", "bindings.postgresql", "v1", BINDING_PROPERTIES)) - .withComponent(new Component("pubsub", "pubsub.rabbitmq", "v1", rabbitMqProperties)) - .withAppPort(8080) - .withAppChannelAddress("host.testcontainers.internal") - .dependsOn(rabbitMQContainer) - .dependsOn(postgreSQLContainer); -} -``` - -Now, in the Dapr configuration we have included a `pubsub` component that will connect to an instance of RabbitMQ started by Testcontainers. -We have also set two important parameters `.withAppPort(8080)` and `.withAppChannelAddress("host.testcontainers.internal")` which allow Dapr to -call back into the application when a message is published in the broker.
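The `STATE_STORE_PROPERTIES`, `BINDING_PROPERTIES` and `rabbitMqProperties` maps referenced in the container definition are not shown in this guide. As a minimal sketch for the RabbitMQ one (the metadata key and the connection string are assumptions based on the `pubsub.rabbitmq` component, and `rabbitmq` is assumed to be the network alias of the `RabbitMQContainer` on the shared Docker network):

```java
// Hypothetical metadata for the pubsub.rabbitmq component used above; adjust to your setup.
private static final Map<String, String> rabbitMqProperties = Map.of(
    "connectionString", "amqp://guest:guest@rabbitmq:5672");
```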
- -To listen to events/messages you need to expose an endpoint in the application that will be responsible for receiving the messages. -If you expose a REST endpoint you can use the `@Topic` annotation to let Dapr know where it needs to forward the events/messages to: - -```java -@PostMapping("subscribe") -@Topic(pubsubName = "pubsub", name = "topic") -public void subscribe(@RequestBody CloudEvent cloudEvent){ - events.add(cloudEvent); -} -``` - -Upon bootstrapping your application, Dapr will register the subscription so that messages are forwarded to the `subscribe` endpoint exposed by your application. - -If you are writing tests for these subscribers you need to ensure that Testcontainers knows that your application will be running on port 8080, -so containers started with Testcontainers know where your application is: - -```java -@BeforeAll -public static void setup(){ - org.testcontainers.Testcontainers.exposeHostPorts(8080); -} -``` - -You can check and run the [full example source code here](https://github.com/salaboy/dapr-spring-boot-docs-examples). - -## Using Dapr Workflows with Spring Boot - -Following the same approach that we used for Spring Data and Spring Messaging, the `dapr-spring-boot-starter` brings Dapr Workflow integration for Spring Boot users. - -To work with Dapr Workflows you need to define and implement your workflows using code. The Dapr Spring Boot Starter makes your life easier by managing `Workflow`s and `WorkflowActivity`s as Spring beans. - -In order to enable the automatic bean discovery you can annotate your `@SpringBootApplication` with the `@EnableDaprWorkflows` annotation: - -```java -@SpringBootApplication -@EnableDaprWorkflows -public class MySpringBootApplication {} -``` - -By adding this annotation, all the `WorkflowActivity`s will be automatically managed by Spring and registered to the workflow engine. - -By having all `WorkflowActivity`s as managed beans we can use Spring's `@Autowired` mechanism to inject any bean that our workflow activity might need to implement its functionality, for example a `RestTemplate`: - -```java -public class MyWorkflowActivity implements WorkflowActivity { - - @Autowired - private RestTemplate restTemplate; -``` - -You can also `@Autowired` the `DaprWorkflowClient` to create new instances of your workflows. - -```java -@Autowired -private DaprWorkflowClient daprWorkflowClient; -``` - -This enables applications to schedule new workflow instances and raise events. - -```java -String instanceId = daprWorkflowClient.scheduleNewWorkflow(MyWorkflow.class, payload); -``` - -and - -```java -daprWorkflowClient.raiseEvent(instanceId, "MyEvent", event); -``` - -Check the [Dapr Workflow documentation](https://docs.dapr.io/developing-applications/building-blocks/workflow/workflow-overview/) for more information about how to work with Dapr Workflows. - - -## Next steps - -Learn more about the [Dapr Java SDK packages available to add to your Java applications](https://dapr.github.io/java-sdk/).
- -## Related links -- [Java SDK examples](https://github.com/dapr/java-sdk/tree/master/examples/src/main/java/io/dapr/examples) From 5d65320fd546364c7eafdd6e79235cda2e690c0d Mon Sep 17 00:00:00 2001 From: salaboy Date: Sat, 13 Dec 2025 05:06:33 +0100 Subject: [PATCH 13/17] Bringing Durable Task Java as a Maven module inside the Java SDK (#1575) * fixing checkstyle and javadocs Signed-off-by: salaboy * Replace openjdk:17-jdk-slim to eclipse-temurin:17-jdk-jammy (#1574) Signed-off-by: Matheus Cruz Signed-off-by: salaboy * Align Java API with other languages (#1560) * Align Java API with other languages Signed-off-by: Matheus Cruz * Update documentation Signed-off-by: Matheus Cruz * Change return type of waitForWorkflowStart method Signed-off-by: artur-ciocanu --------- Signed-off-by: Matheus Cruz Signed-off-by: artur-ciocanu Co-authored-by: artur-ciocanu Signed-off-by: salaboy * use built in durable task Signed-off-by: salaboy * exclude jacoco rules for examples and durabletask-client Signed-off-by: salaboy * increasing timeout for IT Signed-off-by: salaboy * removing dt build from matrix Signed-off-by: salaboy * adding java to dt build Signed-off-by: salaboy * Fix dependencies multi app build and add proper test deps (#1572) * Force Jackson version to override the SB Jackson version Signed-off-by: Artur Ciocanu * Move all the Jackson deps to parent POM. Signed-off-by: Artur Ciocanu * Ensure app JAR build order Signed-off-by: Artur Ciocanu * Remove explicit Jackson from sdk-tests module. Signed-off-by: Artur Ciocanu * Make sure test is used for test dependencies. Signed-off-by: Artur Ciocanu * Remove extra Jackson modules. Signed-off-by: Artur Ciocanu --------- Signed-off-by: Artur Ciocanu Signed-off-by: salaboy * docs: add architecture diagram to README (#1549) * Preview New README * Preview New README 2 * Preview New README 3 * docs: add architecture diagram showing Java SDK interaction with Dapr runtime (close #<915>) * docs: add architecture diagram showing Java SDK interaction with Dapr runtime CORRECTION (close #<915>) * docs: add architecture diagram showing Java SDK interaction with Dapr runtime (close #<915>) * docs: add architecture diagram showing Java SDK interaction with Dapr runtime (close #<915>) --------- Co-authored-by: Siri Varma Vegiraju Co-authored-by: artur-ciocanu Co-authored-by: Cassie Coyle Signed-off-by: salaboy * Add statestore example with Outbox pattern (#1582) * Add statestore example with Outbox pattern Signed-off-by: Matheus Cruz * Clean events after each test Signed-off-by: Matheus Cruz * Add license header Signed-off-by: Matheus Cruz * Apply pull request suggestions Signed-off-by: Matheus Cruz --------- Signed-off-by: Matheus Cruz Co-authored-by: salaboy Signed-off-by: salaboy * adding new method signature plus test (#1570) * adding new method signature plus test Signed-off-by: salaboy * re adding imports Signed-off-by: salaboy * fixing style Signed-off-by: salaboy * checking empty metadata Signed-off-by: salaboy * copy meta for safety and check if key is present Signed-off-by: salaboy * Centralize Maven dependency version management (#1564) Signed-off-by: salaboy * Fix dependencies multi app build and add proper test deps (#1572) * Force Jackson version to override the SB Jackson version Signed-off-by: Artur Ciocanu * Move all the Jackson deps to parent POM. Signed-off-by: Artur Ciocanu * Ensure app JAR build order Signed-off-by: Artur Ciocanu * Remove explicit Jackson from sdk-tests module. 
Signed-off-by: Artur Ciocanu * Make sure test is used for test dependencies. Signed-off-by: Artur Ciocanu * Remove extra Jackson modules. Signed-off-by: Artur Ciocanu --------- Signed-off-by: Artur Ciocanu Signed-off-by: salaboy * reverting pom Signed-off-by: salaboy * fix codestyle Signed-off-by: salaboy * using metaCopy Signed-off-by: salaboy --------- Signed-off-by: salaboy Signed-off-by: Artur Ciocanu Co-authored-by: artur-ciocanu Signed-off-by: salaboy * Bump actions/upload-artifact from 4 to 5 (#1587) Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4 to 5. - [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/v4...v5) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-version: '5' dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: salaboy * Add gRPC support to Dapr testcontainer (#1586) * Add gRPC support to Dapr testcontainer Signed-off-by: wlfgang * Avoid using null to indicate default value Signed-off-by: wlfgang --------- Signed-off-by: wlfgang Co-authored-by: artur-ciocanu Co-authored-by: wlfgang Signed-off-by: salaboy * Use dependencies BOM and remove duplicates. (#1588) Signed-off-by: Artur Ciocanu Signed-off-by: salaboy * Examples + Docs for App API Token authentication for gRPC and HTTP (#1589) * example Signed-off-by: Cassandra Coyle * docs for example Signed-off-by: Cassandra Coyle --------- Signed-off-by: Cassandra Coyle Signed-off-by: salaboy * Another set of Maven version, properties and plugin improvements (#1596) Signed-off-by: salaboy * Adding a Flux based subscribeToEvents method (#1598) * Adding a Flux based subscribeToEvents method Signed-off-by: Artur Ciocanu * Simplify GRPC stream handling Signed-off-by: Artur Ciocanu * Simplify Javadoc Signed-off-by: Artur Ciocanu * Fix unit tests and simplify implementation Signed-off-by: Artur Ciocanu * Adding event subscriber stream observer to simplify subscription logic Signed-off-by: Artur Ciocanu * Use start() method to start stream subscription Signed-off-by: Artur Ciocanu * Add unit test for event suscriber observer Signed-off-by: Artur Ciocanu * Improve the tests a little bit Signed-off-by: Artur Ciocanu * Remove the unnecessary method Signed-off-by: Artur Ciocanu * Improve error handling and use CloudEvent wrapper Signed-off-by: Artur Ciocanu * Fix unit tests asserts Signed-off-by: Artur Ciocanu * Adjust Java examples for Subscriber Signed-off-by: Artur Ciocanu --------- Signed-off-by: Artur Ciocanu Signed-off-by: salaboy * Remove SDK docs due to migration to main Docs repo (#1593) * Remove SDK docs due to migration to main Docs repo Signed-off-by: Marc Duiker * Remove sed lines related to sdk docs Signed-off-by: Marc Duiker --------- Signed-off-by: Marc Duiker Co-authored-by: salaboy Signed-off-by: salaboy * cleaning up sdk version script Signed-off-by: salaboy --------- Signed-off-by: salaboy Signed-off-by: Matheus Cruz Signed-off-by: artur-ciocanu Signed-off-by: Artur Ciocanu Signed-off-by: dependabot[bot] Signed-off-by: wlfgang Signed-off-by: Cassandra Coyle Signed-off-by: Marc Duiker Co-authored-by: Matheus Cruz <56329339+mcruzdev@users.noreply.github.com> Co-authored-by: artur-ciocanu Co-authored-by: Raymundo Zamora Co-authored-by: Siri Varma Vegiraju Co-authored-by: Cassie Coyle Co-authored-by: dependabot[bot] 
<49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: wlfgang <14792753+wlfgang@users.noreply.github.com> Co-authored-by: wlfgang Co-authored-by: Marc Duiker Signed-off-by: salaboy --- .github/scripts/update_sdk_version.sh | 18 +- .github/workflows/build.yml | 55 +- durabletask-client/pom.xml | 163 ++ .../CompositeTaskFailedException.java | 68 + .../io/dapr/durabletask/DataConverter.java | 88 + .../dapr/durabletask/DurableTaskClient.java | 346 ++++ .../durabletask/DurableTaskGrpcClient.java | 423 ++++ .../DurableTaskGrpcClientBuilder.java | 128 ++ .../durabletask/DurableTaskGrpcWorker.java | 328 +++ .../DurableTaskGrpcWorkerBuilder.java | 164 ++ .../io/dapr/durabletask/FailureDetails.java | 145 ++ .../java/io/dapr/durabletask/Helpers.java | 77 + .../durabletask/JacksonDataConverter.java | 58 + .../NewOrchestrationInstanceOptions.java | 147 ++ ...NonDeterministicOrchestratorException.java | 20 + .../durabletask/OrchestrationMetadata.java | 283 +++ .../dapr/durabletask/OrchestrationRunner.java | 169 ++ .../OrchestrationRuntimeStatus.java | 118 ++ .../durabletask/OrchestrationStatusQuery.java | 217 ++ .../OrchestrationStatusQueryResult.java | 53 + .../durabletask/OrchestratorFunction.java | 38 + .../durabletask/PurgeInstanceCriteria.java | 125 ++ .../java/io/dapr/durabletask/PurgeResult.java | 37 + .../io/dapr/durabletask/RetryContext.java | 79 + .../io/dapr/durabletask/RetryHandler.java | 31 + .../java/io/dapr/durabletask/RetryPolicy.java | 176 ++ .../main/java/io/dapr/durabletask/Task.java | 91 + .../io/dapr/durabletask/TaskActivity.java | 45 + .../dapr/durabletask/TaskActivityContext.java | 51 + .../durabletask/TaskActivityExecutor.java | 96 + .../dapr/durabletask/TaskActivityFactory.java | 33 + .../durabletask/TaskCanceledException.java | 26 + .../dapr/durabletask/TaskFailedException.java | 76 + .../java/io/dapr/durabletask/TaskOptions.java | 171 ++ .../dapr/durabletask/TaskOrchestration.java | 82 + .../durabletask/TaskOrchestrationContext.java | 598 ++++++ .../TaskOrchestrationExecutor.java | 1515 ++++++++++++++ .../durabletask/TaskOrchestrationFactory.java | 33 + .../durabletask/TaskOrchestratorResult.java | 40 + .../ContinueAsNewInterruption.java | 32 + .../OrchestratorBlockedException.java | 31 + .../dapr/durabletask/util/UuidGenerator.java | 63 + .../dapr/durabletask/DurableTaskClientIT.java | 1785 +++++++++++++++++ .../DurableTaskGrpcClientTlsTest.java | 342 ++++ .../io/dapr/durabletask/ErrorHandlingIT.java | 306 +++ .../dapr/durabletask/IntegrationTestBase.java | 91 + .../io/dapr/durabletask/TaskOptionsTest.java | 142 ++ pom.xml | 25 +- sdk-workflows/pom.xml | 22 +- .../runtime/DefaultWorkflowContext.java | 2 +- .../workflows/DefaultWorkflowContextTest.java | 2 +- 51 files changed, 9199 insertions(+), 55 deletions(-) create mode 100644 durabletask-client/pom.xml create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/CompositeTaskFailedException.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/DataConverter.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskClient.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskGrpcClient.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskGrpcClientBuilder.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskGrpcWorker.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskGrpcWorkerBuilder.java create mode 
100644 durabletask-client/src/main/java/io/dapr/durabletask/FailureDetails.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/Helpers.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/JacksonDataConverter.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/NewOrchestrationInstanceOptions.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/NonDeterministicOrchestratorException.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationMetadata.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationRunner.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationRuntimeStatus.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationStatusQuery.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationStatusQueryResult.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/OrchestratorFunction.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/PurgeInstanceCriteria.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/PurgeResult.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/RetryContext.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/RetryHandler.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/RetryPolicy.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/Task.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/TaskActivity.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/TaskActivityContext.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/TaskActivityExecutor.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/TaskActivityFactory.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/TaskCanceledException.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/TaskFailedException.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/TaskOptions.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestration.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestrationContext.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestrationExecutor.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestrationFactory.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestratorResult.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/interruption/ContinueAsNewInterruption.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/interruption/OrchestratorBlockedException.java create mode 100644 durabletask-client/src/main/java/io/dapr/durabletask/util/UuidGenerator.java create mode 100644 durabletask-client/src/test/java/io/dapr/durabletask/DurableTaskClientIT.java create mode 100644 durabletask-client/src/test/java/io/dapr/durabletask/DurableTaskGrpcClientTlsTest.java create mode 100644 durabletask-client/src/test/java/io/dapr/durabletask/ErrorHandlingIT.java create mode 100644 durabletask-client/src/test/java/io/dapr/durabletask/IntegrationTestBase.java create mode 100644 
durabletask-client/src/test/java/io/dapr/durabletask/TaskOptionsTest.java diff --git a/.github/scripts/update_sdk_version.sh b/.github/scripts/update_sdk_version.sh index f11dd3db6b..0e5726ecca 100755 --- a/.github/scripts/update_sdk_version.sh +++ b/.github/scripts/update_sdk_version.sh @@ -8,27 +8,11 @@ DAPR_JAVA_SDK_VERSION=$1 # Replaces the SDK major version to 0 for alpha artifacts. DAPR_JAVA_SDK_ALPHA_VERSION=`echo $DAPR_JAVA_SDK_VERSION | sed 's/^[0-9]*\./0./'` -mvn versions:set -DnewVersion=$DAPR_JAVA_SDK_VERSION +mvn versions:set -DnewVersion=$DAPR_JAVA_SDK_VERSION -DprocessDependencies=true mvn versions:set-property -Dproperty=dapr.sdk.alpha.version -DnewVersion=$DAPR_JAVA_SDK_ALPHA_VERSION mvn versions:set-property -Dproperty=dapr.sdk.version -DnewVersion=$DAPR_JAVA_SDK_VERSION mvn versions:set-property -Dproperty=dapr.sdk.version -DnewVersion=$DAPR_JAVA_SDK_VERSION -f sdk-tests/pom.xml mvn versions:set-property -Dproperty=dapr.sdk.alpha.version -DnewVersion=$DAPR_JAVA_SDK_ALPHA_VERSION -f sdk-tests/pom.xml -################### -# Alpha artifacts # -################### - -# sdk-workflows -mvn versions:set -DnewVersion=$DAPR_JAVA_SDK_VERSION -f sdk-workflows/pom.xml - -# testcontainers-dapr -mvn versions:set -DnewVersion=$DAPR_JAVA_SDK_VERSION -f testcontainers-dapr/pom.xml - -# dapr-spring -mvn versions:set -DnewVersion=$DAPR_JAVA_SDK_VERSION -DprocessDependencies=true -f dapr-spring/pom.xml -mvn versions:set-property -Dproperty=dapr.spring.version -DnewVersion=$DAPR_JAVA_SDK_VERSION -f dapr-spring/pom.xml - -# spring-boot-examples -mvn versions:set -DnewVersion=$DAPR_JAVA_SDK_VERSION -f spring-boot-examples/pom.xml git clean -f diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 64f2b6dfd9..1252c515fa 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -44,10 +44,58 @@ jobs: name: report-dapr-java-sdk-actors-jdk${{ env.JDK_VER }} path: sdk-actors/target/jacoco-report/ + build-durabletask: + name: "Durable Task build & tests" + runs-on: ubuntu-latest + timeout-minutes: 30 + continue-on-error: false + env: + JDK_VER: 17 + steps: + - uses: actions/checkout@v5 + - name: Set up OpenJDK ${{ env.JDK_VER }} + uses: actions/setup-java@v4 + with: + distribution: 'temurin' + java-version: ${{ env.JDK_VER }} + - name: Checkout Durable Task Sidecar + uses: actions/checkout@v4 + with: + repository: dapr/durabletask-go + path: durabletask-sidecar + + # TODO: Move the sidecar into a central image repository + - name: Initialize Durable Task Sidecar + run: docker run -d --name durabletask-sidecar -p 4001:4001 --rm -i $(docker build -q ./durabletask-sidecar) + + - name: Display Durable Task Sidecar Logs + run: nohup docker logs --since=0 durabletask-sidecar > durabletask-sidecar.log 2>&1 & + + # wait for 10 seconds, so sidecar container can be fully up, this will avoid intermittent failing issues for integration tests causing by failed to connect to sidecar + - name: Wait for 10 seconds + run: sleep 10 + + - name: Integration Tests For Durable Tasks + run: ./mvnw -B -pl durabletask-client -Pintegration-tests dependency:copy-dependencies verify || echo "TEST_FAILED=true" >> $GITHUB_ENV + continue-on-error: true + + - name: Kill Durable Task Sidecar + run: docker kill durabletask-sidecar + + - name: Upload Durable Task Sidecar Logs + uses: actions/upload-artifact@v4 + with: + name: Durable Task Sidecar Logs + path: durabletask-sidecar.log + + - name: Fail the job if tests failed + if: env.TEST_FAILED == 'true' + run: exit 1 + build: name: "Build 
jdk:${{ matrix.java }} sb:${{ matrix.spring-boot-display-version }} exp:${{ matrix.experimental }}" runs-on: ubuntu-latest - timeout-minutes: 30 + timeout-minutes: 45 continue-on-error: ${{ matrix.experimental }} strategy: fail-fast: false @@ -149,7 +197,7 @@ jobs: run: ./mvnw clean install -B -q -DskipTests - name: Integration tests using spring boot version ${{ matrix.spring-boot-version }} id: integration_tests - run: PRODUCT_SPRING_BOOT_VERSION=${{ matrix.spring-boot-version }} ./mvnw -B -Pintegration-tests dependency:copy-dependencies verify + run: PRODUCT_SPRING_BOOT_VERSION=${{ matrix.spring-boot-version }} ./mvnw -B -pl !durabletask-client -Pintegration-tests dependency:copy-dependencies verify - name: Upload failsafe test report for sdk-tests on failure if: ${{ failure() && steps.integration_tests.conclusion == 'failure' }} uses: actions/upload-artifact@v5 @@ -163,9 +211,10 @@ jobs: name: surefire-report-sdk-tests-jdk${{ matrix.java }}-sb${{ matrix.spring-boot-version }} path: sdk-tests/target/surefire-reports + publish: runs-on: ubuntu-latest - needs: [ build, test ] + needs: [ build, test, build-durabletask ] timeout-minutes: 30 env: JDK_VER: 17 diff --git a/durabletask-client/pom.xml b/durabletask-client/pom.xml new file mode 100644 index 0000000000..93bed3255f --- /dev/null +++ b/durabletask-client/pom.xml @@ -0,0 +1,163 @@ + + + 4.0.0 + + io.dapr + dapr-sdk-parent + 1.17.0-SNAPSHOT + + + durabletask-client + + + ${project.build.directory}/generated-sources + ${project.build.directory}/proto + + + + + javax.annotation + javax.annotation-api + provided + + + io.grpc + grpc-protobuf + + + io.grpc + grpc-stub + + + io.grpc + grpc-netty + + + com.google.protobuf + protobuf-java + + + com.fasterxml.jackson.core + jackson-core + + + com.fasterxml.jackson.core + jackson-databind + + + com.fasterxml.jackson.core + jackson-annotations + + + com.fasterxml.jackson.datatype + jackson-datatype-jsr310 + + + io.grpc + grpc-testing + test + + + org.junit.jupiter + junit-jupiter + test + + + org.testcontainers + testcontainers + + + + + + org.sonatype.plugins + nexus-staging-maven-plugin + + + org.apache.maven.plugins + maven-failsafe-plugin + + ${project.build.outputDirectory} + + + + com.googlecode.maven-download-plugin + download-maven-plugin + 1.6.0 + + + getDaprProto + initialize + + wget + + + true + ${durabletask.proto.url} + orchestrator_service.proto + ${protobuf.input.directory} + + + + + + org.xolstice.maven.plugins + protobuf-maven-plugin + 0.6.1 + + com.google.protobuf:protoc:${protobuf.version}:exe:${os.detected.classifier} + grpc-java + io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier} + ${protobuf.input.directory} + + + + + compile + compile-custom + + + + + + org.apache.maven.plugins + maven-source-plugin + 3.2.1 + + + attach-sources + + jar-no-fork + + + + + + org.apache.maven.plugins + maven-javadoc-plugin + 3.2.0 + + true + + + + attach-javadocs + + jar + + + + + + com.github.spotbugs + spotbugs-maven-plugin + + + true + + + + + diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/CompositeTaskFailedException.java b/durabletask-client/src/main/java/io/dapr/durabletask/CompositeTaskFailedException.java new file mode 100644 index 0000000000..d57ea37d2d --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/CompositeTaskFailedException.java @@ -0,0 +1,68 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the 
License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import java.util.ArrayList; +import java.util.List; + +/** + * Exception that gets thrown when multiple {@link Task}s for an activity or sub-orchestration fail with an + * unhandled exception. + * + *
Detailed information associated with each task failure can be retrieved using the {@link #getExceptions()} + * method.
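As a rough, illustrative sketch (not part of this patch): orchestrator code that awaits a group of tasks could unwrap the individual failures through getExceptions(). The awaitAllTasks() call below is a placeholder for whatever API groups the tasks; it is not defined in this diff.

    try {
        awaitAllTasks(); // hypothetical call that awaits a group of Task objects
    } catch (CompositeTaskFailedException e) {
        // Each entry describes one task that failed with an unhandled exception.
        for (Object failure : e.getExceptions()) {
            System.err.println("Task failed: " + failure);
        }
    }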
+ */ +public class CompositeTaskFailedException extends RuntimeException { + private final List exceptions; + + CompositeTaskFailedException() { + this.exceptions = new ArrayList<>(); + } + + CompositeTaskFailedException(List exceptions) { + this.exceptions = exceptions; + } + + CompositeTaskFailedException(String message, List exceptions) { + super(message); + this.exceptions = exceptions; + } + + CompositeTaskFailedException(String message, Throwable cause, List exceptions) { + super(message, cause); + this.exceptions = exceptions; + } + + CompositeTaskFailedException(Throwable cause, List exceptions) { + super(cause); + this.exceptions = exceptions; + } + + CompositeTaskFailedException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace, + List exceptions) { + super(message, cause, enableSuppression, writableStackTrace); + this.exceptions = exceptions; + } + + /** + * Gets a list of exceptions that occurred during execution of a group of {@link Task}. + * These exceptions include details of the task failure and exception information + * + * @return a list of exceptions + */ + public List getExceptions() { + return new ArrayList<>(this.exceptions); + } + +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/DataConverter.java b/durabletask-client/src/main/java/io/dapr/durabletask/DataConverter.java new file mode 100644 index 0000000000..3c2dd7b7ec --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/DataConverter.java @@ -0,0 +1,88 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import com.google.protobuf.Timestamp; + +import javax.annotation.Nullable; +import java.time.Instant; +import java.time.temporal.ChronoUnit; + +/** + * Interface for serializing and deserializing data that gets passed to and from orchestrators and activities. + * + *
Implementations of this interface are free to use any serialization method. Currently, only strings are + * supported as the serialized representation of data. Byte array payloads and streams are not supported by this + * abstraction. Note that these methods all accept null values, in which case the return value should also be null.
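A minimal sketch of a custom converter built on Jackson (which the new pom already pulls in); this is purely illustrative and distinct from the JacksonDataConverter referenced elsewhere in this patch. The generic form of deserialize is assumed to be <T> T deserialize(String, Class<T>), which the flattened diff text above no longer shows.

    import com.fasterxml.jackson.databind.ObjectMapper;
    import io.dapr.durabletask.DataConverter;

    // Illustrative only: wraps Jackson and maps any failure to DataConverterException.
    final class SimpleJsonDataConverter implements DataConverter {
        private final ObjectMapper mapper = new ObjectMapper();

        @Override
        public String serialize(Object value) {
            if (value == null) {
                return null;
            }
            try {
                return this.mapper.writeValueAsString(value);
            } catch (Exception e) {
                throw new DataConverterException("Failed to serialize value", e);
            }
        }

        @Override
        public <T> T deserialize(String data, Class<T> target) {
            if (data == null) {
                return null;
            }
            try {
                return this.mapper.readValue(data, target);
            } catch (Exception e) {
                throw new DataConverterException("Failed to deserialize into " + target.getName(), e);
            }
        }
    }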
+ */ +public interface DataConverter { + /** + * Serializes the input into a text representation. + * + * @param value the value to be serialized + * @return a serialized text representation of the value or null if the value is null + */ + @Nullable + String serialize(@Nullable Object value); + + /** + * Deserializes the given text data into an object of the specified type. + * + * @param data the text data to deserialize into an object + * @param target the target class to deserialize the input into + * @param the generic parameter type representing the target class to deserialize the input into + * @return a deserialized object of type T + * @throws DataConverterException if the text data cannot be deserialized + */ + @Nullable + T deserialize(@Nullable String data, Class target); + + // Data conversion errors are expected to be unrecoverable in most cases, hence an unchecked runtime exception + class DataConverterException extends RuntimeException { + public DataConverterException(String message, Throwable cause) { + super(message, cause); + } + } + + /** + * Convert from Timestamp to Instant. + * + * @param ts timestamp to convert + * @return instant + */ + static Instant getInstantFromTimestamp(Timestamp ts) { + if (ts == null) { + return null; + } + + // We don't include nanoseconds because of serialization round-trip issues + return Instant.ofEpochSecond(ts.getSeconds(), ts.getNanos()).truncatedTo(ChronoUnit.MILLIS); + } + + /** + * Convert from Instant to Timestamp. + * @param instant to convert + * @return timestamp + */ + static Timestamp getTimestampFromInstant(Instant instant) { + if (instant == null) { + return null; + } + + return Timestamp.newBuilder() + .setSeconds(instant.getEpochSecond()) + .setNanos(instant.getNano()) + .build(); + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskClient.java b/durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskClient.java new file mode 100644 index 0000000000..42a98dd556 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskClient.java @@ -0,0 +1,346 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import javax.annotation.Nullable; +import java.time.Duration; +import java.util.concurrent.TimeoutException; + +/** + * Base class that defines client operations for managing orchestration instances. + * + *
Instances of this class can be used to start, query, raise events to, and terminate orchestration instances. + * In most cases, methods on this class accept an instance ID as a parameter, which identifies the orchestration + * instance.
+ * + *
At the time of writing, the most common implementation of this class is DurableTaskGrpcClient, + * which works by making gRPC calls to a remote service (e.g. a sidecar) that implements the operation behavior. To + * ensure any owned network resources are properly released, instances of this class should be closed when they are no + * longer needed.
+ * + *
Instances of this class are expected to be safe for multithreaded apps. You can therefore safely cache instances + * of this class and reuse them across multiple contexts. Caching these objects is useful to improve overall + * performance.
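For orientation, a minimal end-to-end sketch of the intended usage (illustrative only; the orchestrator name "MyOrchestration" and the input value are made up, and 4001 is simply the default port used elsewhere in this patch):

    import io.dapr.durabletask.DurableTaskClient;
    import io.dapr.durabletask.DurableTaskGrpcClientBuilder;
    import io.dapr.durabletask.OrchestrationMetadata;

    import java.time.Duration;
    import java.util.concurrent.TimeoutException;

    public class ClientQuickstart {
        public static void main(String[] args) throws TimeoutException {
            // The client is AutoCloseable, so try-with-resources releases the managed channel.
            try (DurableTaskClient client = new DurableTaskGrpcClientBuilder().port(4001).build()) {
                String instanceId = client.scheduleNewOrchestrationInstance("MyOrchestration", "some input");
                OrchestrationMetadata metadata =
                        client.waitForInstanceCompletion(instanceId, Duration.ofMinutes(2), true);
                System.out.println("Instance " + instanceId + " finished: " + metadata);
            }
        }
    }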
+ */ +public abstract class DurableTaskClient implements AutoCloseable { + + /** + * Releases any network resources held by this object. + */ + @Override + public void close() { + // no default implementation + } + + /** + * Schedules a new orchestration instance with a random ID for execution. + * + * @param orchestratorName the name of the orchestrator to schedule + * @return the randomly-generated instance ID of the scheduled orchestration instance + */ + public String scheduleNewOrchestrationInstance(String orchestratorName) { + return this.scheduleNewOrchestrationInstance(orchestratorName, null, null); + } + + /** + * Schedules a new orchestration instance with a specified input and a random ID for execution. + * + * @param orchestratorName the name of the orchestrator to schedule + * @param input the input to pass to the scheduled orchestration instance. Must be serializable. + * @return the randomly-generated instance ID of the scheduled orchestration instance + */ + public String scheduleNewOrchestrationInstance(String orchestratorName, Object input) { + return this.scheduleNewOrchestrationInstance(orchestratorName, input, null); + } + + /** + * Schedules a new orchestration instance with a specified input and ID for execution. + * + * @param orchestratorName the name of the orchestrator to schedule + * @param input the input to pass to the scheduled orchestration instance. Must be serializable. + * @param instanceId the unique ID of the orchestration instance to schedule + * @return the instanceId parameter value + */ + public String scheduleNewOrchestrationInstance(String orchestratorName, Object input, String instanceId) { + NewOrchestrationInstanceOptions options = new NewOrchestrationInstanceOptions() + .setInput(input) + .setInstanceId(instanceId); + return this.scheduleNewOrchestrationInstance(orchestratorName, options); + } + + /** + * Schedules a new orchestration instance with a specified set of options for execution. + * + * @param orchestratorName the name of the orchestrator to schedule + * @param options the options for the new orchestration instance, including input, instance ID, etc. + * @return the ID of the scheduled orchestration instance, which was either provided in options + * or randomly generated + */ + public abstract String scheduleNewOrchestrationInstance( + String orchestratorName, + NewOrchestrationInstanceOptions options); + + /** + * Sends an event notification message to a waiting orchestration instance. + * + *
In order to handle the event, the target orchestration instance must be waiting for an event named + * eventName using the {@link TaskOrchestrationContext#waitForExternalEvent(String)} method. + * If the target orchestration instance is not yet waiting for an event named eventName, + * then the event will be saved in the orchestration instance state and dispatched immediately when the + * orchestrator calls {@link TaskOrchestrationContext#waitForExternalEvent(String)}. This event saving occurs even + * if the orchestrator has canceled its wait operation before the event was received.
+ * + *
Raised events for a completed or non-existent orchestration instance will be silently discarded.
+ * + * @param instanceId the ID of the orchestration instance that will handle the event + * @param eventName the case-insensitive name of the event + */ + public void raiseEvent(String instanceId, String eventName) { + this.raiseEvent(instanceId, eventName, null); + } + + /** + * Sends an event notification message with a payload to a waiting orchestration instance. + * + *
In order to handle the event, the target orchestration instance must be waiting for an event named + * eventName using the {@link TaskOrchestrationContext#waitForExternalEvent(String)} method. + * If the target orchestration instance is not yet waiting for an event named eventName, + * then the event will be saved in the orchestration instance state and dispatched immediately when the + * orchestrator calls {@link TaskOrchestrationContext#waitForExternalEvent(String)}. This event saving occurs even + * if the orchestrator has canceled its wait operation before the event was received.
+ * + *
Raised events for a completed or non-existent orchestration instance will be silently discarded.
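A small sketch of the client side of this contract (the event name "ApprovalReceived" and the payload are made up; client and instanceId are assumed to come from earlier calls):

    // The orchestrator is expected to be blocked on waitForExternalEvent("ApprovalReceived").
    client.raiseEvent(instanceId, "ApprovalReceived", "approved-by-manager");

    // When no payload is needed, the two-argument overload is sufficient.
    client.raiseEvent(instanceId, "ApprovalReceived");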
+ * + * @param instanceId the ID of the orchestration instance that will handle the event + * @param eventName the case-insensitive name of the event + * @param eventPayload the serializable data payload to include with the event + */ + public abstract void raiseEvent(String instanceId, String eventName, @Nullable Object eventPayload); + + /** + * Fetches orchestration instance metadata from the configured durable store. + * + * @param instanceId the unique ID of the orchestration instance to fetch + * @param getInputsAndOutputs true to fetch the orchestration instance's inputs, outputs, and custom + * status, or false to omit them + * @return a metadata record that describes the orchestration instance and its execution status, or + * a default instance if no such instance is found. Please refer to method + * {@link OrchestrationMetadata#isInstanceFound()} to check if an instance is found. + */ + @Nullable + public abstract OrchestrationMetadata getInstanceMetadata(String instanceId, boolean getInputsAndOutputs); + + /** + * Waits for an orchestration to start running and returns an {@link OrchestrationMetadata} object that contains + * metadata about the started instance. + * + *
A "started" orchestration instance is any instance not in the Pending state.
+ * + *
If an orchestration instance is already running when this method is called, the method will return immediately. + *
+ * + *
Note that this method overload will not fetch the orchestration's inputs, outputs, or custom status payloads. + *
+ * + * @param instanceId the unique ID of the orchestration instance to wait for + * @param timeout the amount of time to wait for the orchestration instance to start + * @return the orchestration instance metadata or null if no such instance is found + * @throws TimeoutException when the orchestration instance is not started within the specified amount of time + */ + @Nullable + public OrchestrationMetadata waitForInstanceStart(String instanceId, Duration timeout) throws TimeoutException { + return this.waitForInstanceStart(instanceId, timeout, false); + } + + /** + * Waits for an orchestration to start running and returns an {@link OrchestrationMetadata} object that contains + * metadata about the started instance and optionally its input, output, and custom status payloads. + * + *
A "started" orchestration instance is any instance not in the Pending state.
+ * + *
If an orchestration instance is already running when this method is called, the method will return immediately. + *
+ * + * @param instanceId the unique ID of the orchestration instance to wait for + * @param timeout the amount of time to wait for the orchestration instance to start + * @param getInputsAndOutputs true to fetch the orchestration instance's inputs, outputs, and custom + * status, or false to omit them + * @return the orchestration instance metadata or null if no such instance is found + * @throws TimeoutException when the orchestration instance is not started within the specified amount of time + */ + @Nullable + public abstract OrchestrationMetadata waitForInstanceStart( + String instanceId, + Duration timeout, + boolean getInputsAndOutputs) throws TimeoutException; + + /** + * Waits for an orchestration to complete and returns an {@link OrchestrationMetadata} object that contains + * metadata about the completed instance. + * + *
A "completed" orchestration instance is any instance in one of the terminal states. For example, the + * Completed, Failed, or Terminated states.
+ * + *
Orchestrations are long-running and could take hours, days, or months before completing. + * Orchestrations can also be eternal, in which case they'll never complete unless terminated. + * In such cases, this call may block indefinitely, so care must be taken to ensure appropriate timeouts are used. + *
+ * + *
If an orchestration instance is already complete when this method is called, the method will return immediately. + *
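One way to use this safely with eternal or very long-running orchestrations is to bound the wait and fall back to a plain metadata read (sketch; client and instanceId are assumed from earlier):

    OrchestrationMetadata metadata;
    try {
        metadata = client.waitForInstanceCompletion(instanceId, Duration.ofSeconds(30), false);
    } catch (TimeoutException e) {
        // Not finished within the window; check the current status without blocking further.
        metadata = client.getInstanceMetadata(instanceId, false);
    }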
+ * @param instanceId the unique ID of the orchestration instance to wait for + * @param timeout the amount of time to wait for the orchestration instance to complete + * @param getInputsAndOutputs true to fetch the orchestration instance's inputs, outputs, and custom + * status, or false to omit them + * @return the orchestration instance metadata or null if no such instance is found + * @throws TimeoutException when the orchestration instance is not completed within the specified amount of time + */ + @Nullable + public abstract OrchestrationMetadata waitForInstanceCompletion( + String instanceId, + Duration timeout, + boolean getInputsAndOutputs) throws TimeoutException; + + /** + * Terminates a running orchestration instance and updates its runtime status to Terminated. + * + *
This method internally enqueues a "terminate" message in the task hub. When the task hub worker processes + * this message, it will update the runtime status of the target instance to Terminated. + * You can use the {@link #waitForInstanceCompletion} to wait for the instance to reach the terminated state. + *
+ * + *
Terminating an orchestration instance has no effect on any in-flight activity function executions + * or sub-orchestrations that were started by the terminated instance. Those actions will continue to run + * without interruption. However, their results will be discarded. If you want to terminate sub-orchestrations, + * you must issue separate terminate commands for each sub-orchestration instance.
+ * + *
At the time of writing, there is no way to terminate an in-flight activity execution.
+ * + *
Attempting to terminate a completed or non-existent orchestration instance will fail silently.
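A brief sketch of the terminate-and-confirm pattern described above (client and instanceId assumed; the output string is arbitrary):

    // Enqueue the terminate message, then wait for the Terminated state to be recorded.
    client.terminate(instanceId, "cancelled by operator");
    client.waitForInstanceCompletion(instanceId, Duration.ofSeconds(30), false); // may throw TimeoutException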
+ * + * @param instanceId the unique ID of the orchestration instance to terminate + * @param output the optional output to set for the terminated orchestration instance. + * This value must be serializable. + */ + public abstract void terminate(String instanceId, @Nullable Object output); + + /** + * Fetches orchestration instance metadata from the configured durable store using a status query filter. + * + * @param query filter criteria that determines which orchestrations to fetch data for. + * @return the result of the query operation, including instance metadata and possibly a continuation token + */ + public abstract OrchestrationStatusQueryResult queryInstances(OrchestrationStatusQuery query); + + /** + * Initializes the target task hub data store. + * + *
This is an administrative operation that only needs to be done once for the lifetime of the task hub.
+ * + * @param recreateIfExists true to delete any existing task hub first; false to make this + * operation a no-op if the task hub data store already exists. Note that deleting a task + * hub will result in permanent data loss. Use this operation with care. + */ + public abstract void createTaskHub(boolean recreateIfExists); + + /** + * Permanently deletes the target task hub data store and any orchestration data it may contain. + * + *
This is an administrative operation that is irreversible. It should be used with great care.
+ */ + public abstract void deleteTaskHub(); + + /** + * Purges orchestration instance metadata from the durable store. + * + *
This method can be used to permanently delete orchestration metadata from the underlying storage provider, + * including any stored inputs, outputs, and orchestration history records. This is often useful for implementing + * data retention policies and for keeping storage costs minimal. Only orchestration instances in the + * Completed, Failed, or Terminated state can be purged.
+ * + *
If the target orchestration instance is not found in the data store, or if the instance is found but not in a + * terminal state, then the returned {@link PurgeResult} will report that zero instances were purged. + * Otherwise, the existing data will be purged and the returned {@link PurgeResult} will report that one instance + * was purged.
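Sketch of a single-instance purge (client and instanceId assumed; the getDeletedInstanceCount() accessor is an assumption based on how PurgeResult is constructed later in this patch):

    PurgeResult purgeResult = client.purgeInstance(instanceId);
    // Reports 0 when the instance is missing or not yet terminal, 1 when its data was removed.
    System.out.println("Purged instances: " + purgeResult.getDeletedInstanceCount());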
+ * + * @param instanceId the unique ID of the orchestration instance to purge + * @return the result of the purge operation, including the number of purged orchestration instances (0 or 1) + */ + public abstract PurgeResult purgeInstance(String instanceId); + + /** + * Purges orchestration instance metadata from the durable store using a filter that determines which instances to + * purge data for. + * + *
This method can be used to permanently delete orchestration metadata from the underlying storage provider, + * including any stored inputs, outputs, and orchestration history records. This is often useful for implementing + * data retention policies and for keeping storage costs minimal. Only orchestration instances in the + * Completed, Failed, or Terminated state can be purged.
+ * + *
Depending on the type of the durable store, purge operations that target multiple orchestration instances may + * take a long time to complete and be resource intensive. It may therefore be useful to break up purge operations + * into multiple method calls over a period of time and have them cover smaller time windows.
+ * + * @param purgeInstanceCriteria orchestration instance filter criteria used to determine which instances to purge + * @return the result of the purge operation, including the number of purged orchestration instances (0 or 1) + * @throws TimeoutException when purging instances is not completed within the specified amount of time. + * The default timeout for purging instances is 10 minutes + */ + public abstract PurgeResult purgeInstances(PurgeInstanceCriteria purgeInstanceCriteria) throws TimeoutException; + + /** + * Restarts an existing orchestration instance with the original input. + * + * @param instanceId the ID of the previously run orchestration instance to restart. + * @param restartWithNewInstanceId true to restart the orchestration instance with a new instance ID + * false to restart the orchestration instance with same instance ID + * @return the ID of the scheduled orchestration instance, which is either instanceId or randomly + * generated depending on the value of restartWithNewInstanceId + */ + public abstract String restartInstance(String instanceId, boolean restartWithNewInstanceId); + + /** + * Suspends a running orchestration instance. + * + * @param instanceId the ID of the orchestration instance to suspend + */ + public void suspendInstance(String instanceId) { + this.suspendInstance(instanceId, null); + } + + /** + * Suspends a running orchestration instance. + * + * @param instanceId the ID of the orchestration instance to suspend + * @param reason the reason for suspending the orchestration instance + */ + public abstract void suspendInstance(String instanceId, @Nullable String reason); + + /** + * Resumes a running orchestration instance. + * + * @param instanceId the ID of the orchestration instance to resume + */ + public void resumeInstance(String instanceId) { + this.resumeInstance(instanceId, null); + } + + /** + * Resumes a running orchestration instance. + * + * @param instanceId the ID of the orchestration instance to resume + * @param reason the reason for resuming the orchestration instance + */ + public abstract void resumeInstance(String instanceId, @Nullable String reason); +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskGrpcClient.java b/durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskGrpcClient.java new file mode 100644 index 0000000000..b0fa24a5e9 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskGrpcClient.java @@ -0,0 +1,423 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package io.dapr.durabletask; + +import com.google.protobuf.StringValue; +import com.google.protobuf.Timestamp; +import io.dapr.durabletask.implementation.protobuf.OrchestratorService; +import io.dapr.durabletask.implementation.protobuf.TaskHubSidecarServiceGrpc; +import io.grpc.Channel; +import io.grpc.ChannelCredentials; +import io.grpc.Grpc; +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import io.grpc.TlsChannelCredentials; +import io.grpc.netty.GrpcSslContexts; +import io.grpc.netty.NettyChannelBuilder; +import io.netty.handler.ssl.util.InsecureTrustManagerFactory; + +import javax.annotation.Nullable; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.time.Duration; +import java.time.Instant; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.logging.Logger; + +/** + * Durable Task client implementation that uses gRPC to connect to a remote "sidecar" process. + */ +public final class DurableTaskGrpcClient extends DurableTaskClient { + private static final int DEFAULT_PORT = 4001; + private static final Logger logger = Logger.getLogger(DurableTaskGrpcClient.class.getPackage().getName()); + private static final String GRPC_TLS_CA_PATH = "DAPR_GRPC_TLS_CA_PATH"; + private static final String GRPC_TLS_CERT_PATH = "DAPR_GRPC_TLS_CERT_PATH"; + private static final String GRPC_TLS_KEY_PATH = "DAPR_GRPC_TLS_KEY_PATH"; + private static final String GRPC_TLS_INSECURE = "DAPR_GRPC_TLS_INSECURE"; + + private final DataConverter dataConverter; + private final ManagedChannel managedSidecarChannel; + private final TaskHubSidecarServiceGrpc.TaskHubSidecarServiceBlockingStub sidecarClient; + + DurableTaskGrpcClient(DurableTaskGrpcClientBuilder builder) { + this.dataConverter = builder.dataConverter != null ? builder.dataConverter : new JacksonDataConverter(); + + Channel sidecarGrpcChannel; + if (builder.channel != null) { + // The caller is responsible for managing the channel lifetime + this.managedSidecarChannel = null; + sidecarGrpcChannel = builder.channel; + } else { + // Construct our own channel using localhost + a port number + int port = DEFAULT_PORT; + if (builder.port > 0) { + port = builder.port; + } + + String endpoint = "localhost:" + port; + ManagedChannelBuilder channelBuilder; + + // Get TLS configuration from builder or environment variables + String tlsCaPath = builder.tlsCaPath != null ? builder.tlsCaPath : System.getenv(GRPC_TLS_CA_PATH); + String tlsCertPath = builder.tlsCertPath != null ? builder.tlsCertPath : System.getenv(GRPC_TLS_CERT_PATH); + String tlsKeyPath = builder.tlsKeyPath != null ? 
builder.tlsKeyPath : System.getenv(GRPC_TLS_KEY_PATH); + boolean insecure = builder.insecure || Boolean.parseBoolean(System.getenv(GRPC_TLS_INSECURE)); + + if (insecure) { + // Insecure mode - uses TLS but doesn't verify certificates + try { + channelBuilder = NettyChannelBuilder.forTarget(endpoint) + .sslContext(GrpcSslContexts.forClient() + .trustManager(InsecureTrustManagerFactory.INSTANCE) + .build()); + } catch (Exception e) { + throw new RuntimeException("Failed to create insecure TLS credentials", e); + } + } else if (tlsCertPath != null && tlsKeyPath != null) { + // mTLS case - using client cert and key, with optional CA cert for server authentication + try ( + InputStream clientCertInputStream = new FileInputStream(tlsCertPath); + InputStream clientKeyInputStream = new FileInputStream(tlsKeyPath); + InputStream caCertInputStream = tlsCaPath != null ? new FileInputStream(tlsCaPath) : null + ) { + TlsChannelCredentials.Builder tlsBuilder = TlsChannelCredentials.newBuilder() + .keyManager(clientCertInputStream, clientKeyInputStream); // For client authentication + if (caCertInputStream != null) { + tlsBuilder.trustManager(caCertInputStream); // For server authentication + } + ChannelCredentials credentials = tlsBuilder.build(); + channelBuilder = Grpc.newChannelBuilder(endpoint, credentials); + } catch (IOException e) { + throw new RuntimeException("Failed to create mTLS credentials" + + (tlsCaPath != null ? " with CA cert" : ""), e); + } + } else if (tlsCaPath != null) { + // Simple TLS case - using CA cert only for server authentication + try (InputStream caCertInputStream = new FileInputStream(tlsCaPath)) { + ChannelCredentials credentials = TlsChannelCredentials.newBuilder() + .trustManager(caCertInputStream) + .build(); + channelBuilder = Grpc.newChannelBuilder(endpoint, credentials); + } catch (IOException e) { + throw new RuntimeException("Failed to create TLS credentials with CA cert", e); + } + } else { + // No TLS config provided, use plaintext + channelBuilder = ManagedChannelBuilder.forTarget(endpoint).usePlaintext(); + } + + // Need to keep track of this channel so we can dispose it on close() + this.managedSidecarChannel = channelBuilder.build(); + sidecarGrpcChannel = this.managedSidecarChannel; + } + + this.sidecarClient = TaskHubSidecarServiceGrpc.newBlockingStub(sidecarGrpcChannel); + } + + /** + * Closes the internally managed gRPC channel, if one exists. + * + *
This method is a no-op if this client object was created using a builder with a gRPC channel object explicitly + * configured.
+ */ + @Override + public void close() { + if (this.managedSidecarChannel != null) { + try { + this.managedSidecarChannel.shutdown().awaitTermination(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + // Best effort. Also note that AutoClose documentation recommends NOT having + // close() methods throw InterruptedException: + // https://docs.oracle.com/javase/7/docs/api/java/lang/AutoCloseable.html + } + } + } + + @Override + public String scheduleNewOrchestrationInstance( + String orchestratorName, + NewOrchestrationInstanceOptions options) { + if (orchestratorName == null || orchestratorName.length() == 0) { + throw new IllegalArgumentException("A non-empty orchestrator name must be specified."); + } + + Helpers.throwIfArgumentNull(options, "options"); + + OrchestratorService.CreateInstanceRequest.Builder builder = OrchestratorService.CreateInstanceRequest.newBuilder(); + builder.setName(orchestratorName); + + String instanceId = options.getInstanceId(); + if (instanceId == null) { + instanceId = UUID.randomUUID().toString(); + } + builder.setInstanceId(instanceId); + + String version = options.getVersion(); + if (version != null) { + builder.setVersion(StringValue.of(version)); + } + + Object input = options.getInput(); + if (input != null) { + String serializedInput = this.dataConverter.serialize(input); + builder.setInput(StringValue.of(serializedInput)); + } + + Instant startTime = options.getStartTime(); + if (startTime != null) { + Timestamp ts = DataConverter.getTimestampFromInstant(startTime); + builder.setScheduledStartTimestamp(ts); + } + + OrchestratorService.CreateInstanceRequest request = builder.build(); + OrchestratorService.CreateInstanceResponse response = this.sidecarClient.startInstance(request); + return response.getInstanceId(); + } + + @Override + public void raiseEvent(String instanceId, String eventName, Object eventPayload) { + Helpers.throwIfArgumentNull(instanceId, "instanceId"); + Helpers.throwIfArgumentNull(eventName, "eventName"); + + OrchestratorService.RaiseEventRequest.Builder builder = OrchestratorService.RaiseEventRequest.newBuilder() + .setInstanceId(instanceId) + .setName(eventName); + if (eventPayload != null) { + String serializedPayload = this.dataConverter.serialize(eventPayload); + builder.setInput(StringValue.of(serializedPayload)); + } + + OrchestratorService.RaiseEventRequest request = builder.build(); + this.sidecarClient.raiseEvent(request); + } + + @Override + public OrchestrationMetadata getInstanceMetadata(String instanceId, boolean getInputsAndOutputs) { + OrchestratorService.GetInstanceRequest request = OrchestratorService.GetInstanceRequest.newBuilder() + .setInstanceId(instanceId) + .setGetInputsAndOutputs(getInputsAndOutputs) + .build(); + OrchestratorService.GetInstanceResponse response = this.sidecarClient.getInstance(request); + return new OrchestrationMetadata(response, this.dataConverter, request.getGetInputsAndOutputs()); + } + + @Override + public OrchestrationMetadata waitForInstanceStart(String instanceId, Duration timeout, boolean getInputsAndOutputs) + throws TimeoutException { + OrchestratorService.GetInstanceRequest request = OrchestratorService.GetInstanceRequest.newBuilder() + .setInstanceId(instanceId) + .setGetInputsAndOutputs(getInputsAndOutputs) + .build(); + + if (timeout == null || timeout.isNegative() || timeout.isZero()) { + timeout = Duration.ofMinutes(10); + } + + TaskHubSidecarServiceGrpc.TaskHubSidecarServiceBlockingStub grpcClient = this.sidecarClient.withDeadlineAfter( + timeout.toMillis(), + 
TimeUnit.MILLISECONDS); + + OrchestratorService.GetInstanceResponse response; + try { + response = grpcClient.waitForInstanceStart(request); + } catch (StatusRuntimeException e) { + if (e.getStatus().getCode() == Status.Code.DEADLINE_EXCEEDED) { + throw new TimeoutException("Start orchestration timeout reached."); + } + throw e; + } + return new OrchestrationMetadata(response, this.dataConverter, request.getGetInputsAndOutputs()); + } + + @Override + public OrchestrationMetadata waitForInstanceCompletion(String instanceId, Duration timeout, + boolean getInputsAndOutputs) throws TimeoutException { + OrchestratorService.GetInstanceRequest request = OrchestratorService.GetInstanceRequest.newBuilder() + .setInstanceId(instanceId) + .setGetInputsAndOutputs(getInputsAndOutputs) + .build(); + + if (timeout == null || timeout.isNegative() || timeout.isZero()) { + timeout = Duration.ofMinutes(10); + } + + TaskHubSidecarServiceGrpc.TaskHubSidecarServiceBlockingStub grpcClient = this.sidecarClient.withDeadlineAfter( + timeout.toMillis(), + TimeUnit.MILLISECONDS); + + OrchestratorService.GetInstanceResponse response; + try { + response = grpcClient.waitForInstanceCompletion(request); + } catch (StatusRuntimeException e) { + if (e.getStatus().getCode() == Status.Code.DEADLINE_EXCEEDED) { + throw new TimeoutException("Orchestration instance completion timeout reached."); + } + throw e; + } + return new OrchestrationMetadata(response, this.dataConverter, request.getGetInputsAndOutputs()); + } + + @Override + public void terminate(String instanceId, @Nullable Object output) { + Helpers.throwIfArgumentNull(instanceId, "instanceId"); + String serializeOutput = this.dataConverter.serialize(output); + this.logger.fine(() -> String.format( + "Terminating instance %s and setting output to: %s", + instanceId, + serializeOutput != null ? 
serializeOutput : "(null)")); + OrchestratorService.TerminateRequest.Builder builder = OrchestratorService.TerminateRequest.newBuilder() + .setInstanceId(instanceId); + if (serializeOutput != null) { + builder.setOutput(StringValue.of(serializeOutput)); + } + this.sidecarClient.terminateInstance(builder.build()); + } + + @Override + public OrchestrationStatusQueryResult queryInstances(OrchestrationStatusQuery query) { + OrchestratorService.InstanceQuery.Builder instanceQueryBuilder = OrchestratorService.InstanceQuery.newBuilder(); + Optional.ofNullable(query.getCreatedTimeFrom()).ifPresent(createdTimeFrom -> + instanceQueryBuilder.setCreatedTimeFrom(DataConverter.getTimestampFromInstant(createdTimeFrom))); + Optional.ofNullable(query.getCreatedTimeTo()).ifPresent(createdTimeTo -> + instanceQueryBuilder.setCreatedTimeTo(DataConverter.getTimestampFromInstant(createdTimeTo))); + Optional.ofNullable(query.getContinuationToken()).ifPresent(token -> + instanceQueryBuilder.setContinuationToken(StringValue.of(token))); + Optional.ofNullable(query.getInstanceIdPrefix()).ifPresent(prefix -> + instanceQueryBuilder.setInstanceIdPrefix(StringValue.of(prefix))); + instanceQueryBuilder.setFetchInputsAndOutputs(query.isFetchInputsAndOutputs()); + instanceQueryBuilder.setMaxInstanceCount(query.getMaxInstanceCount()); + query.getRuntimeStatusList().forEach(runtimeStatus -> + Optional.ofNullable(runtimeStatus).ifPresent(status -> + instanceQueryBuilder.addRuntimeStatus(OrchestrationRuntimeStatus.toProtobuf(status)))); + query.getTaskHubNames().forEach(taskHubName -> Optional.ofNullable(taskHubName).ifPresent(name -> + instanceQueryBuilder.addTaskHubNames(StringValue.of(name)))); + OrchestratorService.QueryInstancesResponse queryInstancesResponse = this.sidecarClient + .queryInstances(OrchestratorService.QueryInstancesRequest.newBuilder().setQuery(instanceQueryBuilder).build()); + return toQueryResult(queryInstancesResponse, query.isFetchInputsAndOutputs()); + } + + private OrchestrationStatusQueryResult toQueryResult( + OrchestratorService.QueryInstancesResponse queryInstancesResponse, boolean fetchInputsAndOutputs) { + List metadataList = new ArrayList<>(); + queryInstancesResponse.getOrchestrationStateList().forEach(state -> { + metadataList.add(new OrchestrationMetadata(state, this.dataConverter, fetchInputsAndOutputs)); + }); + return new OrchestrationStatusQueryResult(metadataList, queryInstancesResponse.getContinuationToken().getValue()); + } + + @Override + public void createTaskHub(boolean recreateIfExists) { + this.sidecarClient.createTaskHub(OrchestratorService.CreateTaskHubRequest.newBuilder() + .setRecreateIfExists(recreateIfExists).build()); + } + + @Override + public void deleteTaskHub() { + this.sidecarClient.deleteTaskHub(OrchestratorService.DeleteTaskHubRequest.newBuilder().build()); + } + + @Override + public PurgeResult purgeInstance(String instanceId) { + OrchestratorService.PurgeInstancesRequest request = OrchestratorService.PurgeInstancesRequest.newBuilder() + .setInstanceId(instanceId) + .build(); + + OrchestratorService.PurgeInstancesResponse response = this.sidecarClient.purgeInstances(request); + return toPurgeResult(response); + } + + @Override + public PurgeResult purgeInstances(PurgeInstanceCriteria purgeInstanceCriteria) throws TimeoutException { + OrchestratorService.PurgeInstanceFilter.Builder builder = OrchestratorService.PurgeInstanceFilter.newBuilder(); + builder.setCreatedTimeFrom(DataConverter.getTimestampFromInstant(purgeInstanceCriteria.getCreatedTimeFrom())); + 
Optional.ofNullable(purgeInstanceCriteria.getCreatedTimeTo()).ifPresent(createdTimeTo -> + builder.setCreatedTimeTo(DataConverter.getTimestampFromInstant(createdTimeTo))); + purgeInstanceCriteria.getRuntimeStatusList().forEach(runtimeStatus -> + Optional.ofNullable(runtimeStatus).ifPresent(status -> + builder.addRuntimeStatus(OrchestrationRuntimeStatus.toProtobuf(status)))); + + Duration timeout = purgeInstanceCriteria.getTimeout(); + if (timeout == null || timeout.isNegative() || timeout.isZero()) { + timeout = Duration.ofMinutes(4); + } + + TaskHubSidecarServiceGrpc.TaskHubSidecarServiceBlockingStub grpcClient = this.sidecarClient.withDeadlineAfter( + timeout.toMillis(), + TimeUnit.MILLISECONDS); + + OrchestratorService.PurgeInstancesResponse response; + try { + response = grpcClient.purgeInstances(OrchestratorService.PurgeInstancesRequest.newBuilder() + .setPurgeInstanceFilter(builder).build()); + return toPurgeResult(response); + } catch (StatusRuntimeException e) { + if (e.getStatus().getCode() == Status.Code.DEADLINE_EXCEEDED) { + String timeOutException = String.format("Purge instances timeout duration of %s reached.", timeout); + throw new TimeoutException(timeOutException); + } + throw e; + } + } + + @Override + public void suspendInstance(String instanceId, @Nullable String reason) { + OrchestratorService.SuspendRequest.Builder suspendRequestBuilder = OrchestratorService.SuspendRequest.newBuilder(); + suspendRequestBuilder.setInstanceId(instanceId); + if (reason != null) { + suspendRequestBuilder.setReason(StringValue.of(reason)); + } + this.sidecarClient.suspendInstance(suspendRequestBuilder.build()); + } + + @Override + public void resumeInstance(String instanceId, @Nullable String reason) { + OrchestratorService.ResumeRequest.Builder resumeRequestBuilder = OrchestratorService.ResumeRequest.newBuilder(); + resumeRequestBuilder.setInstanceId(instanceId); + if (reason != null) { + resumeRequestBuilder.setReason(StringValue.of(reason)); + } + this.sidecarClient.resumeInstance(resumeRequestBuilder.build()); + } + + @Override + public String restartInstance(String instanceId, boolean restartWithNewInstanceId) { + OrchestrationMetadata metadata = this.getInstanceMetadata(instanceId, true); + if (!metadata.isInstanceFound()) { + throw new IllegalArgumentException(new StringBuilder() + .append("An orchestration with instanceId ") + .append(instanceId) + .append(" was not found.").toString()); + } + + if (restartWithNewInstanceId) { + return this.scheduleNewOrchestrationInstance(metadata.getName(), + this.dataConverter.deserialize(metadata.getSerializedInput(), Object.class)); + } else { + return this.scheduleNewOrchestrationInstance(metadata.getName(), + this.dataConverter.deserialize(metadata.getSerializedInput(), Object.class), metadata.getInstanceId()); + } + } + + private PurgeResult toPurgeResult(OrchestratorService.PurgeInstancesResponse response) { + return new PurgeResult(response.getDeletedInstanceCount()); + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskGrpcClientBuilder.java b/durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskGrpcClientBuilder.java new file mode 100644 index 0000000000..f3ba1cd82a --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskGrpcClientBuilder.java @@ -0,0 +1,128 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import io.grpc.Channel; + +/** + * Builder class for constructing new {@link DurableTaskClient} objects that communicate with a sidecar process + * over gRPC. + */ +public final class DurableTaskGrpcClientBuilder { + DataConverter dataConverter; + int port; + Channel channel; + String tlsCaPath; + String tlsCertPath; + String tlsKeyPath; + boolean insecure; + + /** + * Sets the {@link DataConverter} to use for converting serializable data payloads. + * + * @param dataConverter the {@link DataConverter} to use for converting serializable data payloads + * @return this builder object + */ + public DurableTaskGrpcClientBuilder dataConverter(DataConverter dataConverter) { + this.dataConverter = dataConverter; + return this; + } + + /** + * Sets the gRPC channel to use for communicating with the sidecar process. + * + *
This builder method allows you to provide your own gRPC channel for communicating with the Durable Task sidecar + * endpoint. Channels provided using this method won't be closed when the client is closed. + * Rather, the caller remains responsible for shutting down the channel after disposing the client.
+ * + *
If not specified, a gRPC channel will be created automatically for each constructed + * {@link DurableTaskClient}.
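Sketch of the caller-managed-channel arrangement this paragraph describes, using io.grpc.ManagedChannelBuilder; plaintext localhost:4001 mirrors the defaults used elsewhere in this patch:

    ManagedChannel channel = ManagedChannelBuilder.forAddress("localhost", 4001)
            .usePlaintext()
            .build();
    try (DurableTaskClient client = new DurableTaskGrpcClientBuilder().grpcChannel(channel).build()) {
        // ... use the client ...
    }
    // close() above never touches a caller-provided channel; shut it down explicitly.
    channel.shutdown();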
+ * + * @param channel the gRPC channel to use + * @return this builder object + */ + public DurableTaskGrpcClientBuilder grpcChannel(Channel channel) { + this.channel = channel; + return this; + } + + /** + * Sets the gRPC endpoint port to connect to. If not specified, the default Durable Task port number will be used. + * + * @param port the gRPC endpoint port to connect to + * @return this builder object + */ + public DurableTaskGrpcClientBuilder port(int port) { + this.port = port; + return this; + } + + /** + * Sets the path to the TLS CA certificate file for server authentication. + * If not set, the system's default CA certificates will be used. + * + * @param tlsCaPath path to the TLS CA certificate file + * @return this builder object + */ + public DurableTaskGrpcClientBuilder tlsCaPath(String tlsCaPath) { + this.tlsCaPath = tlsCaPath; + return this; + } + + /** + * Sets the path to the TLS client certificate file for client authentication. + * This is used for mTLS (mutual TLS) connections. + * + * @param tlsCertPath path to the TLS client certificate file + * @return this builder object + */ + public DurableTaskGrpcClientBuilder tlsCertPath(String tlsCertPath) { + this.tlsCertPath = tlsCertPath; + return this; + } + + /** + * Sets the path to the TLS client key file for client authentication. + * This is used for mTLS (mutual TLS) connections. + * + * @param tlsKeyPath path to the TLS client key file + * @return this builder object + */ + public DurableTaskGrpcClientBuilder tlsKeyPath(String tlsKeyPath) { + this.tlsKeyPath = tlsKeyPath; + return this; + } + + /** + * Sets whether to use insecure (plaintext) mode for gRPC communication. + * When set to true, TLS will be disabled and communication will be unencrypted. + * This should only be used for development/testing. + * + * @param insecure whether to use insecure mode + * @return this builder object + */ + public DurableTaskGrpcClientBuilder insecure(boolean insecure) { + this.insecure = insecure; + return this; + } + + /** + * Initializes a new {@link DurableTaskClient} object with the settings specified in the current builder object. + * + * @return a new {@link DurableTaskClient} object + */ + public DurableTaskClient build() { + return new DurableTaskGrpcClient(this); + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskGrpcWorker.java b/durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskGrpcWorker.java new file mode 100644 index 0000000000..eb3be6bb9a --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskGrpcWorker.java @@ -0,0 +1,328 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package io.dapr.durabletask; + +import com.google.protobuf.StringValue; +import io.dapr.durabletask.implementation.protobuf.OrchestratorService; +import io.dapr.durabletask.implementation.protobuf.OrchestratorService.TaskFailureDetails; +import io.dapr.durabletask.implementation.protobuf.TaskHubSidecarServiceGrpc; +import io.grpc.Channel; +import io.grpc.ManagedChannel; +import io.grpc.ManagedChannelBuilder; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; + +import java.time.Duration; +import java.util.HashMap; +import java.util.Iterator; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.logging.Level; +import java.util.logging.Logger; + +/** + * Task hub worker that connects to a sidecar process over gRPC to execute + * orchestrator and activity events. + */ +public final class DurableTaskGrpcWorker implements AutoCloseable { + + private static final int DEFAULT_PORT = 4001; + private static final Logger logger = Logger.getLogger(DurableTaskGrpcWorker.class.getPackage().getName()); + private static final Duration DEFAULT_MAXIMUM_TIMER_INTERVAL = Duration.ofDays(3); + + private final HashMap orchestrationFactories = new HashMap<>(); + private final HashMap activityFactories = new HashMap<>(); + + private final ManagedChannel managedSidecarChannel; + private final DataConverter dataConverter; + private final Duration maximumTimerInterval; + private final ExecutorService workerPool; + private final String appId; // App ID for cross-app routing + + private final TaskHubSidecarServiceGrpc.TaskHubSidecarServiceBlockingStub sidecarClient; + private final boolean isExecutorServiceManaged; + private volatile boolean isNormalShutdown = false; + private Thread workerThread; + + DurableTaskGrpcWorker(DurableTaskGrpcWorkerBuilder builder) { + this.orchestrationFactories.putAll(builder.orchestrationFactories); + this.activityFactories.putAll(builder.activityFactories); + this.appId = builder.appId; + + Channel sidecarGrpcChannel; + if (builder.channel != null) { + // The caller is responsible for managing the channel lifetime + this.managedSidecarChannel = null; + sidecarGrpcChannel = builder.channel; + } else { + // Construct our own channel using localhost + a port number + int port = DEFAULT_PORT; + if (builder.port > 0) { + port = builder.port; + } + + // Need to keep track of this channel so we can dispose it on close() + this.managedSidecarChannel = ManagedChannelBuilder + .forAddress("localhost", port) + .usePlaintext() + .build(); + sidecarGrpcChannel = this.managedSidecarChannel; + } + + this.sidecarClient = TaskHubSidecarServiceGrpc.newBlockingStub(sidecarGrpcChannel); + this.dataConverter = builder.dataConverter != null ? builder.dataConverter : new JacksonDataConverter(); + this.maximumTimerInterval = builder.maximumTimerInterval != null ? builder.maximumTimerInterval + : DEFAULT_MAXIMUM_TIMER_INTERVAL; + this.workerPool = builder.executorService != null ? builder.executorService : Executors.newCachedThreadPool(); + this.isExecutorServiceManaged = builder.executorService == null; + } + + /** + * Establishes a gRPC connection to the sidecar and starts processing work-items + * in the background. + * + *
This method retries continuously to establish a connection to the sidecar. If + * a connection fails, + * a warning log message will be written and a new connection attempt will be + * made. This process + * continues until either a connection succeeds or the process receives an + * interrupt signal.
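A small lifecycle sketch (assuming worker was already built with a DurableTaskGrpcWorkerBuilder):

    worker.start(); // spawns the background worker thread and returns immediately
    // Make sure the worker is stopped when the application shuts down.
    Runtime.getRuntime().addShutdownHook(new Thread(worker::close));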
+ */ + public void start() { + this.workerThread = new Thread(this::startAndBlock); + this.workerThread.start(); + } + + /** + * Closes the internally managed gRPC channel and executor service, if one + * exists. + * + *
Only the internally managed gRPC channel and executor service are closed. If + * either of them was supplied by the caller, + * the caller remains responsible for shutting it down.
+ * + */ + public void close() { + this.workerThread.interrupt(); + this.isNormalShutdown = true; + this.shutDownWorkerPool(); + this.closeSideCarChannel(); + } + + /** + * Establishes a gRPC connection to the sidecar and starts processing work-items + * on the current thread. + * This method call blocks indefinitely, or until the current thread is + * interrupted. + * + *
You can alternatively use the {@link #start} method + * to run orchestration + * processing in a background thread.
+ * + *
This method retries continuously to establish a connection to the sidecar. If + * a connection fails, + * a warning log message will be written and a new connection attempt will be + * made. This process + * continues until either a connection succeeds or the process receives an + * interrupt signal.
+ */ + public void startAndBlock() { + logger.log(Level.INFO, "Durable Task worker is connecting to sidecar at {0}.", this.getSidecarAddress()); + + TaskOrchestrationExecutor taskOrchestrationExecutor = new TaskOrchestrationExecutor( + this.orchestrationFactories, + this.dataConverter, + this.maximumTimerInterval, + logger, + this.appId); + TaskActivityExecutor taskActivityExecutor = new TaskActivityExecutor( + this.activityFactories, + this.dataConverter, + logger); + + while (true) { + try { + OrchestratorService.GetWorkItemsRequest getWorkItemsRequest = OrchestratorService.GetWorkItemsRequest + .newBuilder().build(); + Iterator workItemStream = this.sidecarClient.getWorkItems(getWorkItemsRequest); + while (workItemStream.hasNext()) { + OrchestratorService.WorkItem workItem = workItemStream.next(); + OrchestratorService.WorkItem.RequestCase requestType = workItem.getRequestCase(); + if (requestType == OrchestratorService.WorkItem.RequestCase.ORCHESTRATORREQUEST) { + OrchestratorService.OrchestratorRequest orchestratorRequest = workItem.getOrchestratorRequest(); + logger.log(Level.FINEST, + String.format("Processing orchestrator request for instance: {0}", + orchestratorRequest.getInstanceId())); + + // TODO: Error handling + this.workerPool.submit(() -> { + TaskOrchestratorResult taskOrchestratorResult = taskOrchestrationExecutor.execute( + orchestratorRequest.getPastEventsList(), + orchestratorRequest.getNewEventsList()); + + OrchestratorService.OrchestratorResponse response = OrchestratorService.OrchestratorResponse.newBuilder() + .setInstanceId(orchestratorRequest.getInstanceId()) + .addAllActions(taskOrchestratorResult.getActions()) + .setCustomStatus(StringValue.of(taskOrchestratorResult.getCustomStatus())) + .setCompletionToken(workItem.getCompletionToken()) + .build(); + + try { + this.sidecarClient.completeOrchestratorTask(response); + logger.log(Level.FINEST, + "Completed orchestrator request for instance: {0}", + orchestratorRequest.getInstanceId()); + } catch (StatusRuntimeException e) { + if (e.getStatus().getCode() == Status.Code.UNAVAILABLE) { + logger.log(Level.WARNING, + "The sidecar at address {0} is unavailable while completing the orchestrator task.", + this.getSidecarAddress()); + } else if (e.getStatus().getCode() == Status.Code.CANCELLED) { + logger.log(Level.WARNING, + "Durable Task worker has disconnected from {0} while completing the orchestrator task.", + this.getSidecarAddress()); + } else { + logger.log(Level.WARNING, + "Unexpected failure completing the orchestrator task at {0}.", + this.getSidecarAddress()); + } + } + }); + } else if (requestType == OrchestratorService.WorkItem.RequestCase.ACTIVITYREQUEST) { + OrchestratorService.ActivityRequest activityRequest = workItem.getActivityRequest(); + logger.log(Level.FINEST, + String.format("Processing activity request: %s for instance: %s}", + activityRequest.getName(), + activityRequest.getOrchestrationInstance().getInstanceId())); + + // TODO: Error handling + this.workerPool.submit(() -> { + String output = null; + TaskFailureDetails failureDetails = null; + try { + output = taskActivityExecutor.execute( + activityRequest.getName(), + activityRequest.getInput().getValue(), + activityRequest.getTaskExecutionId(), + activityRequest.getTaskId()); + } catch (Throwable e) { + failureDetails = TaskFailureDetails.newBuilder() + .setErrorType(e.getClass().getName()) + .setErrorMessage(e.getMessage()) + .setStackTrace(StringValue.of(FailureDetails.getFullStackTrace(e))) + .build(); + } + + 
OrchestratorService.ActivityResponse.Builder responseBuilder = OrchestratorService.ActivityResponse + .newBuilder() + .setInstanceId(activityRequest.getOrchestrationInstance().getInstanceId()) + .setTaskId(activityRequest.getTaskId()) + .setCompletionToken(workItem.getCompletionToken()); + + if (output != null) { + responseBuilder.setResult(StringValue.of(output)); + } + + if (failureDetails != null) { + responseBuilder.setFailureDetails(failureDetails); + } + + try { + this.sidecarClient.completeActivityTask(responseBuilder.build()); + } catch (StatusRuntimeException e) { + if (e.getStatus().getCode() == Status.Code.UNAVAILABLE) { + logger.log(Level.WARNING, + "The sidecar at address {0} is unavailable while completing the activity task.", + this.getSidecarAddress()); + } else if (e.getStatus().getCode() == Status.Code.CANCELLED) { + logger.log(Level.WARNING, + "Durable Task worker has disconnected from {0} while completing the activity task.", + this.getSidecarAddress()); + } else { + logger.log(Level.WARNING, "Unexpected failure completing the activity task at {0}.", + this.getSidecarAddress()); + } + } + }); + } else if (requestType == OrchestratorService.WorkItem.RequestCase.HEALTHPING) { + // No-op + } else { + logger.log(Level.WARNING, + "Received and dropped an unknown '{0}' work-item from the sidecar.", + requestType); + } + } + } catch (StatusRuntimeException e) { + if (e.getStatus().getCode() == Status.Code.UNAVAILABLE) { + logger.log(Level.INFO, "The sidecar at address {0} is unavailable. Will continue retrying.", + this.getSidecarAddress()); + } else if (e.getStatus().getCode() == Status.Code.CANCELLED) { + logger.log(Level.INFO, "Durable Task worker has disconnected from {0}.", this.getSidecarAddress()); + } else { + logger.log(Level.WARNING, + String.format("Unexpected failure connecting to %s", this.getSidecarAddress()), e); + } + + // Retry after 5 seconds + try { + Thread.sleep(5000); + } catch (InterruptedException ex) { + break; + } + } + } + } + + /** + * Stops the current worker's listen loop, preventing any new orchestrator or + * activity events from being processed. + */ + public void stop() { + this.close(); + } + + private void closeSideCarChannel() { + if (this.managedSidecarChannel != null) { + try { + this.managedSidecarChannel.shutdownNow().awaitTermination(5, TimeUnit.SECONDS); + } catch (InterruptedException e) { + // Best effort. Also note that AutoClose documentation recommends NOT having + // close() methods throw InterruptedException: + // https://docs.oracle.com/javase/7/docs/api/java/lang/AutoCloseable.html + } + } + } + + private void shutDownWorkerPool() { + if (this.isExecutorServiceManaged) { + if (!this.isNormalShutdown) { + logger.log(Level.WARNING, + "ExecutorService shutdown initiated unexpectedly. 
No new tasks will be accepted"); + } + + this.workerPool.shutdown(); + try { + if (!this.workerPool.awaitTermination(60, TimeUnit.SECONDS)) { + this.workerPool.shutdownNow(); + } + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + } + } + } + + private String getSidecarAddress() { + return this.sidecarClient.getChannel().authority(); + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskGrpcWorkerBuilder.java b/durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskGrpcWorkerBuilder.java new file mode 100644 index 0000000000..0d3ebf2274 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/DurableTaskGrpcWorkerBuilder.java @@ -0,0 +1,164 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import io.grpc.Channel; + +import java.time.Duration; +import java.util.HashMap; +import java.util.concurrent.ExecutorService; + +/** + * Builder object for constructing customized {@link DurableTaskGrpcWorker} instances. + * + */ +public final class DurableTaskGrpcWorkerBuilder { + final HashMap orchestrationFactories = new HashMap<>(); + final HashMap activityFactories = new HashMap<>(); + int port; + Channel channel; + DataConverter dataConverter; + Duration maximumTimerInterval; + ExecutorService executorService; + String appId; // App ID for cross-app routing + + /** + * Adds an orchestration factory to be used by the constructed {@link DurableTaskGrpcWorker}. + * + * @param factory an orchestration factory to be used by the constructed {@link DurableTaskGrpcWorker} + * @return this builder object + */ + public DurableTaskGrpcWorkerBuilder addOrchestration(TaskOrchestrationFactory factory) { + String key = factory.getName(); + if (key == null || key.length() == 0) { + throw new IllegalArgumentException("A non-empty task orchestration name is required."); + } + + if (this.orchestrationFactories.containsKey(key)) { + throw new IllegalArgumentException( + String.format("A task orchestration factory named %s is already registered.", key)); + } + + this.orchestrationFactories.put(key, factory); + return this; + } + + /** + * Adds an activity factory to be used by the constructed {@link DurableTaskGrpcWorker}. + * + * @param factory an activity factory to be used by the constructed {@link DurableTaskGrpcWorker} + * @return this builder object + */ + public DurableTaskGrpcWorkerBuilder addActivity(TaskActivityFactory factory) { + // TODO: Input validation + String key = factory.getName(); + if (key == null || key.length() == 0) { + throw new IllegalArgumentException("A non-empty task activity name is required."); + } + + if (this.activityFactories.containsKey(key)) { + throw new IllegalArgumentException( + String.format("A task activity factory named %s is already registered.", key)); + } + + this.activityFactories.put(key, factory); + return this; + } + + /** + * Sets the gRPC channel to use for communicating with the sidecar process. + * + *

This builder method allows you to provide your own gRPC channel for communicating with the Durable Task sidecar + * endpoint. Channels provided using this method won't be closed when the worker is closed. + * Rather, the caller remains responsible for shutting down the channel after disposing the worker.

+ * + *

If not specified, a gRPC channel will be created automatically for each constructed + * {@link DurableTaskGrpcWorker}.
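+   * <p>For illustration, a minimal sketch of supplying an externally managed channel; the address and
+   * port below are placeholders for the sidecar endpoint:
+   * <pre>{@code
+   * // placeholder sidecar address and port
+   * ManagedChannel channel = ManagedChannelBuilder.forAddress("localhost", 4001)
+   *     .usePlaintext()
+   *     .build();
+   * DurableTaskGrpcWorker worker = new DurableTaskGrpcWorkerBuilder()
+   *     .grpcChannel(channel)
+   *     .build();
+   * // the caller shuts the channel down after closing the worker
+   * channel.shutdown();
+   * }</pre>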

+ * + * @param channel the gRPC channel to use + * @return this builder object + */ + public DurableTaskGrpcWorkerBuilder grpcChannel(Channel channel) { + this.channel = channel; + return this; + } + + /** + * Sets the gRPC endpoint port to connect to. If not specified, the default Durable Task port number will be used. + * + * @param port the gRPC endpoint port to connect to + * @return this builder object + */ + public DurableTaskGrpcWorkerBuilder port(int port) { + this.port = port; + return this; + } + + /** + * Sets the {@link DataConverter} to use for converting serializable data payloads. + * + * @param dataConverter the {@link DataConverter} to use for converting serializable data payloads + * @return this builder object + */ + public DurableTaskGrpcWorkerBuilder dataConverter(DataConverter dataConverter) { + this.dataConverter = dataConverter; + return this; + } + + /** + * Sets the maximum timer interval. If not specified, the default maximum timer interval duration will be used. + * The default maximum timer interval duration is 3 days. + * + * @param maximumTimerInterval the maximum timer interval + * @return this builder object + */ + public DurableTaskGrpcWorkerBuilder maximumTimerInterval(Duration maximumTimerInterval) { + this.maximumTimerInterval = maximumTimerInterval; + return this; + } + + /** + * Sets the executor service that will be used to execute threads. + * + * @param executorService {@link ExecutorService}. + * @return this builder object. + */ + public DurableTaskGrpcWorkerBuilder withExecutorService(ExecutorService executorService) { + this.executorService = executorService; + return this; + } + + /** + * Sets the app ID for cross-app workflow routing. + * + *

This app ID is used to identify this worker in cross-app routing scenarios. + * It should match the app ID configured in the Dapr sidecar.
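+   * <p>A minimal sketch, assuming the Dapr sidecar for this application was started with the
+   * (placeholder) app ID {@code my-app}:
+   * <pre>{@code
+   * DurableTaskGrpcWorker worker = new DurableTaskGrpcWorkerBuilder()
+   *     .appId("my-app") // must match the sidecar's configured app ID
+   *     .build();
+   * }</pre>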

+ * + * @param appId the app ID for this worker + * @return this builder object + */ + public DurableTaskGrpcWorkerBuilder appId(String appId) { + this.appId = appId; + return this; + } + + /** + * Initializes a new {@link DurableTaskGrpcWorker} object with the settings specified in the current builder object. + * + * @return a new {@link DurableTaskGrpcWorker} object + */ + public DurableTaskGrpcWorker build() { + return new DurableTaskGrpcWorker(this); + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/FailureDetails.java b/durabletask-client/src/main/java/io/dapr/durabletask/FailureDetails.java new file mode 100644 index 0000000000..f5d9d834ea --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/FailureDetails.java @@ -0,0 +1,145 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import com.google.protobuf.StringValue; +import io.dapr.durabletask.implementation.protobuf.OrchestratorService.TaskFailureDetails; + +import javax.annotation.Nonnull; +import javax.annotation.Nullable; + +/** + * Class that represents the details of a task failure. + * + *

In most cases, failures are caused by unhandled exceptions in activity or orchestrator code, in which case + * instances of this class will expose the details of the exception. However, it's also possible that other types + * of errors could result in task failures, in which case there may not be any exception-specific information.
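+ * <p>An illustrative sketch of inspecting failure details; {@code metadata} stands in for an
+ * {@link OrchestrationMetadata} instance fetched for a failed orchestration:
+ * <pre>{@code
+ * // metadata is a previously fetched OrchestrationMetadata (placeholder)
+ * FailureDetails failure = metadata.getFailureDetails();
+ * if (failure != null && failure.isCausedBy(IllegalArgumentException.class)) {
+ *     System.err.println(failure.getErrorType() + ": " + failure.getErrorMessage());
+ * }
+ * }</pre>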

+ */ +public final class FailureDetails { + private final String errorType; + private final String errorMessage; + private final String stackTrace; + private final boolean isNonRetriable; + + FailureDetails( + String errorType, + @Nullable String errorMessage, + @Nullable String errorDetails, + boolean isNonRetriable) { + this.errorType = errorType; + this.stackTrace = errorDetails; + + // Error message can be null for things like NullPointerException but the gRPC contract doesn't allow null + this.errorMessage = errorMessage != null ? errorMessage : ""; + this.isNonRetriable = isNonRetriable; + } + + FailureDetails(Exception exception) { + this(exception.getClass().getName(), exception.getMessage(), getFullStackTrace(exception), false); + } + + FailureDetails(TaskFailureDetails proto) { + this(proto.getErrorType(), + proto.getErrorMessage(), + proto.getStackTrace().getValue(), + proto.getIsNonRetriable()); + } + + /** + * Gets the exception class name if the failure was caused by an unhandled exception. Otherwise, gets a symbolic + * name that describes the general type of error that was encountered. + * + * @return the error type as a {@code String} value + */ + @Nonnull + public String getErrorType() { + return this.errorType; + } + + /** + * Gets a summary description of the error that caused this failure. If the failure was caused by an exception, the + * exception message is returned. + * + * @return a summary description of the error + */ + @Nonnull + public String getErrorMessage() { + return this.errorMessage; + } + + /** + * Gets the stack trace of the exception that caused this failure, or {@code null} if the failure was caused by + * a non-exception error. + * + * @return the stack trace of the failure exception or {@code null} if the failure was not caused by an exception + */ + @Nullable + public String getStackTrace() { + return this.stackTrace; + } + + /** + * Returns {@code true} if the failure doesn't permit retries, otherwise {@code false}. + * + * @return {@code true} if the failure doesn't permit retries, otherwise {@code false}. + */ + public boolean isNonRetriable() { + return this.isNonRetriable; + } + + /** + * Returns {@code true} if the task failure was provided by the specified exception type, otherwise {@code false}. + * + *

This method allows checking if a task failed due to a specific exception type by attempting to load the class + * specified in {@link #getErrorType()}. If the exception class cannot be loaded for any reason, this method will + * return {@code false}. Base types are supported by this method, as shown in the following example:

+ *
{@code
+   * boolean isRuntimeException = failureDetails.isCausedBy(RuntimeException.class);
+   * }
+ * + * @param exceptionClass the class representing the exception type to test + * @return {@code true} if the task failure was provided by the specified exception type, otherwise {@code false} + */ + public boolean isCausedBy(Class exceptionClass) { + String actualClassName = this.getErrorType(); + try { + // Try using reflection to load the failure's class type and see if it's a subtype of the specified + // exception. For example, this should always succeed if exceptionClass is System.Exception. + Class actualExceptionClass = Class.forName(actualClassName); + return exceptionClass.isAssignableFrom(actualExceptionClass); + } catch (ClassNotFoundException ex) { + // Can't load the class and thus can't tell if it's related + return false; + } + } + + static String getFullStackTrace(Throwable e) { + StackTraceElement[] elements = e.getStackTrace(); + + // Plan for 256 characters per stack frame (which is likely on the high-end) + StringBuilder sb = new StringBuilder(elements.length * 256); + for (StackTraceElement element : elements) { + sb.append("\tat ").append(element.toString()).append(System.lineSeparator()); + } + return sb.toString(); + } + + TaskFailureDetails toProto() { + return TaskFailureDetails.newBuilder() + .setErrorType(this.getErrorType()) + .setErrorMessage(this.getErrorMessage()) + .setStackTrace(StringValue.of(this.getStackTrace() != null ? this.getStackTrace() : "")) + .build(); + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/Helpers.java b/durabletask-client/src/main/java/io/dapr/durabletask/Helpers.java new file mode 100644 index 0000000000..265bb0ab06 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/Helpers.java @@ -0,0 +1,77 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package io.dapr.durabletask; + +import javax.annotation.Nonnull; +import javax.annotation.Nullable; +import java.time.Duration; + +final class Helpers { + static final Duration maxDuration = Duration.ofSeconds(Long.MAX_VALUE, 999999999L); + + static @Nonnull V throwIfArgumentNull(@Nullable V argValue, String argName) { + if (argValue == null) { + throw new IllegalArgumentException("The argument '" + argName + "' was null."); + } + + return argValue; + } + + static @Nonnull String throwIfArgumentNullOrWhiteSpace(String argValue, String argName) { + throwIfArgumentNull(argValue, argName); + if (argValue.trim().length() == 0) { + throw new IllegalArgumentException("The argument '" + argName + "' was empty or contained only whitespace."); + } + + return argValue; + } + + static void throwIfOrchestratorComplete(boolean isComplete) { + if (isComplete) { + throw new IllegalStateException("The orchestrator has already completed"); + } + } + + static boolean isInfiniteTimeout(Duration timeout) { + return timeout == null || timeout.isNegative() || timeout.equals(maxDuration); + } + + static double powExact(double base, double exponent) throws ArithmeticException { + if (base == 0.0) { + return 0.0; + } + + double result = Math.pow(base, exponent); + + if (result == Double.POSITIVE_INFINITY) { + throw new ArithmeticException("Double overflow resulting in POSITIVE_INFINITY"); + } else if (result == Double.NEGATIVE_INFINITY) { + throw new ArithmeticException("Double overflow resulting in NEGATIVE_INFINITY"); + } else if (Double.compare(-0.0f, result) == 0) { + throw new ArithmeticException("Double overflow resulting in negative zero"); + } else if (Double.compare(+0.0f, result) == 0) { + throw new ArithmeticException("Double overflow resulting in positive zero"); + } + + return result; + } + + static boolean isNullOrEmpty(String s) { + return s == null || s.isEmpty(); + } + + // Cannot be instantiated + private Helpers() { + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/JacksonDataConverter.java b/durabletask-client/src/main/java/io/dapr/durabletask/JacksonDataConverter.java new file mode 100644 index 0000000000..29912aa3f1 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/JacksonDataConverter.java @@ -0,0 +1,58 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.json.JsonMapper; + +/** + * An implementation of {@link DataConverter} that uses Jackson APIs for data serialization. 
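+ * <p>A minimal usage sketch; in practice the converter is usually supplied to the client or worker
+ * builders rather than invoked directly:
+ * <pre>{@code
+ * DataConverter converter = new JacksonDataConverter();
+ * String json = converter.serialize(java.util.Collections.singletonMap("city", "Seattle"));
+ * java.util.Map roundTripped = converter.deserialize(json, java.util.Map.class);
+ * }</pre>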
+ */ +public final class JacksonDataConverter implements DataConverter { + // Static singletons are recommended by the Jackson documentation + private static final ObjectMapper jsonObjectMapper = JsonMapper.builder() + .findAndAddModules() + .build(); + + @Override + public String serialize(Object value) { + if (value == null) { + return null; + } + + try { + return jsonObjectMapper.writeValueAsString(value); + } catch (JsonProcessingException e) { + throw new DataConverterException( + String.format("Failed to serialize argument of type '%s'. Detailed error message: %s", + value.getClass().getName(), e.getMessage()), + e); + } + } + + @Override + public T deserialize(String jsonText, Class targetType) { + if (jsonText == null || jsonText.length() == 0 || targetType == Void.class) { + return null; + } + + try { + return jsonObjectMapper.readValue(jsonText, targetType); + } catch (JsonProcessingException e) { + throw new DataConverterException(String.format("Failed to deserialize the JSON text to %s. " + + "Detailed error message: %s", targetType.getName(), e.getMessage()), e); + } + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/NewOrchestrationInstanceOptions.java b/durabletask-client/src/main/java/io/dapr/durabletask/NewOrchestrationInstanceOptions.java new file mode 100644 index 0000000000..32639e41d1 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/NewOrchestrationInstanceOptions.java @@ -0,0 +1,147 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import java.time.Instant; + +/** + * Options for starting a new instance of an orchestration. + */ +public final class NewOrchestrationInstanceOptions { + private String version; + private String instanceId; + private Object input; + private Instant startTime; + private String appID; // Target app ID for cross-app workflow routing + + /** + * Default constructor for the {@link NewOrchestrationInstanceOptions} class. + */ + public NewOrchestrationInstanceOptions() { + } + + /** + * Sets the version of the orchestration to start. + * + * @param version the user-defined version of the orchestration + * @return this {@link NewOrchestrationInstanceOptions} object + */ + public NewOrchestrationInstanceOptions setVersion(String version) { + this.version = version; + return this; + } + + /** + * Sets the instance ID of the orchestration to start. + * If no instance ID is configured, the orchestration will be created with a randomly generated instance ID. + * + * @param instanceId the ID of the new orchestration instance + * @return this {@link NewOrchestrationInstanceOptions} object + */ + public NewOrchestrationInstanceOptions setInstanceId(String instanceId) { + this.instanceId = instanceId; + return this; + } + + /** + * Sets the input of the orchestration to start. 
+ * There are no restrictions on the type of inputs that can be used except that they must be serializable using + * the {@link DataConverter} that was configured for the {@link DurableTaskClient} at creation time. + * + * @param input the input of the new orchestration instance + * @return this {@link NewOrchestrationInstanceOptions} object + */ + public NewOrchestrationInstanceOptions setInput(Object input) { + this.input = input; + return this; + } + + /** + * Sets the start time of the new orchestration instance. + * By default, new orchestration instances start executing immediately. This method can be used + * to start them at a specific time in the future. + * + * @param startTime the start time of the new orchestration instance + * @return this {@link NewOrchestrationInstanceOptions} object + */ + public NewOrchestrationInstanceOptions setStartTime(Instant startTime) { + this.startTime = startTime; + return this; + } + + /** + * Sets the target app ID for cross-app workflow routing. + * + * @param appID the target app ID for cross-app routing + * @return this {@link NewOrchestrationInstanceOptions} object + */ + public NewOrchestrationInstanceOptions setAppID(String appID) { + this.appID = appID; + return this; + } + + /** + * Gets the user-specified version of the new orchestration. + * + * @return the user-specified version of the new orchestration. + */ + public String getVersion() { + return this.version; + } + + /** + * Gets the instance ID of the new orchestration. + * + * @return the instance ID of the new orchestration. + */ + public String getInstanceId() { + return this.instanceId; + } + + /** + * Gets the input of the new orchestration. + * + * @return the input of the new orchestration. + */ + public Object getInput() { + return this.input; + } + + /** + * Gets the configured start time of the new orchestration instance. + * + * @return the configured start time of the new orchestration instance. + */ + public Instant getStartTime() { + return this.startTime; + } + + /** + * Gets the configured target app ID for cross-app workflow routing. + * + * @return the configured target app ID + */ + public String getAppID() { + return this.appID; + } + + /** + * Checks if an app ID is configured for cross-app routing. + * + * @return true if an app ID is configured, false otherwise + */ + public boolean hasAppID() { + return this.appID != null && !this.appID.isEmpty(); + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/NonDeterministicOrchestratorException.java b/durabletask-client/src/main/java/io/dapr/durabletask/NonDeterministicOrchestratorException.java new file mode 100644 index 0000000000..101e6bd04a --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/NonDeterministicOrchestratorException.java @@ -0,0 +1,20 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package io.dapr.durabletask; + +final class NonDeterministicOrchestratorException extends RuntimeException { + public NonDeterministicOrchestratorException(String message) { + super(message); + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationMetadata.java b/durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationMetadata.java new file mode 100644 index 0000000000..a0565ba634 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationMetadata.java @@ -0,0 +1,283 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import io.dapr.durabletask.implementation.protobuf.OrchestratorService; +import io.dapr.durabletask.implementation.protobuf.OrchestratorService.OrchestrationState; + +import java.time.Instant; + +import static io.dapr.durabletask.Helpers.isNullOrEmpty; + +/** + * Represents a snapshot of an orchestration instance's current state, including metadata. + * + *

Instances of this class are produced by methods in the {@link DurableTaskClient} class, such as + * {@link DurableTaskClient#getInstanceMetadata}, {@link DurableTaskClient#waitForInstanceStart} and + * {@link DurableTaskClient#waitForInstanceCompletion}.
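+ * <p>An illustrative sketch, assuming an existing {@link DurableTaskClient} named {@code client} and an
+ * existing instance ID; the exact {@code getInstanceMetadata} arguments shown here are illustrative:
+ * <pre>{@code
+ * // instanceId refers to a previously scheduled orchestration (placeholder)
+ * OrchestrationMetadata metadata = client.getInstanceMetadata(instanceId, true);
+ * if (metadata.isCompleted()) {
+ *     String output = metadata.readOutputAs(String.class);
+ * }
+ * }</pre>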

+ */ +public final class OrchestrationMetadata { + private final DataConverter dataConverter; + private final boolean requestedInputsAndOutputs; + + private final String name; + private final String instanceId; + private final OrchestrationRuntimeStatus runtimeStatus; + private final Instant createdAt; + private final Instant lastUpdatedAt; + private final String serializedInput; + private final String serializedOutput; + private final String serializedCustomStatus; + private final FailureDetails failureDetails; + + OrchestrationMetadata( + OrchestratorService.GetInstanceResponse fetchResponse, + DataConverter dataConverter, + boolean requestedInputsAndOutputs) { + this(fetchResponse.getOrchestrationState(), dataConverter, requestedInputsAndOutputs); + } + + OrchestrationMetadata( + OrchestrationState state, + DataConverter dataConverter, + boolean requestedInputsAndOutputs) { + this.dataConverter = dataConverter; + this.requestedInputsAndOutputs = requestedInputsAndOutputs; + + this.name = state.getName(); + this.instanceId = state.getInstanceId(); + this.runtimeStatus = OrchestrationRuntimeStatus.fromProtobuf(state.getOrchestrationStatus()); + this.createdAt = DataConverter.getInstantFromTimestamp(state.getCreatedTimestamp()); + this.lastUpdatedAt = DataConverter.getInstantFromTimestamp(state.getLastUpdatedTimestamp()); + this.serializedInput = state.getInput().getValue(); + this.serializedOutput = state.getOutput().getValue(); + this.serializedCustomStatus = state.getCustomStatus().getValue(); + this.failureDetails = new FailureDetails(state.getFailureDetails()); + } + + /** + * Gets the name of the orchestration. + * + * @return the name of the orchestration + */ + public String getName() { + return this.name; + } + + /** + * Gets the unique ID of the orchestration instance. + * + * @return the unique ID of the orchestration instance + */ + public String getInstanceId() { + return this.instanceId; + } + + /** + * Gets the current runtime status of the orchestration instance at the time this object was fetched. + * + * @return the current runtime status of the orchestration instance at the time this object was fetched + */ + public OrchestrationRuntimeStatus getRuntimeStatus() { + return this.runtimeStatus; + } + + /** + * Gets the orchestration instance's creation time in UTC. + * + * @return the orchestration instance's creation time in UTC + */ + public Instant getCreatedAt() { + return this.createdAt; + } + + /** + * Gets the orchestration instance's last updated time in UTC. + * + * @return the orchestration instance's last updated time in UTC + */ + public Instant getLastUpdatedAt() { + return this.lastUpdatedAt; + } + + /** + * Gets the orchestration instance's serialized input, if any, as a string value. + * + * @return the orchestration instance's serialized input or {@code null} + */ + public String getSerializedInput() { + return this.serializedInput; + } + + /** + * Gets the orchestration instance's serialized output, if any, as a string value. + * + * @return the orchestration instance's serialized output or {@code null} + */ + public String getSerializedOutput() { + return this.serializedOutput; + } + + /** + * Gets the failure details, if any, for the failed orchestration instance. + * + *

This method returns data only if the orchestration is in the {@link OrchestrationRuntimeStatus#FAILED} state, + * and only if this instance metadata was fetched with the option to include output data.

+ * + * @return the failure details of the failed orchestration instance or {@code null} + */ + public FailureDetails getFailureDetails() { + return this.failureDetails; + } + + /** + * Gets a value indicating whether the orchestration instance was running at the time this object was fetched. + * + * @return {@code true} if the orchestration existed and was in a running state; otherwise {@code false} + */ + public boolean isRunning() { + return isInstanceFound() && this.runtimeStatus == OrchestrationRuntimeStatus.RUNNING; + } + + /** + * Gets a value indicating whether the orchestration instance was completed at the time this object was fetched. + * + *

An orchestration instance is considered completed when its runtime status value is + * {@link OrchestrationRuntimeStatus#COMPLETED}, {@link OrchestrationRuntimeStatus#FAILED}, or + * {@link OrchestrationRuntimeStatus#TERMINATED}.

+ * + * @return {@code true} if the orchestration was in a terminal state; otherwise {@code false} + */ + public boolean isCompleted() { + return + this.runtimeStatus == OrchestrationRuntimeStatus.COMPLETED + || this.runtimeStatus == OrchestrationRuntimeStatus.FAILED + || this.runtimeStatus == OrchestrationRuntimeStatus.TERMINATED; + } + + /** + * Deserializes the orchestration's input into an object of the specified type. + * + *

Deserialization is performed using the {@link DataConverter} that was configured on + * the {@link DurableTaskClient} object that created this orchestration metadata object.

+ * + * @param type the class associated with the type to deserialize the input data into + * @param the type to deserialize the input data into + * @return the deserialized input value + * @throws IllegalStateException if the metadata was fetched without the option to read inputs and outputs + */ + public T readInputAs(Class type) { + return this.readPayloadAs(type, this.serializedInput); + } + + /** + * Deserializes the orchestration's output into an object of the specified type. + * + *

Deserialization is performed using the {@link DataConverter} that was configured on + * the {@link DurableTaskClient} object that created this orchestration metadata object.

+ * + * @param type the class associated with the type to deserialize the output data into + * @param the type to deserialize the output data into + * @return the deserialized input value + * @throws IllegalStateException if the metadata was fetched without the option to read inputs and outputs + */ + public T readOutputAs(Class type) { + return this.readPayloadAs(type, this.serializedOutput); + } + + /** + * Deserializes the orchestration's custom status into an object of the specified type. + * + *

Deserialization is performed using the {@link DataConverter} that was configured on + * the {@link DurableTaskClient} object that created this orchestration metadata object.

+ * + * @param type the class associated with the type to deserialize the custom status data into + * @param the type to deserialize the custom status data into + * @return the deserialized input value + * @throws IllegalStateException if the metadata was fetched without the option to read inputs and outputs + */ + public T readCustomStatusAs(Class type) { + return this.readPayloadAs(type, this.serializedCustomStatus); + } + + /** + * Returns {@code true} if the orchestration has a non-empty custom status value; otherwise {@code false}. + * + *

This method will always return {@code false} if the metadata was fetched without the option to read inputs and + * outputs.

+ * + * @return {@code true} if the orchestration has a non-empty custom status value; otherwise {@code false} + */ + public boolean isCustomStatusFetched() { + return this.serializedCustomStatus != null && !this.serializedCustomStatus.isEmpty(); + } + + private T readPayloadAs(Class type, String payload) { + if (!this.requestedInputsAndOutputs) { + throw new IllegalStateException("This method can only be used when instance metadata is fetched with the option " + + "to include input and output data."); + } + + // Note that the Java gRPC implementation converts null protobuf strings into empty Java strings + if (payload == null || payload.isEmpty()) { + return null; + } + + return this.dataConverter.deserialize(payload, type); + } + + /** + * Generates a user-friendly string representation of the current metadata object. + * + * @return a user-friendly string representation of the current metadata object + */ + @Override + public String toString() { + String baseString = String.format( + "[Name: '%s', ID: '%s', RuntimeStatus: %s, CreatedAt: %s, LastUpdatedAt: %s", + this.name, + this.instanceId, + this.runtimeStatus, + this.createdAt, + this.lastUpdatedAt); + StringBuilder sb = new StringBuilder(baseString); + if (this.serializedInput != null) { + sb.append(", Input: '").append(getTrimmedPayload(this.serializedInput)).append('\''); + } + + if (this.serializedOutput != null) { + sb.append(", Output: '").append(getTrimmedPayload(this.serializedOutput)).append('\''); + } + + return sb.append(']').toString(); + } + + private static String getTrimmedPayload(String payload) { + int maxLength = 50; + if (payload.length() > maxLength) { + return payload.substring(0, maxLength) + "..."; + } + + return payload; + } + + /** + * Returns {@code true} if an orchestration instance with this ID was found; otherwise {@code false}. + * + * @return {@code true} if an orchestration instance with this ID was found; otherwise {@code false} + */ + public boolean isInstanceFound() { + return !(isNullOrEmpty(this.name) && isNullOrEmpty(this.instanceId)); + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationRunner.java b/durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationRunner.java new file mode 100644 index 0000000000..22b2154608 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationRunner.java @@ -0,0 +1,169 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import com.google.protobuf.InvalidProtocolBufferException; +import com.google.protobuf.StringValue; +import io.dapr.durabletask.implementation.protobuf.OrchestratorService; + +import java.time.Duration; +import java.util.Base64; +import java.util.HashMap; +import java.util.logging.Logger; + +/** + * Helper class for invoking orchestrations directly, without constructing a {@link DurableTaskGrpcWorker} object. + * + *

This static class can be used to execute orchestration logic directly. In order to use it for this purpose, the + * caller must provide orchestration state as serialized protobuf bytes.
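+ * <p>A minimal sketch; {@code requestBase64} is a placeholder for the base64-encoded orchestrator
+ * request supplied by the external orchestration engine:
+ * <pre>{@code
+ * // requestBase64 is provided by the external engine (placeholder)
+ * String responseBase64 = OrchestrationRunner.loadAndRun(requestBase64, ctx -> "done");
+ * }</pre>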

+ */ +public final class OrchestrationRunner { + private static final Logger logger = Logger.getLogger(OrchestrationRunner.class.getPackage().getName()); + private static final Duration DEFAULT_MAXIMUM_TIMER_INTERVAL = Duration.ofDays(3); + + private OrchestrationRunner() { + } + + /** + * Loads orchestration history from {@code base64EncodedOrchestratorRequest} and uses it to execute the + * orchestrator function code pointed to by {@code orchestratorFunc}. + * + * @param base64EncodedOrchestratorRequest the base64-encoded protobuf payload representing an orchestrator execution + * request + * @param orchestratorFunc a function that implements the orchestrator logic + * @param the type of the orchestrator function output, which must be serializable + * to JSON + * @return a base64-encoded protobuf payload of orchestrator actions to be interpreted by the external + * orchestration engine + * @throws IllegalArgumentException if either parameter is {@code null} or + * if {@code base64EncodedOrchestratorRequest} is not valid base64-encoded protobuf + */ + public static String loadAndRun( + String base64EncodedOrchestratorRequest, + OrchestratorFunction orchestratorFunc) { + // Example string: CiBhOTMyYjdiYWM5MmI0MDM5YjRkMTYxMDIwNzlmYTM1YSIaCP///////////wESCwi254qRBhDk+rgocgAicgj////// + // ///8BEgwIs+eKkQYQzMXjnQMaVwoLSGVsbG9DaXRpZXMSACJGCiBhOTMyYjdiYWM5MmI0MDM5YjRkMTYxMDIwNzlmYTM1YRIiCiA3ODEwOTA + // 2N2Q4Y2Q0ODg1YWU4NjQ0OTNlMmRlMGQ3OA== + byte[] decodedBytes = Base64.getDecoder().decode(base64EncodedOrchestratorRequest); + byte[] resultBytes = loadAndRun(decodedBytes, orchestratorFunc); + return Base64.getEncoder().encodeToString(resultBytes); + } + + /** + * Loads orchestration history from {@code orchestratorRequestBytes} and uses it to execute the + * orchestrator function code pointed to by {@code orchestratorFunc}. + * + * @param orchestratorRequestBytes the protobuf payload representing an orchestrator execution request + * @param orchestratorFunc a function that implements the orchestrator logic + * @param the type of the orchestrator function output, which must be serializable to JSON + * @return a protobuf-encoded payload of orchestrator actions to be interpreted by the external orchestration engine + * @throws IllegalArgumentException if either parameter is {@code null} or if {@code orchestratorRequestBytes} is + * not valid protobuf + */ + public static byte[] loadAndRun( + byte[] orchestratorRequestBytes, + OrchestratorFunction orchestratorFunc) { + if (orchestratorFunc == null) { + throw new IllegalArgumentException("orchestratorFunc must not be null"); + } + + // Wrap the provided lambda in an anonymous TaskOrchestration + TaskOrchestration orchestration = ctx -> { + R output = orchestratorFunc.apply(ctx); + ctx.complete(output); + }; + + return loadAndRun(orchestratorRequestBytes, orchestration); + } + + /** + * Loads orchestration history from {@code base64EncodedOrchestratorRequest} and uses it to execute the + * {@code orchestration}. 
+ * + * @param base64EncodedOrchestratorRequest the base64-encoded protobuf payload representing an orchestrator + * execution request + * @param orchestration the orchestration to execute + * @return a base64-encoded protobuf payload of orchestrator actions to be interpreted by the external + * orchestration engine + * @throws IllegalArgumentException if either parameter is {@code null} or + * if {@code base64EncodedOrchestratorRequest} is not valid base64-encoded protobuf + */ + public static String loadAndRun( + String base64EncodedOrchestratorRequest, + TaskOrchestration orchestration) { + byte[] decodedBytes = Base64.getDecoder().decode(base64EncodedOrchestratorRequest); + byte[] resultBytes = loadAndRun(decodedBytes, orchestration); + return Base64.getEncoder().encodeToString(resultBytes); + } + + /** + * Loads orchestration history from {@code orchestratorRequestBytes} and uses it to execute the + * {@code orchestration}. + * + * @param orchestratorRequestBytes the protobuf payload representing an orchestrator execution request + * @param orchestration the orchestration to execute + * @return a protobuf-encoded payload of orchestrator actions to be interpreted by the external orchestration engine + * @throws IllegalArgumentException if either parameter is {@code null} or if {@code orchestratorRequestBytes} + * is not valid protobuf + */ + public static byte[] loadAndRun(byte[] orchestratorRequestBytes, TaskOrchestration orchestration) { + if (orchestratorRequestBytes == null || orchestratorRequestBytes.length == 0) { + throw new IllegalArgumentException("triggerStateProtoBytes must not be null or empty"); + } + + if (orchestration == null) { + throw new IllegalArgumentException("orchestration must not be null"); + } + + OrchestratorService.OrchestratorRequest orchestratorRequest; + try { + orchestratorRequest = OrchestratorService.OrchestratorRequest.parseFrom(orchestratorRequestBytes); + } catch (InvalidProtocolBufferException e) { + throw new IllegalArgumentException("triggerStateProtoBytes was not valid protobuf", e); + } + + // Register the passed orchestration as the default ("*") orchestration + HashMap orchestrationFactories = new HashMap<>(); + orchestrationFactories.put("*", new TaskOrchestrationFactory() { + @Override + public String getName() { + return "*"; + } + + @Override + public TaskOrchestration create() { + return orchestration; + } + }); + + TaskOrchestrationExecutor taskOrchestrationExecutor = new TaskOrchestrationExecutor( + orchestrationFactories, + new JacksonDataConverter(), + DEFAULT_MAXIMUM_TIMER_INTERVAL, + logger, + null); // No app ID for static runner + + // TODO: Error handling + TaskOrchestratorResult taskOrchestratorResult = taskOrchestrationExecutor.execute( + orchestratorRequest.getPastEventsList(), + orchestratorRequest.getNewEventsList()); + + OrchestratorService.OrchestratorResponse response = OrchestratorService.OrchestratorResponse.newBuilder() + .setInstanceId(orchestratorRequest.getInstanceId()) + .addAllActions(taskOrchestratorResult.getActions()) + .setCustomStatus(StringValue.of(taskOrchestratorResult.getCustomStatus())) + .build(); + return response.toByteArray(); + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationRuntimeStatus.java b/durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationRuntimeStatus.java new file mode 100644 index 0000000000..1bdd33ab38 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationRuntimeStatus.java @@ -0,0 +1,118 @@ +/* + * 
Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import io.dapr.durabletask.implementation.protobuf.OrchestratorService; + +import static io.dapr.durabletask.implementation.protobuf.OrchestratorService.OrchestrationStatus.ORCHESTRATION_STATUS_CANCELED; +import static io.dapr.durabletask.implementation.protobuf.OrchestratorService.OrchestrationStatus.ORCHESTRATION_STATUS_COMPLETED; +import static io.dapr.durabletask.implementation.protobuf.OrchestratorService.OrchestrationStatus.ORCHESTRATION_STATUS_CONTINUED_AS_NEW; +import static io.dapr.durabletask.implementation.protobuf.OrchestratorService.OrchestrationStatus.ORCHESTRATION_STATUS_FAILED; +import static io.dapr.durabletask.implementation.protobuf.OrchestratorService.OrchestrationStatus.ORCHESTRATION_STATUS_PENDING; +import static io.dapr.durabletask.implementation.protobuf.OrchestratorService.OrchestrationStatus.ORCHESTRATION_STATUS_RUNNING; +import static io.dapr.durabletask.implementation.protobuf.OrchestratorService.OrchestrationStatus.ORCHESTRATION_STATUS_SUSPENDED; +import static io.dapr.durabletask.implementation.protobuf.OrchestratorService.OrchestrationStatus.ORCHESTRATION_STATUS_TERMINATED; + +/** + * Enum describing the runtime status of the orchestration. + */ +public enum OrchestrationRuntimeStatus { + /** + * The orchestration started running. + */ + RUNNING, + + /** + * The orchestration completed normally. + */ + COMPLETED, + + /** + * The orchestration is transitioning into a new instance. + * This status value is obsolete and exists only for compatibility reasons. + */ + CONTINUED_AS_NEW, + + /** + * The orchestration completed with an unhandled exception. + */ + FAILED, + + /** + * The orchestration canceled gracefully. + * The Canceled status is not currently used and exists only for compatibility reasons. + */ + CANCELED, + + /** + * The orchestration was abruptly terminated via a management API call. + */ + TERMINATED, + + /** + * The orchestration was scheduled but hasn't started running. + */ + PENDING, + + /** + * The orchestration is in a suspended state. 
+ */ + SUSPENDED; + + static OrchestrationRuntimeStatus fromProtobuf(OrchestratorService.OrchestrationStatus status) { + switch (status) { + case ORCHESTRATION_STATUS_RUNNING: + return RUNNING; + case ORCHESTRATION_STATUS_COMPLETED: + return COMPLETED; + case ORCHESTRATION_STATUS_CONTINUED_AS_NEW: + return CONTINUED_AS_NEW; + case ORCHESTRATION_STATUS_FAILED: + return FAILED; + case ORCHESTRATION_STATUS_CANCELED: + return CANCELED; + case ORCHESTRATION_STATUS_TERMINATED: + return TERMINATED; + case ORCHESTRATION_STATUS_PENDING: + return PENDING; + case ORCHESTRATION_STATUS_SUSPENDED: + return SUSPENDED; + default: + throw new IllegalArgumentException(String.format("Unknown status value: %s", status)); + } + } + + static OrchestratorService.OrchestrationStatus toProtobuf(OrchestrationRuntimeStatus status) { + switch (status) { + case RUNNING: + return ORCHESTRATION_STATUS_RUNNING; + case COMPLETED: + return ORCHESTRATION_STATUS_COMPLETED; + case CONTINUED_AS_NEW: + return ORCHESTRATION_STATUS_CONTINUED_AS_NEW; + case FAILED: + return ORCHESTRATION_STATUS_FAILED; + case CANCELED: + return ORCHESTRATION_STATUS_CANCELED; + case TERMINATED: + return ORCHESTRATION_STATUS_TERMINATED; + case PENDING: + return ORCHESTRATION_STATUS_PENDING; + case SUSPENDED: + return ORCHESTRATION_STATUS_SUSPENDED; + default: + throw new IllegalArgumentException(String.format("Unknown status value: %s", status)); + } + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationStatusQuery.java b/durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationStatusQuery.java new file mode 100644 index 0000000000..864fc37c82 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationStatusQuery.java @@ -0,0 +1,217 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import javax.annotation.Nullable; +import java.time.Instant; +import java.util.ArrayList; +import java.util.List; + +/** + * Class used for constructing orchestration metadata queries. + */ +public final class OrchestrationStatusQuery { + private List runtimeStatusList = new ArrayList<>(); + private Instant createdTimeFrom; + private Instant createdTimeTo; + private List taskHubNames = new ArrayList<>(); + private int maxInstanceCount = 100; + private String continuationToken; + private String instanceIdPrefix; + private boolean fetchInputsAndOutputs; + + /** + * Sole constructor. + */ + public OrchestrationStatusQuery() { + } + + /** + * Sets the list of runtime status values to use as a filter. Only orchestration instances that have a matching + * runtime status will be returned. The default {@code null} value will disable runtime status filtering. 
+ * + * @param runtimeStatusList the list of runtime status values to use as a filter + * @return this query object + */ + public OrchestrationStatusQuery setRuntimeStatusList(@Nullable List runtimeStatusList) { + this.runtimeStatusList = runtimeStatusList; + return this; + } + + /** + * Include orchestration instances that were created after the specified instant. + * + * @param createdTimeFrom the minimum orchestration creation time to use as a filter or {@code null} to disable this + * filter + * @return this query object + */ + public OrchestrationStatusQuery setCreatedTimeFrom(@Nullable Instant createdTimeFrom) { + this.createdTimeFrom = createdTimeFrom; + return this; + } + + /** + * Include orchestration instances that were created before the specified instant. + * + * @param createdTimeTo the maximum orchestration creation time to use as a filter or {@code null} to disable this + * filter + * @return this query object + */ + public OrchestrationStatusQuery setCreatedTimeTo(@Nullable Instant createdTimeTo) { + this.createdTimeTo = createdTimeTo; + return this; + } + + /** + * Sets the maximum number of records that can be returned by the query. The default value is 100. + * + *

Requests may return fewer records than the specified page size, even if there are more records. + * Always check the continuation token to determine whether there are more records.

+ * + * @param maxInstanceCount the maximum number of orchestration metadata records to return + * @return this query object + */ + public OrchestrationStatusQuery setMaxInstanceCount(int maxInstanceCount) { + this.maxInstanceCount = maxInstanceCount; + return this; + } + + /** + * Include orchestration metadata records that have a matching task hub name. + * + * @param taskHubNames the task hub name to match or {@code null} to disable this filter + * @return this query object + */ + public OrchestrationStatusQuery setTaskHubNames(@Nullable List taskHubNames) { + this.taskHubNames = taskHubNames; + return this; + } + + /** + * Sets the continuation token used to continue paging through orchestration metadata results. + * + *

This should always be the continuation token value from the previous query's + * {@link OrchestrationStatusQueryResult} result.
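+   * <p>An illustrative paging sketch, assuming an existing {@link DurableTaskClient} named {@code client}
+   * whose {@code queryInstances} method accepts this query object:
+   * <pre>{@code
+   * OrchestrationStatusQuery query = new OrchestrationStatusQuery().setMaxInstanceCount(50);
+   * OrchestrationStatusQueryResult page = client.queryInstances(query);
+   * while (page.getContinuationToken() != null) {
+   *     query.setContinuationToken(page.getContinuationToken());
+   *     page = client.queryInstances(query);
+   * }
+   * }</pre>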

+ * + * @param continuationToken the continuation token from the previous query + * @return this query object + */ + public OrchestrationStatusQuery setContinuationToken(@Nullable String continuationToken) { + this.continuationToken = continuationToken; + return this; + } + + /** + * Include orchestration metadata records with the specified instance ID prefix. + * + *

For example, if there are three orchestration instances in the metadata store with IDs "Foo", "Bar", and "Baz", + * specifying a prefix value of "B" will exclude "Foo" since its ID doesn't start with "B".
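+   * <p>Continuing that example:
+   * <pre>{@code
+   * OrchestrationStatusQuery query = new OrchestrationStatusQuery().setInstanceIdPrefix("B");
+   * }</pre>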

+ * + * @param instanceIdPrefix the instance ID prefix filter value + * @return this query object + */ + public OrchestrationStatusQuery setInstanceIdPrefix(@Nullable String instanceIdPrefix) { + this.instanceIdPrefix = instanceIdPrefix; + return this; + } + + /** + * Sets whether to fetch orchestration inputs, outputs, and custom status values. The default value is {@code false}. + * + * @param fetchInputsAndOutputs {@code true} to fetch orchestration inputs, outputs, and custom status values, + * otherwise {@code false} + * @return this query object + */ + public OrchestrationStatusQuery setFetchInputsAndOutputs(boolean fetchInputsAndOutputs) { + this.fetchInputsAndOutputs = fetchInputsAndOutputs; + return this; + } + + /** + * Gets the configured runtime status filter or {@code null} if none was configured. + * + * @return the configured runtime status filter as a list of values or {@code null} if none was configured + */ + public List getRuntimeStatusList() { + return runtimeStatusList; + } + + /** + * Gets the configured minimum orchestration creation time or {@code null} if none was configured. + * + * @return the configured minimum orchestration creation time or {@code null} if none was configured + */ + @Nullable + public Instant getCreatedTimeFrom() { + return createdTimeFrom; + } + + /** + * Gets the configured maximum orchestration creation time or {@code null} if none was configured. + * + * @return the configured maximum orchestration creation time or {@code null} if none was configured + */ + @Nullable + public Instant getCreatedTimeTo() { + return createdTimeTo; + } + + /** + * Gets the configured maximum number of records that can be returned by the query. + * + * @return the configured maximum number of records that can be returned by the query + */ + public int getMaxInstanceCount() { + return maxInstanceCount; + } + + /** + * Gets the configured task hub names to match or {@code null} if none were configured. + * + * @return the configured task hub names to match or {@code null} if none were configured + */ + public List getTaskHubNames() { + return taskHubNames; + } + + /** + * Gets the configured continuation token value or {@code null} if none was configured. + * + * @return the configured continuation token value or {@code null} if none was configured + */ + @Nullable + public String getContinuationToken() { + return continuationToken; + } + + /** + * Gets the configured instance ID prefix filter value or {@code null} if none was configured. + * + * @return the configured instance ID prefix filter value or {@code null} if none was configured. + */ + @Nullable + public String getInstanceIdPrefix() { + return instanceIdPrefix; + } + + /** + * Gets the configured value that determines whether to fetch orchestration inputs, outputs, and custom status values. 
+ * + * @return the configured value that determines whether to fetch orchestration inputs, outputs, and custom + * status values + */ + public boolean isFetchInputsAndOutputs() { + return fetchInputsAndOutputs; + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationStatusQueryResult.java b/durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationStatusQueryResult.java new file mode 100644 index 0000000000..efb4908c19 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/OrchestrationStatusQueryResult.java @@ -0,0 +1,53 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import javax.annotation.Nullable; +import java.util.List; + +/** + * Class representing the results of a filtered orchestration metadata query. + * + *

Orchestration metadata can be queried with filters using the {@link DurableTaskClient#queryInstances} method.

+ */ +public final class OrchestrationStatusQueryResult { + private final List orchestrationStates; + private final String continuationToken; + + OrchestrationStatusQueryResult(List orchestrationStates, @Nullable String continuationToken) { + this.orchestrationStates = orchestrationStates; + this.continuationToken = continuationToken; + } + + /** + * Gets the list of orchestration metadata records that matched the {@link DurableTaskClient#queryInstances} query. + * + * @return the list of orchestration metadata records that matched the {@link DurableTaskClient#queryInstances} query. + */ + public List getOrchestrationState() { + return this.orchestrationStates; + } + + /** + * Gets the continuation token to use with the next query or {@code null} if no more metadata records are found. + * + *

Note that a non-null value does not always mean that there are more metadata records that can be returned by a + * query.

+ * + * @return the continuation token to use with the next query or {@code null} if no more metadata records are found. + */ + public String getContinuationToken() { + return this.continuationToken; + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/OrchestratorFunction.java b/durabletask-client/src/main/java/io/dapr/durabletask/OrchestratorFunction.java new file mode 100644 index 0000000000..a4d2f2f087 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/OrchestratorFunction.java @@ -0,0 +1,38 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +/** + * Functional interface for inline orchestrator functions. + * + *

+ * See the description of {@link TaskOrchestration} for more information about how to correctly
+ * implement orchestrators.

+ * + * @param the type of the result returned by the function + */ +@FunctionalInterface +public interface OrchestratorFunction { + /** + * Executes an orchestrator function and returns a result to use as the orchestration output. + * + *

+   * This functional interface is designed to support implementing orchestrators as lambda functions. It's intended
+   * to be very similar to {@link java.util.function.Function}, but with a signature that's specific to
+   * orchestrators.
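+   *
+   * <p>For illustration only (the activity name {@code "SayHello"} is hypothetical and not part of this API), an
+   * orchestrator expressed as a lambda might look like:
+   * <pre>{@code
+   * // The lambda body runs as orchestrator code, so the usual determinism rules apply.
+   * OrchestratorFunction<String> greetOrchestrator = ctx -> {
+   *     String name = ctx.getInput(String.class);
+   *     return ctx.callActivity("SayHello", name, String.class).await();
+   * };
+   * }</pre>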

+ * + * @param ctx the orchestration context, which provides access to additional context for the current orchestration + * execution + * @return the serializable output of the orchestrator function + */ + R apply(TaskOrchestrationContext ctx); +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/PurgeInstanceCriteria.java b/durabletask-client/src/main/java/io/dapr/durabletask/PurgeInstanceCriteria.java new file mode 100644 index 0000000000..50260c1fc7 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/PurgeInstanceCriteria.java @@ -0,0 +1,125 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import javax.annotation.Nullable; +import java.time.Duration; +import java.time.Instant; +import java.util.ArrayList; +import java.util.List; + +/** + * Class used for constructing orchestration instance purge selection criteria. + */ +public final class PurgeInstanceCriteria { + + private Instant createdTimeFrom; + private Instant createdTimeTo; + private List runtimeStatusList = new ArrayList<>(); + private Duration timeout; + + /** + * Creates a new, default instance of the {@code PurgeInstanceCriteria} class. + */ + public PurgeInstanceCriteria() { + } + + /** + * Purge orchestration instances that were created after the specified instant. + * + * @param createdTimeFrom the minimum orchestration creation time to use as a selection criteria or {@code null} to + * disable this selection criteria + * @return this criteria object + */ + public PurgeInstanceCriteria setCreatedTimeFrom(Instant createdTimeFrom) { + this.createdTimeFrom = createdTimeFrom; + return this; + } + + /** + * Purge orchestration instances that were created before the specified instant. + * + * @param createdTimeTo the maximum orchestration creation time to use as a selection criteria or {@code null} to + * disable this selection criteria + * @return this criteria object + */ + public PurgeInstanceCriteria setCreatedTimeTo(Instant createdTimeTo) { + this.createdTimeTo = createdTimeTo; + return this; + } + + /** + * Sets the list of runtime status values to use as a selection criteria. Only orchestration instances that have a + * matching runtime status will be purged. An empty list is the same as selecting for all runtime status values. + * + * @param runtimeStatusList the list of runtime status values to use as a selection criteria + * @return this criteria object + */ + public PurgeInstanceCriteria setRuntimeStatusList(List runtimeStatusList) { + this.runtimeStatusList = runtimeStatusList; + return this; + } + + /** + * Sets a timeout duration for the purge operation. Setting to {@code null} will reset the timeout + * to be the default value. 
+ * + * @param timeout the amount of time to wait for the purge instance operation to complete + * @return this criteria object + */ + public PurgeInstanceCriteria setTimeout(Duration timeout) { + this.timeout = timeout; + return this; + } + + /** + * Gets the configured minimum orchestration creation time or {@code null} if none was configured. + * + * @return the configured minimum orchestration creation time or {@code null} if none was configured + */ + @Nullable + public Instant getCreatedTimeFrom() { + return this.createdTimeFrom; + } + + /** + * Gets the configured maximum orchestration creation time or {@code null} if none was configured. + * + * @return the configured maximum orchestration creation time or {@code null} if none was configured + */ + @Nullable + public Instant getCreatedTimeTo() { + return this.createdTimeTo; + } + + /** + * Gets the configured runtime status selection criteria. + * + * @return the configured runtime status filter as a list of values + */ + public List getRuntimeStatusList() { + return this.runtimeStatusList; + } + + /** + * Gets the configured timeout duration or {@code null} if none was configured. + * + * @return the configured timeout + */ + @Nullable + public Duration getTimeout() { + return this.timeout; + } + +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/PurgeResult.java b/durabletask-client/src/main/java/io/dapr/durabletask/PurgeResult.java new file mode 100644 index 0000000000..8d35218661 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/PurgeResult.java @@ -0,0 +1,37 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +/** + * Class representing the results of an orchestration state purge operation. + * + *

+ * Orchestration state can be purged using any of the {@link DurableTaskClient#purgeInstances} method overloads.
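+ *
+ * <p>For illustration only, and assuming an overload of {@code purgeInstances} that accepts a
+ * {@link PurgeInstanceCriteria}, a purge of older instances might look like the following sketch (the
+ * {@code client} variable and the 30-day cutoff are illustrative):
+ * <pre>{@code
+ * // Select everything created more than 30 days ago; an empty runtime-status list matches all statuses.
+ * PurgeInstanceCriteria criteria = new PurgeInstanceCriteria()
+ *         .setCreatedTimeTo(Instant.now().minus(Duration.ofDays(30)));
+ *
+ * PurgeResult result = client.purgeInstances(criteria);
+ * int purged = result.getDeletedInstanceCount();
+ * }</pre>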

+ */ +public final class PurgeResult { + + private final int deletedInstanceCount; + + PurgeResult(int deletedInstanceCount) { + this.deletedInstanceCount = deletedInstanceCount; + } + + /** + * Gets the number of purged orchestration instances. + * + * @return the number of purged orchestration instances + */ + public int getDeletedInstanceCount() { + return this.deletedInstanceCount; + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/RetryContext.java b/durabletask-client/src/main/java/io/dapr/durabletask/RetryContext.java new file mode 100644 index 0000000000..620e02c7d3 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/RetryContext.java @@ -0,0 +1,79 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import java.time.Duration; + +/** + * Context data that's provided to {@link RetryHandler} implementations. + */ +public final class RetryContext { + private final TaskOrchestrationContext orchestrationContext; + private final int lastAttemptNumber; + private final FailureDetails lastFailure; + private final Duration totalRetryTime; + + RetryContext( + TaskOrchestrationContext orchestrationContext, + int lastAttemptNumber, + FailureDetails lastFailure, + Duration totalRetryTime) { + this.orchestrationContext = orchestrationContext; + this.lastAttemptNumber = lastAttemptNumber; + this.lastFailure = lastFailure; + this.totalRetryTime = totalRetryTime; + } + + /** + * Gets the context of the current orchestration. + * + *

+   * The orchestration context can be used in retry handlers to schedule timers (via the
+   * {@link TaskOrchestrationContext#createTimer} methods) for implementing delays between retries. It can also be
+   * used to implement time-based retry logic by using the {@link TaskOrchestrationContext#getCurrentInstant} method.
+   *

+ * + * @return the context of the parent orchestration + */ + public TaskOrchestrationContext getOrchestrationContext() { + return this.orchestrationContext; + } + + /** + * Gets the details of the previous task failure, including the exception type, message, and callstack. + * + * @return the details of the previous task failure + */ + public FailureDetails getLastFailure() { + return this.lastFailure; + } + + /** + * Gets the previous retry attempt number. This number starts at 1 and increments each time the retry handler + * is invoked for a particular task failure. + * + * @return the previous retry attempt number + */ + public int getLastAttemptNumber() { + return this.lastAttemptNumber; + } + + /** + * Gets the total amount of time spent in a retry loop for the current task. + * + * @return the total amount of time spent in a retry loop for the current task + */ + public Duration getTotalRetryTime() { + return this.totalRetryTime; + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/RetryHandler.java b/durabletask-client/src/main/java/io/dapr/durabletask/RetryHandler.java new file mode 100644 index 0000000000..ad246a0c65 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/RetryHandler.java @@ -0,0 +1,31 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +/** + * Functional interface for implementing custom task retry handlers. + * + *

+ * It's important to remember that retry handler code is an extension of the orchestrator code and must
+ * therefore comply with all the determinism requirements of orchestrator code.
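+ *
+ * <p>For illustration only, a handler that stops retrying after three attempts or five minutes of total retry time
+ * might be written as a lambda (the thresholds are arbitrary examples):
+ * <pre>{@code
+ * // Only deterministic values exposed by the RetryContext are consulted here.
+ * RetryHandler handler = retryContext ->
+ *         retryContext.getLastAttemptNumber() < 3
+ *                 && retryContext.getTotalRetryTime().compareTo(Duration.ofMinutes(5)) < 0;
+ * }</pre>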

+ */ +@FunctionalInterface +public interface RetryHandler { + /** + * Invokes the retry handler logic and returns a value indicating whether to continue retrying. + * + * @param context retry context that's updated between each retry attempt + * @return {@code true} to continue retrying or {@code false} to stop retrying. + */ + boolean handle(RetryContext context); +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/RetryPolicy.java b/durabletask-client/src/main/java/io/dapr/durabletask/RetryPolicy.java new file mode 100644 index 0000000000..9efd912b1e --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/RetryPolicy.java @@ -0,0 +1,176 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import javax.annotation.Nullable; +import java.time.Duration; +import java.util.Objects; + +/** + * A declarative retry policy that can be configured for activity or sub-orchestration calls. + */ +public final class RetryPolicy { + + private int maxNumberOfAttempts; + private Duration firstRetryInterval; + private double backoffCoefficient = 1.0; + private Duration maxRetryInterval = Duration.ZERO; + private Duration retryTimeout = Duration.ZERO; + + /** + * Creates a new {@code RetryPolicy} object. + * + * @param maxNumberOfAttempts the maximum number of task invocation attempts; must be 1 or greater + * @param firstRetryInterval the amount of time to delay between the first and second attempt + * @throws IllegalArgumentException if {@code maxNumberOfAttempts} is zero or negative + */ + public RetryPolicy(int maxNumberOfAttempts, Duration firstRetryInterval) { + this.setMaxNumberOfAttempts(maxNumberOfAttempts); + this.setFirstRetryInterval(firstRetryInterval); + } + + /** + * Sets the maximum number of task invocation attempts; must be 1 or greater. + * + *

+   * This value represents the number of times to attempt to execute the task. It does not represent
+   * the maximum number of times to retry the task. This is why the number must be 1 or greater.

+ * + * @param maxNumberOfAttempts the maximum number of attempts; must be 1 or greater + * @return this retry policy object + * @throws IllegalArgumentException if {@code maxNumberOfAttempts} is zero or negative + */ + public RetryPolicy setMaxNumberOfAttempts(int maxNumberOfAttempts) { + if (maxNumberOfAttempts <= 0) { + throw new IllegalArgumentException("The value for maxNumberOfAttempts must be greater than zero."); + } + this.maxNumberOfAttempts = maxNumberOfAttempts; + return this; + } + + /** + * Sets the amount of time to delay between the first and second attempt. + * + * @param firstRetryInterval the amount of time to delay between the first and second attempt + * @return this retry policy object + * @throws IllegalArgumentException if {@code firstRetryInterval} is {@code null}, zero, or negative. + */ + public RetryPolicy setFirstRetryInterval(Duration firstRetryInterval) { + if (firstRetryInterval == null) { + throw new IllegalArgumentException("firstRetryInterval cannot be null."); + } + if (firstRetryInterval.isZero() || firstRetryInterval.isNegative()) { + throw new IllegalArgumentException("The value for firstRetryInterval must be greater than zero."); + } + this.firstRetryInterval = firstRetryInterval; + return this; + } + + /** + * Sets the exponential backoff coefficient used to determine the delay between subsequent retries. + * Must be 1.0 or greater. + * + *

+   * To avoid extremely long delays between retries, consider also specifying a maximum retry interval using the
+   * {@link #setMaxRetryInterval} method.

+ * + * @param backoffCoefficient the exponential backoff coefficient + * @return this retry policy object + * @throws IllegalArgumentException if {@code backoffCoefficient} is less than 1.0 + */ + public RetryPolicy setBackoffCoefficient(double backoffCoefficient) { + if (backoffCoefficient < 1.0) { + throw new IllegalArgumentException("The value for backoffCoefficient must be greater or equal to 1.0."); + } + this.backoffCoefficient = backoffCoefficient; + return this; + } + + /** + * Sets the maximum time to delay between attempts. + * + *

+   * It's recommended to set a maximum retry interval whenever using a backoff coefficient that's greater than the
+   * default of 1.0.
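+   *
+   * <p>For illustration only, a policy that allows up to five attempts with exponential backoff capped at one
+   * minute might be configured as follows (the values are arbitrary examples):
+   * <pre>{@code
+   * RetryPolicy policy = new RetryPolicy(5, Duration.ofSeconds(2))
+   *         .setBackoffCoefficient(2.0)
+   *         .setMaxRetryInterval(Duration.ofMinutes(1));
+   * }</pre>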

+ * + * @param maxRetryInterval the maximum time to delay between attempts or {@code null} to remove the maximum retry + * interval + * @return this retry policy object + */ + public RetryPolicy setMaxRetryInterval(@Nullable Duration maxRetryInterval) { + if (maxRetryInterval != null && maxRetryInterval.compareTo(this.firstRetryInterval) < 0) { + throw new IllegalArgumentException("The value for maxRetryInterval must be greater than or equal to the value " + + "for firstRetryInterval."); + } + this.maxRetryInterval = maxRetryInterval; + return this; + } + + /** + * Sets the overall timeout for retries, regardless of the retry count. + * + * @param retryTimeout the overall timeout for retries + * @return this retry policy object + */ + public RetryPolicy setRetryTimeout(Duration retryTimeout) { + if (retryTimeout == null || retryTimeout.compareTo(this.firstRetryInterval) < 0) { + throw new IllegalArgumentException("The value for retryTimeout cannot be null and must be greater than or equal " + + "to the value for firstRetryInterval."); + } + this.retryTimeout = retryTimeout; + return this; + } + + /** + * Gets the configured maximum number of task invocation attempts. + * + * @return the configured maximum number of task invocation attempts. + */ + public int getMaxNumberOfAttempts() { + return this.maxNumberOfAttempts; + } + + /** + * Gets the configured amount of time to delay between the first and second attempt. + * + * @return the configured amount of time to delay between the first and second attempt + */ + public Duration getFirstRetryInterval() { + return this.firstRetryInterval; + } + + /** + * Gets the configured exponential backoff coefficient used to determine the delay between subsequent retries. + * + * @return the configured exponential backoff coefficient used to determine the delay between subsequent retries + */ + public double getBackoffCoefficient() { + return this.backoffCoefficient; + } + + /** + * Gets the configured maximum time to delay between attempts. + * + * @return the configured maximum time to delay between attempts + */ + public Duration getMaxRetryInterval() { + return this.maxRetryInterval; + } + + /** + * Gets the configured overall timeout for retries. + * + * @return the configured overall timeout for retries + */ + public Duration getRetryTimeout() { + return this.retryTimeout; + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/Task.java b/durabletask-client/src/main/java/io/dapr/durabletask/Task.java new file mode 100644 index 0000000000..a3f3313816 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/Task.java @@ -0,0 +1,91 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import io.dapr.durabletask.interruption.OrchestratorBlockedException; + +import java.util.concurrent.CompletableFuture; +import java.util.function.Consumer; +import java.util.function.Function; + +/** + * Represents an asynchronous operation in a durable orchestration. 
+ * + *

+ * {@code Task} instances are created by methods on the {@link TaskOrchestrationContext} class, which is available
+ * in {@link TaskOrchestration} implementations. For example, scheduling an activity will return a task.

+ *
+ * <pre>
+ * Task{@literal <}int{@literal >} activityTask = ctx.callActivity("MyActivity", someInput, int.class);
+ * </pre>
+ * 
+ *

+ * Orchestrator code uses the {@link #await()} method to block on the completion of the task and retrieve the result.
+ * If the task is not yet complete, the {@code await()} method will throw an {@link OrchestratorBlockedException}, which
+ * pauses the orchestrator's execution so that it can save its progress into durable storage and schedule any
+ * outstanding work. When the task is complete, the orchestrator will run again from the beginning and the next time
+ * the task's {@code await()} method is called, the result will be returned, or a {@link TaskFailedException} will be
+ * thrown if the result of the task was an unhandled exception.

+ *

+ * Note that orchestrator code must never catch {@code OrchestratorBlockedException} because doing so can cause the
+ * orchestration instance to get permanently stuck.

+ * + * @param the return type of the task + */ +public abstract class Task { + final CompletableFuture future; + + Task(CompletableFuture future) { + this.future = future; + } + + /** + * Returns {@code true} if completed in any fashion: normally, with an exception, or via cancellation. + * + * @return {@code true} if completed, otherwise {@code false} + */ + public boolean isDone() { + return this.future.isDone(); + } + + /** + * Returns {@code true} if the task was cancelled. + * + * @return {@code true} if the task was cancelled, otherwise {@code false} + */ + public boolean isCancelled() { + return this.future.isCancelled(); + } + + /** + * Blocks the orchestrator until this task to complete, and then returns its result. + * + * @return the result of the task + */ + public abstract V await(); + + /** + * Returns a new {@link Task} that, when this Task completes normally, + * is executed with this Task's result as the argument to the supplied function. + * + * @param fn the function to use to compute the value of the returned Task + * @param the function's return type + * @return the new Task + */ + public abstract Task thenApply(Function fn); + + /** + * Returns a new {@link Task} that, when this Task completes normally, + * is executed with this Task's result as the argument to the supplied action. + * + * @param fn the function to use to compute the value of the returned Task + * @return the new Task + */ + public abstract Task thenAccept(Consumer fn); +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/TaskActivity.java b/durabletask-client/src/main/java/io/dapr/durabletask/TaskActivity.java new file mode 100644 index 0000000000..27e4291e95 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/TaskActivity.java @@ -0,0 +1,45 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +/** + * Common interface for task activity implementations. + * + *

+ * Activities are the basic unit of work in a durable task orchestration. Activities are the tasks that are
+ * orchestrated in the business process. For example, you might create an orchestrator to process an order. The tasks
+ * may involve checking the inventory, charging the customer, and creating a shipment. Each task would be a separate
+ * activity. These activities may be executed serially, in parallel, or some combination of both.

+ * + *

+ * Unlike task orchestrators, activities aren't restricted in the type of work you can do in them. Activity functions
+ * are frequently used to make network calls or run CPU intensive operations. An activity can also return data back to
+ * the orchestrator function. The Durable Task runtime guarantees that each called activity function will be executed
+ * at least once during an orchestration's execution.

+ * + *

+ * Because activities only guarantee at least once execution, it's recommended that activity logic be implemented as
+ * idempotent whenever possible.

+ * + *

+ * Activities are scheduled by orchestrators using one of the {@link TaskOrchestrationContext#callActivity} method
+ * overloads.
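+ *
+ * <p>For illustration only (the registration API is not shown here), an activity implemented as a lambda might look
+ * like:
+ * <pre>{@code
+ * // Reverses the string input received from the orchestrator and returns the result.
+ * TaskActivity reverse = activityContext -> {
+ *     String input = activityContext.getInput(String.class);
+ *     return new StringBuilder(input).reverse().toString();
+ * };
+ * }</pre>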

+ */ +@FunctionalInterface +public interface TaskActivity { + /** + * Executes the activity logic and returns a value which will be serialized and returned to the calling orchestrator. + * + * @param ctx provides information about the current activity execution, like the activity's name and the input + * data provided to it by the orchestrator. + * @return any serializable value to be returned to the calling orchestrator. + */ + Object run(TaskActivityContext ctx); +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/TaskActivityContext.java b/durabletask-client/src/main/java/io/dapr/durabletask/TaskActivityContext.java new file mode 100644 index 0000000000..b2043b51ee --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/TaskActivityContext.java @@ -0,0 +1,51 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +/** + * Interface that provides {@link TaskActivity} implementations with activity context, such as an activity's name and + * its input. + */ +public interface TaskActivityContext { + /** + * Gets the name of the current task activity. + * + * @return the name of the current task activity + */ + String getName(); + + /** + * Gets the deserialized activity input. + * + * @param targetType the {@link Class} object associated with {@code T} + * @param the target type to deserialize the input into + * @return the deserialized activity input value + */ + T getInput(Class targetType); + + + /** + * Gets the execution id of the current task activity. + * + * @return the execution id of the current task activity + */ + String getTaskExecutionId(); + + /** + * Gets the task id of the current task activity. + * + * @return the task id of the current task activity + */ + int getTaskId(); +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/TaskActivityExecutor.java b/durabletask-client/src/main/java/io/dapr/durabletask/TaskActivityExecutor.java new file mode 100644 index 0000000000..a8ef6c67e0 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/TaskActivityExecutor.java @@ -0,0 +1,96 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package io.dapr.durabletask; + +import java.util.HashMap; +import java.util.logging.Logger; + +final class TaskActivityExecutor { + private final HashMap activityFactories; + private final DataConverter dataConverter; + private final Logger logger; + + public TaskActivityExecutor( + HashMap activityFactories, + DataConverter dataConverter, + Logger logger) { + this.activityFactories = activityFactories; + this.dataConverter = dataConverter; + this.logger = logger; + } + + public String execute(String taskName, String input, String taskExecutionId, int taskId) throws Throwable { + TaskActivityFactory factory = this.activityFactories.get(taskName); + if (factory == null) { + throw new IllegalStateException( + String.format("No activity task named '%s' is registered.", taskName)); + } + + TaskActivity activity = factory.create(); + if (activity == null) { + throw new IllegalStateException( + String.format("The task factory '%s' returned a null TaskActivity object.", taskName)); + } + + TaskActivityContextImpl context = new TaskActivityContextImpl(taskName, input, taskExecutionId, taskId); + + // Unhandled exceptions are allowed to escape + Object output = activity.run(context); + if (output != null) { + return this.dataConverter.serialize(output); + } + + return null; + } + + private class TaskActivityContextImpl implements TaskActivityContext { + private final String name; + private final String rawInput; + private final String taskExecutionId; + private final int taskId; + + private final DataConverter dataConverter = TaskActivityExecutor.this.dataConverter; + + public TaskActivityContextImpl(String activityName, String rawInput, String taskExecutionId, int taskId) { + this.name = activityName; + this.rawInput = rawInput; + this.taskExecutionId = taskExecutionId; + this.taskId = taskId; + } + + @Override + public String getName() { + return this.name; + } + + @Override + public T getInput(Class targetType) { + if (this.rawInput == null) { + return null; + } + + return this.dataConverter.deserialize(this.rawInput, targetType); + } + + @Override + public String getTaskExecutionId() { + return this.taskExecutionId; + } + + @Override + public int getTaskId() { + return this.taskId; + } + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/TaskActivityFactory.java b/durabletask-client/src/main/java/io/dapr/durabletask/TaskActivityFactory.java new file mode 100644 index 0000000000..e3ef45a95b --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/TaskActivityFactory.java @@ -0,0 +1,33 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +/** + * Factory interface for producing {@link TaskActivity} implementations. + */ +public interface TaskActivityFactory { + /** + * Gets the name of the activity this factory creates. + * + * @return the name of the activity + */ + String getName(); + + /** + * Creates a new instance of {@link TaskActivity}. 
+ * + * @return the created activity instance + */ + TaskActivity create(); +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/TaskCanceledException.java b/durabletask-client/src/main/java/io/dapr/durabletask/TaskCanceledException.java new file mode 100644 index 0000000000..5b79882ed8 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/TaskCanceledException.java @@ -0,0 +1,26 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +//@TODO: This should inherit from Exception, not TaskFailedException + +/** + * Represents a task cancellation, either because of a timeout or because of an explicit cancellation operation. + */ +public final class TaskCanceledException extends TaskFailedException { + // Only intended to be created within this package + TaskCanceledException(String message, String taskName, int taskId) { + super(message, taskName, taskId, new FailureDetails(TaskCanceledException.class.getName(), message, "", true)); + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/TaskFailedException.java b/durabletask-client/src/main/java/io/dapr/durabletask/TaskFailedException.java new file mode 100644 index 0000000000..377eecb426 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/TaskFailedException.java @@ -0,0 +1,76 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +/** + * Exception that gets thrown when awaiting a {@link Task} for an activity or sub-orchestration that fails with an + * unhandled exception. + *

+ * Detailed information associated with a particular task failure can be retrieved
+ * using the {@link #getErrorDetails()} method.

+ */ +public class TaskFailedException extends RuntimeException { + private final FailureDetails details; + private final String taskName; + private final int taskId; + + TaskFailedException(String taskName, int taskId, FailureDetails details) { + this(getExceptionMessage(taskName, taskId, details), taskName, taskId, details); + } + + TaskFailedException(String message, String taskName, int taskId, FailureDetails details) { + super(message); + this.taskName = taskName; + this.taskId = taskId; + this.details = details; + } + + /** + * Gets the ID of the failed task. + * + *

+   * Each durable task (activities, timers, sub-orchestrations, etc.) scheduled by a task orchestrator has an
+   * auto-incrementing ID associated with it. This ID is used to distinguish tasks from one another, even if, for
+   * example, they are tasks that call the same activity. This ID can therefore be used to more easily correlate a
+   * specific task failure to a specific task.
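+   *
+   * <p>For illustration only (the activity name and input are hypothetical), orchestrator code might inspect a
+   * failure like this:
+   * <pre>{@code
+   * try {
+   *     ctx.callActivity("ChargeCustomer", order).await();
+   * } catch (TaskFailedException e) {
+   *     // The task name and ID identify which scheduled task failed; getErrorDetails() carries the original error.
+   *     String failedTask = e.getTaskName() + " (#" + e.getTaskId() + ")";
+   *     FailureDetails details = e.getErrorDetails();
+   * }
+   * }</pre>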

+ * + * @return the ID of the failed task + */ + public int getTaskId() { + return this.taskId; + } + + /** + * Gets the name of the failed task. + * + * @return the name of the failed task + */ + public String getTaskName() { + return this.taskName; + } + + /** + * Gets the details of the task failure, including exception information. + * + * @return the details of the task failure + */ + public FailureDetails getErrorDetails() { + return this.details; + } + + private static String getExceptionMessage(String taskName, int taskId, FailureDetails details) { + return String.format("Task '%s' (#%d) failed with an unhandled exception: %s", + taskName, + taskId, + details.getErrorMessage()); + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/TaskOptions.java b/durabletask-client/src/main/java/io/dapr/durabletask/TaskOptions.java new file mode 100644 index 0000000000..e23ee54b77 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/TaskOptions.java @@ -0,0 +1,171 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +/** + * Options that can be used to control the behavior of orchestrator and activity task execution. + */ +public final class TaskOptions { + private final RetryPolicy retryPolicy; + private final RetryHandler retryHandler; + private final String appID; + + private TaskOptions(RetryPolicy retryPolicy, RetryHandler retryHandler, String appID) { + this.retryPolicy = retryPolicy; + this.retryHandler = retryHandler; + this.appID = appID; + } + + /** + * Creates a new builder for {@code TaskOptions}. + * + * @return a new builder instance + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Creates a new {@code TaskOptions} object with default values. + * + * @return a new TaskOptions instance with no configuration + */ + public static TaskOptions create() { + return new Builder().build(); + } + + /** + * Creates a new {@code TaskOptions} object from a {@link RetryPolicy}. + * + * @param retryPolicy the retry policy to use in the new {@code TaskOptions} object. + * @return a new TaskOptions instance with the specified retry policy + */ + public static TaskOptions withRetryPolicy(RetryPolicy retryPolicy) { + return new Builder().retryPolicy(retryPolicy).build(); + } + + /** + * Creates a new {@code TaskOptions} object from a {@link RetryHandler}. + * + * @param retryHandler the retry handler to use in the new {@code TaskOptions} object. + * @return a new TaskOptions instance with the specified retry handler + */ + public static TaskOptions withRetryHandler(RetryHandler retryHandler) { + return new Builder().retryHandler(retryHandler).build(); + } + + /** + * Creates a new {@code TaskOptions} object with the specified app ID. 
+ * + * @param appID the app ID to use for cross-app workflow routing + * @return a new TaskOptions instance with the specified app ID + */ + public static TaskOptions withAppID(String appID) { + return new Builder().appID(appID).build(); + } + + boolean hasRetryPolicy() { + return this.retryPolicy != null; + } + + /** + * Gets the configured {@link RetryPolicy} value or {@code null} if none was configured. + * + * @return the configured retry policy + */ + public RetryPolicy getRetryPolicy() { + return this.retryPolicy; + } + + boolean hasRetryHandler() { + return this.retryHandler != null; + } + + /** + * Gets the configured {@link RetryHandler} value or {@code null} if none was configured. + * + * @return the configured retry handler. + */ + public RetryHandler getRetryHandler() { + return this.retryHandler; + } + + /** + * Gets the configured app ID value or {@code null} if none was configured. + * + * @return the configured app ID + */ + public String getAppID() { + return this.appID; + } + + boolean hasAppID() { + return this.appID != null && !this.appID.isEmpty(); + } + + /** + * Builder for creating {@code TaskOptions} instances. + */ + public static final class Builder { + private RetryPolicy retryPolicy; + private RetryHandler retryHandler; + private String appID; + + private Builder() { + // Private constructor -enforces using TaskOptions.builder() + } + + /** + * Sets the retry policy for the task options. + * + * @param retryPolicy the retry policy to use + * @return this builder instance for method chaining + */ + public Builder retryPolicy(RetryPolicy retryPolicy) { + this.retryPolicy = retryPolicy; + return this; + } + + /** + * Sets the retry handler for the task options. + * + * @param retryHandler the retry handler to use + * @return this builder instance for method chaining + */ + public Builder retryHandler(RetryHandler retryHandler) { + this.retryHandler = retryHandler; + return this; + } + + /** + * Sets the app ID for cross-app workflow routing. + * + * @param appID the app ID to use + * @return this builder instance for method chaining + */ + public Builder appID(String appID) { + this.appID = appID; + return this; + } + + /** + * Builds a new {@code TaskOptions} instance with the configured values. + * + * @return a new TaskOptions instance + */ + public TaskOptions build() { + return new TaskOptions(this.retryPolicy, this.retryHandler, this.appID); + } + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestration.java b/durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestration.java new file mode 100644 index 0000000000..8935313779 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestration.java @@ -0,0 +1,82 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +/** + * Common interface for task orchestrator implementations. + * + *

+ * Task orchestrators describe how actions are executed and the order in which actions are executed. Orchestrators
+ * don't call into external services or do complex computation directly. Rather, they delegate these tasks to
+ * activities, which perform the actual work.

+ * + *

+ * Orchestrators can be scheduled using the {@link DurableTaskClient#scheduleNewOrchestrationInstance} method
+ * overloads. Orchestrators can also invoke child orchestrators using the
+ * {@link TaskOrchestrationContext#callSubOrchestrator} method overloads.

+ * + *

+ * Orchestrators may be replayed multiple times to rebuild their local state after being reloaded into memory.
+ * Orchestrator code must therefore be deterministic to ensure no unexpected side effects from execution
+ * replay. To account for this behavior, there are several coding constraints to be aware of:

+ *
    + *
+ * <ul>
+ *   <li>
+ *     An orchestrator must not generate random numbers or random UUIDs, get the current date, read environment
+ *     variables, or do anything else that might result in a different value if the code is replayed in the future.
+ *     Activities and built-in methods on the {@link TaskOrchestrationContext} parameter, like
+ *     {@link TaskOrchestrationContext#getCurrentInstant()}, can be used to work around these restrictions
+ *     (see the illustrative sketch after this list).
+ *   </li>
+ *   <li>
+ *     Orchestrator logic must be executed on the orchestrator thread. Creating new threads or scheduling callbacks
+ *     onto background threads is forbidden and may result in failures or other unexpected behavior.
+ *   </li>
+ *   <li>
+ *     Avoid infinite loops as they could cause the application to run out of memory. Instead, ensure that loops are
+ *     bounded or use {@link TaskOrchestrationContext#continueAsNew} to restart an orchestrator with a new input.
+ *   </li>
+ *   <li>
+ *     Avoid logging directly in the orchestrator code because log messages will be duplicated on each replay.
+ *     Instead, check the value of the {@link TaskOrchestrationContext#getIsReplaying} method and write log messages
+ *     only when it is {@code false}.
+ *   </li>
+ * </ul>
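+ *
+ * <p>For illustration only, a replay-safe way to obtain the current time and a unique ID inside an orchestrator
+ * might look like the following sketch (the variable names are illustrative):
+ * <pre>{@code
+ * // Replay-safe: both values come from the orchestration context rather than the system clock or a random
+ * // source, so they yield the same result on every replay.
+ * Instant now = ctx.getCurrentInstant();
+ * UUID correlationId = ctx.newUuid();
+ * }</pre>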
+ * + *

+ * Orchestrator code is tightly coupled with its execution history so special care must be taken when making changes
+ * to orchestrator code. For example, adding or removing activity tasks to an orchestrator's code may cause a
+ * mismatch between code and history for in-flight orchestrations. To avoid potential issues related to orchestrator
+ * versioning, consider applying the following strategies:

+ *
    + *
+ * <ul>
+ *   <li>
+ *     Deploy multiple versions of applications side-by-side allowing new code to run independently of old code.
+ *   </li>
+ *   <li>
+ *     Rather than changing existing orchestrators, create new orchestrators that implement the modified behavior.
+ *   </li>
+ *   <li>
+ *     Ensure all in-flight orchestrations are complete before applying code changes to existing orchestrator code.
+ *   </li>
+ *   <li>
+ *     If possible, only make changes to orchestrator code that won't impact its history or execution path. For
+ *     example, renaming variables or adding log statements have no impact on an orchestrator's execution path and
+ *     are safe to apply to existing orchestrations.
+ *   </li>
+ * </ul>
+ */ +@FunctionalInterface +public interface TaskOrchestration { + /** + * Executes the orchestrator logic. + * + * @param ctx provides access to methods for scheduling durable tasks and getting information about the current + * orchestration instance. + */ + void run(TaskOrchestrationContext ctx); +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestrationContext.java b/durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestrationContext.java new file mode 100644 index 0000000000..df0c95ec82 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestrationContext.java @@ -0,0 +1,598 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import javax.annotation.Nullable; +import java.time.Duration; +import java.time.Instant; +import java.time.ZonedDateTime; +import java.util.Arrays; +import java.util.List; +import java.util.UUID; + +/** + * Used by orchestrators to perform actions such as scheduling tasks, durable timers, waiting for external events, + * and for getting basic information about the current orchestration. + */ +public interface TaskOrchestrationContext { + /** + * Gets the name of the current task orchestration. + * + * @return the name of the current task orchestration + */ + String getName(); + + /** + * Gets the deserialized input of the current task orchestration. + * + * @param targetType the {@link Class} object associated with {@code V} + * @param the expected type of the orchestrator input + * @return the deserialized input as an object of type {@code V} or {@code null} if no input was provided. + */ + V getInput(Class targetType); + + /** + * Gets the unique ID of the current orchestration instance. + * + * @return the unique ID of the current orchestration instance + */ + String getInstanceId(); + + /** + * Gets the app ID of the current orchestration instance, if available. + * This is used for cross-app workflow routing. + * + * @return the app ID of the current orchestration instance, or null if not available + */ + String getAppId(); + + /** + * Gets the current orchestration time in UTC. + * + * @return the current orchestration time in UTC + */ + Instant getCurrentInstant(); + + /** + * Gets a value indicating whether the orchestrator is currently replaying a previous execution. + * + *

+   * Orchestrator functions are "replayed" after being unloaded from memory to reconstruct local variable state.
+   * During a replay, previously executed tasks will be completed automatically with previously seen values
+   * that are stored in the orchestration history. Once the orchestrator reaches the point where it's no longer
+   * replaying existing history, this method will return {@code false}.

+ * + *

+   * You can use this method if you have logic that needs to run only when not replaying. For example,
+   * certain types of application logging may become too noisy when duplicated as part of replay. The
+   * application code could check to see whether the function is being replayed and then issue the log statements
+   * when this value is {@code false}.
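+   *
+   * <p>For illustration only, replay-aware logging might look like the following sketch (the {@code logger} is
+   * assumed to exist in the application):
+   * <pre>{@code
+   * if (!ctx.getIsReplaying()) {
+   *     // Only log on the first execution of this section, not on subsequent replays.
+   *     logger.info("Scheduling the approval step");
+   * }
+   * }</pre>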

+ * + * @return {@code true} if the orchestrator is replaying, otherwise {@code false} + */ + boolean getIsReplaying(); + + /** + * Returns a new {@code Task} that is completed when all tasks in {@code tasks} completes. + * See {@link #allOf(Task[])} for more detailed information. + * + * @param tasks the list of {@code Task} objects + * @param the return type of the {@code Task} objects + * @return a new {@code Task} that is completed when any of the given {@code Task}s complete + * @see #allOf(Task[]) + */ + Task> allOf(List> tasks); + + // TODO: Update the description of allOf to be more specific about the exception behavior. + + // https://github.io.dapr.durabletask-java/issues/54 + + /** + * Returns a new {@code Task} that is completed when all the given {@code Task}s complete. If any of the given + * {@code Task}s complete with an exception, the returned {@code Task} will also complete with + * an {@link CompositeTaskFailedException} containing details of the first encountered failure. + * The value of the returned {@code Task} is an ordered list of + * the return values of the given tasks. If no tasks are provided, returns a {@code Task} completed with value + * {@code null}. + * + *

+   * This method is useful for awaiting the completion of a set of independent tasks before continuing to the next
+   * step in the orchestration, as in the following example:

+ *
+   * <pre>{@code
+   * Task<String> t1 = ctx.callActivity("MyActivity", String.class);
+   * Task<String> t2 = ctx.callActivity("MyActivity", String.class);
+   * Task<String> t3 = ctx.callActivity("MyActivity", String.class);
+   *
+   * List<String> orderedResults = ctx.allOf(t1, t2, t3).await();
+   * }</pre>
+ * + *

+   * Exceptions in any of the given tasks result in an unchecked {@link CompositeTaskFailedException}.
+   * This exception can be inspected to obtain failure details of individual {@link Task}s.

+ *
+   * <pre>{@code
+   * try {
+   *     List<String> orderedResults = ctx.allOf(t1, t2, t3).await();
+   * } catch (CompositeTaskFailedException e) {
+   *     List<Exception> exceptions = e.getExceptions();
+   * }
+   * }</pre>
+ * + * @param tasks the {@code Task}s + * @param the return type of the {@code Task} objects + * @return the values of the completed {@code Task} objects in the same order as the source list + */ + default Task> allOf(Task... tasks) { + return this.allOf(Arrays.asList(tasks)); + } + + /** + * Returns a new {@code Task} that is completed when any of the tasks in {@code tasks} completes. + * See {@link #anyOf(Task[])} for more detailed information. + * + * @param tasks the list of {@code Task} objects + * @return a new {@code Task} that is completed when any of the given {@code Task}s complete + * @see #anyOf(Task[]) + */ + Task> anyOf(List> tasks); + + /** + * Returns a new {@code Task} that is completed when any of the given {@code Task}s complete. The value of the + * new {@code Task} is a reference to the completed {@code Task} object. If no tasks are provided, returns a + * {@code Task} that never completes. + * + *

+   * This method is useful for waiting on multiple concurrent tasks and performing a task-specific operation when the
+   * first task completes, as in the following example:

+ *
+   * <pre>{@code
+   * Task event1 = ctx.waitForExternalEvent("Event1");
+   * Task event2 = ctx.waitForExternalEvent("Event2");
+   * Task event3 = ctx.waitForExternalEvent("Event3");
+   *
+   * Task winner = ctx.anyOf(event1, event2, event3).await();
+   * if (winner == event1) {
+   *     // ...
+   * } else if (winner == event2) {
+   *     // ...
+   * } else if (winner == event3) {
+   *     // ...
+   * }
+   * }</pre>
+ * + *

+   * The {@code anyOf} method can also be used for implementing long-running timeouts, as in the following example:

+ *
+   * <pre>{@code
+   * Task activityTask = ctx.callActivity("SlowActivity");
+   * Task timeoutTask = ctx.createTimer(Duration.ofMinutes(30));
+   *
+   * Task winner = ctx.anyOf(activityTask, timeoutTask).await();
+   * if (winner == activityTask) {
+   *     // completion case
+   * } else {
+   *     // timeout case
+   * }
+   * }</pre>
+ * + * @param tasks the list of {@code Task} objects + * @return a new {@code Task} that is completed when any of the given {@code Task}s complete + */ + default Task> anyOf(Task... tasks) { + return this.anyOf(Arrays.asList(tasks)); + } + + /** + * Creates a durable timer that expires after the specified delay. + * + *

+   * Specifying a long delay (for example, a delay of a few days or more) may result in the creation of multiple,
+   * internally-managed durable timers. The orchestration code doesn't need to be aware of this behavior. However,
+   * it may be visible in framework logs and the stored history state.
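+   *
+   * <p>For illustration only, a three-day delay inside an orchestrator might be expressed as:
+   * <pre>{@code
+   * // The orchestrator is unloaded while the timer is pending and resumes when it fires.
+   * ctx.createTimer(Duration.ofDays(3)).await();
+   * }</pre>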

+ * + * @param delay the amount of time before the timer should expire + * @return a new {@code Task} that completes after the specified delay + */ + Task createTimer(Duration delay); + + /** + * Creates a durable timer that expires after the specified timestamp with specific zone. + * + *

+   * Specifying a long delay (for example, a delay of a few days or more) may result in the creation of multiple,
+   * internally-managed durable timers. The orchestration code doesn't need to be aware of this behavior. However,
+   * it may be visible in framework logs and the stored history state.

+ * + * @param zonedDateTime timestamp with specific zone when the timer should expire + * @return a new {@code Task} that completes after the specified delay + */ + Task createTimer(ZonedDateTime zonedDateTime); + + /** + * Transitions the orchestration into the {@link OrchestrationRuntimeStatus#COMPLETED} state with the given output. + * + * @param output the serializable output of the completed orchestration + */ + void complete(Object output); + + /** + * Asynchronously invokes an activity by name and with the specified input value and returns a new {@link Task} + * that completes when the activity completes. If the activity completes successfully, the returned {@code Task}'s + * value will be the activity's output. If the activity fails, the returned {@code Task} will complete exceptionally + * with a {@link TaskFailedException}. + * + *

+   * Activities are the basic unit of work in a durable task orchestration. Unlike orchestrators, which are not
+   * allowed to do any I/O or call non-deterministic APIs, activities have no implementation restrictions.

+ * + *

+   * An activity may execute on the local machine or on a remote machine. The exact behavior depends on the underlying
+   * storage provider, which is responsible for distributing tasks across machines. In general, you should never make
+   * any assumptions about where an activity will run. You should also assume at-least-once execution guarantees for
+   * activities, meaning that an activity may be executed twice if, for example, there is a process failure before
+   * the activity's result is saved into storage.

+ * + *

+   * Both the inputs and outputs of activities are serialized and stored in durable storage. It's highly recommended
+   * to not include any sensitive data in activity inputs or outputs. It's also recommended to not use large payloads
+   * for activity inputs and outputs, which can result in expensive serialization and network utilization. For data
+   * that cannot be cheaply or safely persisted to storage, it's recommended to instead pass references
+   * (for example, a URL to a storage blob) to the data and have activities fetch the data directly as part of their
+   * implementation.
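+   *
+   * <p>For illustration only (the activity name, input, and return type are hypothetical), calling an activity with
+   * a retry policy might look like:
+   * <pre>{@code
+   * RetryPolicy retryPolicy = new RetryPolicy(3, Duration.ofSeconds(5));
+   * String receiptId = ctx.callActivity(
+   *         "ChargeCustomer", order, TaskOptions.withRetryPolicy(retryPolicy), String.class).await();
+   * }</pre>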

+ * + * @param name the name of the activity to call + * @param input the serializable input to pass to the activity + * @param options additional options that control the execution and processing of the activity + * @param returnType the expected class type of the activity output + * @param the expected type of the activity output + * @return a new {@link Task} that completes when the activity completes or fails + */ + Task callActivity(String name, Object input, TaskOptions options, Class returnType); + + /** + * Asynchronously invokes an activity by name and returns a new {@link Task} that completes when the activity + * completes. See {@link #callActivity(String, Object, TaskOptions, Class)} for a complete description. + * + * @param name the name of the activity to call + * @return a new {@link Task} that completes when the activity completes or fails + * @see #callActivity(String, Object, TaskOptions, Class) + */ + default Task callActivity(String name) { + return this.callActivity(name, Void.class); + } + + /** + * Asynchronously invokes an activity by name and with the specified input value and returns a new {@link Task} + * that completes when the activity completes. See {@link #callActivity(String, Object, TaskOptions, Class)} for a + * complete description. + * + * @param name the name of the activity to call + * @param input the serializable input to pass to the activity + * @return a new {@link Task} that completes when the activity completes or fails + */ + default Task callActivity(String name, Object input) { + return this.callActivity(name, input, null, Void.class); + } + + /** + * Asynchronously invokes an activity by name and returns a new {@link Task} that completes when the activity + * completes. If the activity completes successfully, the returned {@code Task}'s value will be the activity's + * output. See {@link #callActivity(String, Object, TaskOptions, Class)} for a complete description. + * + * @param name the name of the activity to call + * @param returnType the expected class type of the activity output + * @param the expected type of the activity output + * @return a new {@link Task} that completes when the activity completes or fails + */ + default Task callActivity(String name, Class returnType) { + return this.callActivity(name, null, null, returnType); + } + + /** + * Asynchronously invokes an activity by name and with the specified input value and returns a new {@link Task} + * that completes when the activity completes.If the activity completes successfully, the returned {@code Task}'s + * value will be the activity's output. See {@link #callActivity(String, Object, TaskOptions, Class)} for a + * complete description. + * + * @param name the name of the activity to call + * @param input the serializable input to pass to the activity + * @param returnType the expected class type of the activity output + * @param the expected type of the activity output + * @return a new {@link Task} that completes when the activity completes or fails + */ + default Task callActivity(String name, Object input, Class returnType) { + return this.callActivity(name, input, null, returnType); + } + + /** + * Asynchronously invokes an activity by name and with the specified input value and returns a new {@link Task} + * that completes when the activity completes. See {@link #callActivity(String, Object, TaskOptions, Class)} for a + * complete description. 
+ * + * @param name the name of the activity to call + * @param input the serializable input to pass to the activity + * @param options additional options that control the execution and processing of the activity + * @return a new {@link Task} that completes when the activity completes or fails + */ + default Task callActivity(String name, Object input, TaskOptions options) { + return this.callActivity(name, input, options, Void.class); + } + + /** + * Restarts the orchestration with a new input and clears its history. See {@link #continueAsNew(Object, boolean)} + * for a full description. + * + * @param input the serializable input data to re-initialize the instance with + */ + default void continueAsNew(Object input) { + this.continueAsNew(input, true); + } + + /** + * Restarts the orchestration with a new input and clears its history. + * + *

This method is primarily designed for eternal orchestrations, which are orchestrations that + * may not ever complete. It works by restarting the orchestration, providing it with a new input, + * and truncating the existing orchestration history. It allows an orchestration to continue + * running indefinitely without having its history grow unbounded. The benefits of periodically + * truncating history include decreased memory usage, decreased storage volumes, and shorter orchestrator + * replays when rebuilding state.

+ * + *

The results of any incomplete tasks will be discarded when an orchestrator calls {@code continueAsNew}. + * For example, if a timer is scheduled and then {@code continueAsNew} is called before the timer fires, the timer + * event will be discarded. The only exception to this is external events. By default, if an external event is + * received by an orchestration but not yet processed, the event is saved in the orchestration state until it is + * received by a call to {@link #waitForExternalEvent}. These events will remain in memory + * even after an orchestrator restarts using {@code continueAsNew}. This behavior can be disabled by specifying + * {@code false} for the {@code preserveUnprocessedEvents} parameter value.

+ * + *

Orchestrator implementations should complete immediately after calling the {@code continueAsNew} method.
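A sketch of the eternal-orchestration pattern described above (illustrative, not part of this change). The "CheckHealth" activity and the one-hour interval are hypothetical; the timer and {@code continueAsNew} calls are the ones documented in this interface.

    import java.time.Duration;
    import io.dapr.durabletask.TaskOrchestration;
    import io.dapr.durabletask.TaskOrchestrationContext;

    public class PeriodicMonitorOrchestration implements TaskOrchestration {
        @Override
        public void run(TaskOrchestrationContext ctx) {
            Integer previous = ctx.getInput(Integer.class);
            int iteration = previous == null ? 0 : previous;

            // One unit of work, then a durable delay before the next pass.
            ctx.callActivity("CheckHealth", iteration, Void.class).await();
            ctx.createTimer(Duration.ofHours(1)).await();

            // Restart with a new input; the history accumulated so far is truncated.
            ctx.continueAsNew(iteration + 1);
        }
    }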

+ * + * @param input the serializable input data to re-initialize the instance with + * @param preserveUnprocessedEvents {@code true} to push unprocessed external events into the new orchestration + * history, otherwise {@code false} + */ + void continueAsNew(Object input, boolean preserveUnprocessedEvents); + + /** + * Create a new Uuid that is safe for replay within an orchestration or operation. + * + *

The default implementation of this method creates a name-based Uuid + * using the algorithm from RFC 4122 §4.3. The name input used to generate + * this value is a combination of the orchestration instance ID and an + * internally managed sequence number. + *
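A short illustration (not part of this change) of why the deterministic UUID matters during replay; the "CreatePayment" activity name is hypothetical.

    // Inside an orchestrator's run(ctx) method:

    // Unsafe: java.util.UUID.randomUUID() yields a different value on every replay,
    // which breaks determinism.
    // UUID key = UUID.randomUUID();

    // Safe: derived from the instance ID and an internal counter, so replays of this
    // orchestration observe the same value as the original execution.
    UUID idempotencyKey = ctx.newUuid();
    ctx.callActivity("CreatePayment", idempotencyKey.toString(), Void.class).await();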

+ * + * @return a deterministic Uuid + */ + default UUID newUuid() { + throw new RuntimeException("No implementation found."); + } + + /** + * Sends an external event to another orchestration instance. + * + * @param instanceID the unique ID of the receiving orchestration instance. + * @param eventName the name of the event to send + */ + default void sendEvent(String instanceID, String eventName) { + this.sendEvent(instanceID, eventName, null); + } + + /** + * Sends an external event to another orchestration instance. + * + * @param instanceId the unique ID of the receiving orchestration instance. + * @param eventName the name of the event to send + * @param eventData the payload of the event to send + */ + void sendEvent(String instanceId, String eventName, Object eventData); + + /** + * Asynchronously invokes another orchestrator as a sub-orchestration and returns a {@link Task} that completes + * when the sub-orchestration completes. + * + *

See {@link #callSubOrchestrator(String, Object, String, TaskOptions, Class)} for a full description.

+ * + * @param name the name of the orchestrator to invoke + * @return a new {@link Task} that completes when the sub-orchestration completes or fails + * @see #callSubOrchestrator(String, Object, String, TaskOptions, Class) + */ + default Task callSubOrchestrator(String name) { + return this.callSubOrchestrator(name, null); + } + + /** + * Asynchronously invokes another orchestrator as a sub-orchestration and returns a {@link Task} that completes + * when the sub-orchestration completes. + * + *

See {@link #callSubOrchestrator(String, Object, String, TaskOptions, Class)} for a full description.

+ * + * @param name the name of the orchestrator to invoke + * @param input the serializable input to send to the sub-orchestration + * @return a new {@link Task} that completes when the sub-orchestration completes or fails + */ + default Task callSubOrchestrator(String name, Object input) { + return this.callSubOrchestrator(name, input, null); + } + + /** + * Asynchronously invokes another orchestrator as a sub-orchestration and returns a {@link Task} that completes + * when the sub-orchestration completes. + * + *

See {@link #callSubOrchestrator(String, Object, String, TaskOptions, Class)} for a full description.

+ * + * @param name the name of the orchestrator to invoke + * @param input the serializable input to send to the sub-orchestration + * @param returnType the expected class type of the sub-orchestration output + * @param the expected type of the sub-orchestration output + * @return a new {@link Task} that completes when the sub-orchestration completes or fails + */ + default Task callSubOrchestrator(String name, Object input, Class returnType) { + return this.callSubOrchestrator(name, input, null, returnType); + } + + /** + * Asynchronously invokes another orchestrator as a sub-orchestration and returns a {@link Task} that completes + * when the sub-orchestration completes. + * + *

See {@link #callSubOrchestrator(String, Object, String, TaskOptions, Class)} for a full description.

+ * + * @param name the name of the orchestrator to invoke + * @param input the serializable input to send to the sub-orchestration + * @param instanceID the unique ID of the sub-orchestration + * @param returnType the expected class type of the sub-orchestration output + * @param the expected type of the sub-orchestration output + * @return a new {@link Task} that completes when the sub-orchestration completes or fails + */ + default Task callSubOrchestrator(String name, Object input, String instanceID, Class returnType) { + return this.callSubOrchestrator(name, input, instanceID, null, returnType); + } + + /** + * Asynchronously invokes another orchestrator as a sub-orchestration and returns a {@link Task} that completes + * when the sub-orchestration completes. + * + *

See {@link #callSubOrchestrator(String, Object, String, TaskOptions, Class)} for a full description.

+ * + * @param name the name of the orchestrator to invoke + * @param input the serializable input to send to the sub-orchestration + * @param instanceID the unique ID of the sub-orchestration + * @param options additional options that control the execution and processing of the activity + * @return a new {@link Task} that completes when the sub-orchestration completes or fails + */ + default Task callSubOrchestrator(String name, Object input, String instanceID, TaskOptions options) { + return this.callSubOrchestrator(name, input, instanceID, options, Void.class); + } + + /** + * Asynchronously invokes another orchestrator as a sub-orchestration and returns a {@link Task} that completes + * when the sub-orchestration completes. If the sub-orchestration completes successfully, the returned + * {@code Task}'s value will be the activity's output. If the sub-orchestration fails, the returned {@code Task} + * will complete exceptionally with a {@link TaskFailedException}. + * + *

A sub-orchestration has its own instance ID, history, and status that are independent of the parent orchestrator + * that started it. There are many advantages to breaking down large orchestrations into sub-orchestrations:

+ *
    + *
  • Splitting large orchestrations into a series of smaller sub-orchestrations can make code more maintainable.
  • Distributing orchestration logic across multiple compute nodes concurrently is useful if orchestration logic otherwise needs to coordinate a lot of tasks.
  • Memory usage and CPU overhead can be reduced by keeping the history of parent orchestrations smaller.
+ * + *

The disadvantage is that there is overhead associated with starting a sub-orchestration and processing its + * output. This is typically only an issue for very small orchestrations.

+ * + *

Because sub-orchestrations are independent of their parents, terminating a parent orchestration does not affect + * any sub-orchestrations. Sub-orchestrations must be terminated independently using their unique instance ID, + * which is specified using the {@code instanceID} parameter.
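A fan-out sketch for illustration (not part of this change): the parent derives a predictable instance ID for each child so the children can later be queried or terminated independently, as described above. The "ProcessRegion" sub-orchestrator and the input shape are hypothetical.

    import java.util.ArrayList;
    import java.util.List;
    import io.dapr.durabletask.TaskOrchestration;
    import io.dapr.durabletask.TaskOrchestrationContext;

    public class FanOutOrchestration implements TaskOrchestration {
        @Override
        public void run(TaskOrchestrationContext ctx) {
            String[] regions = ctx.getInput(String[].class);
            List<Integer> processedCounts = new ArrayList<>();

            for (String region : regions) {
                // Deterministic child instance ID, independent of the parent's lifecycle.
                String childId = ctx.getInstanceId() + "-" + region;
                Integer processed = ctx
                        .callSubOrchestrator("ProcessRegion", region, childId, Integer.class)
                        .await();
                processedCounts.add(processed);
            }

            ctx.complete(processedCounts);
        }
    }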

+ * + * @param name the name of the orchestrator to invoke + * @param input the serializable input to send to the sub-orchestration + * @param instanceID the unique ID of the sub-orchestration + * @param options additional options that control the execution and processing of the activity + * @param returnType the expected class type of the sub-orchestration output + * @param the expected type of the sub-orchestration output + * @return a new {@link Task} that completes when the sub-orchestration completes or fails + */ + Task callSubOrchestrator( + String name, + @Nullable Object input, + @Nullable String instanceID, + @Nullable TaskOptions options, + Class returnType); + + /** + * Waits for an event to be raised named {@code name} and returns a {@link Task} that completes when the event is + * received or is canceled when {@code timeout} expires. + * + *

External clients can raise events to a waiting orchestration instance using the + * {@link DurableTaskClient#raiseEvent} method.

+ * + *

If the current orchestration is not yet waiting for an event named {@code name}, then the event will be saved in + * the orchestration instance state and dispatched immediately when this method is called. This event saving occurs + * even if the current orchestrator cancels the wait operation before the event is received.

+ * + *

Orchestrators can wait for the same event name multiple times, so waiting for multiple events with the same name + * is allowed. Each external event received by an orchestrator will complete just one task returned by this method. + *
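A human-approval sketch for illustration (not part of this change). A client would raise the "Approval" event through {@link DurableTaskClient#raiseEvent} as described above; the "RequestApproval" activity and the Boolean payload are hypothetical.

    import java.time.Duration;
    import io.dapr.durabletask.TaskCanceledException;
    import io.dapr.durabletask.TaskOrchestration;
    import io.dapr.durabletask.TaskOrchestrationContext;

    public class ApprovalOrchestration implements TaskOrchestration {
        @Override
        public void run(TaskOrchestrationContext ctx) {
            ctx.callActivity("RequestApproval", ctx.getInput(String.class), Void.class).await();
            try {
                // Durably waits until a client raises an "Approval" event, or 72 hours pass.
                Boolean approved = ctx.waitForExternalEvent("Approval", Duration.ofHours(72), Boolean.class).await();
                ctx.complete(Boolean.TRUE.equals(approved) ? "approved" : "rejected");
            } catch (TaskCanceledException timedOut) {
                ctx.complete("timed out");
            }
        }
    }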

+ * + * @param name the case-insensitive name of the event to wait for + * @param timeout the amount of time to wait before canceling the returned {@code Task} + * @param dataType the expected class type of the event data payload + * @param the expected type of the event data payload + * @return a new {@link Task} that completes when the external event is received or when {@code timeout} expires + * @throws TaskCanceledException if the specified {@code timeout} value expires before the event is received + */ + Task waitForExternalEvent(String name, Duration timeout, Class dataType) throws TaskCanceledException; + + /** + * Waits for an event to be raised named {@code name} and returns a {@link Task} that completes when the event is + * received or is canceled when {@code timeout} expires. + * + *

See {@link #waitForExternalEvent(String, Duration, Class)} for a full description.

+ * + * @param name the case-insensitive name of the event to wait for + * @param timeout the amount of time to wait before canceling the returned {@code Task} + * @return a new {@link Task} that completes when the external event is received or when {@code timeout} expires + * @throws TaskCanceledException if the specified {@code timeout} value expires before the event is received + */ + default Task waitForExternalEvent(String name, Duration timeout) throws TaskCanceledException { + return this.waitForExternalEvent(name, timeout, Void.class); + } + + /** + * Waits for an event to be raised named {@code name} and returns a {@link Task} that completes when the event is + * received. + * + *

See {@link #waitForExternalEvent(String, Duration, Class)} for a full description.

+ * + * @param name the case-insensitive name of the event to wait for + * @return a new {@link Task} that completes when the external event is received + */ + default Task waitForExternalEvent(String name) { + return this.waitForExternalEvent(name, Void.class); + } + + /** + * Waits for an event to be raised named {@code name} and returns a {@link Task} that completes when the event is + * received. + * + *

See {@link #waitForExternalEvent(String, Duration, Class)} for a full description.

+ * + * @param name the case-insensitive name of the event to wait for + * @param dataType the expected class type of the event data payload + * @param the expected type of the event data payload + * @return a new {@link Task} that completes when the external event is received + */ + default Task waitForExternalEvent(String name, Class dataType) { + try { + return this.waitForExternalEvent(name, null, dataType); + } catch (TaskCanceledException e) { + // This should never happen because of the max duration + throw new RuntimeException("An unexpected exception was thrown while waiting for an external event.", e); + } + } + + /** + * Assigns a custom status value to the current orchestration. + * + *

The {@code customStatus} value is serialized and stored in orchestration state and will be made available to the + * orchestration status query APIs, such as {@link DurableTaskClient#getInstanceMetadata}. The serialized value + * must not exceed 16 KB of UTF-16 encoded text.

+ * + *

Use {@link #clearCustomStatus()} to remove the custom status value from the orchestration state.
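A progress-reporting sketch for illustration (not part of this change); the step activities are hypothetical and the status strings are arbitrary small values well under the documented 16 KB limit.

    // Inside an orchestrator's run(ctx) method:
    ctx.setCustomStatus("downloading");
    ctx.callActivity("Download", null, Void.class).await();

    ctx.setCustomStatus("transforming");
    ctx.callActivity("Transform", null, Void.class).await();

    // Remove the status once it is no longer meaningful, then finish.
    ctx.clearCustomStatus();
    ctx.complete(null);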

+ * + * @param customStatus A serializable value to assign as the custom status value. + */ + void setCustomStatus(Object customStatus); + + /** + * Clears the orchestration's custom status. + */ + void clearCustomStatus(); +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestrationExecutor.java b/durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestrationExecutor.java new file mode 100644 index 0000000000..7a3436b036 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestrationExecutor.java @@ -0,0 +1,1515 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import com.google.protobuf.StringValue; +import com.google.protobuf.Timestamp; +import io.dapr.durabletask.implementation.protobuf.OrchestratorService; +import io.dapr.durabletask.implementation.protobuf.OrchestratorService.ScheduleTaskAction.Builder; +import io.dapr.durabletask.interruption.ContinueAsNewInterruption; +import io.dapr.durabletask.interruption.OrchestratorBlockedException; +import io.dapr.durabletask.util.UuidGenerator; + +import javax.annotation.Nullable; +import java.time.Duration; +import java.time.Instant; +import java.time.ZonedDateTime; +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Queue; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.CancellationException; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.function.IntFunction; +import java.util.logging.Logger; + +final class TaskOrchestrationExecutor { + + private static final String EMPTY_STRING = ""; + private final HashMap orchestrationFactories; + private final DataConverter dataConverter; + private final Logger logger; + private final Duration maximumTimerInterval; + private final String appId; + + public TaskOrchestrationExecutor( + HashMap orchestrationFactories, + DataConverter dataConverter, + Duration maximumTimerInterval, + Logger logger, + String appId) { + this.orchestrationFactories = orchestrationFactories; + this.dataConverter = dataConverter; + this.maximumTimerInterval = maximumTimerInterval; + this.logger = logger; + this.appId = appId; // extracted from router + } + + public TaskOrchestratorResult execute(List pastEvents, + List newEvents) { + ContextImplTask context = new ContextImplTask(pastEvents, newEvents); + + boolean completed = false; + try { + // Play through the history events until either we've played through everything + // or we receive a yield signal + while (context.processNextEvent()) { + /* no method body */ + } + completed = true; + logger.finest("The orchestrator execution completed normally"); + } catch (OrchestratorBlockedException 
orchestratorBlockedException) { + logger.fine("The orchestrator has yielded and will await for new events."); + } catch (ContinueAsNewInterruption continueAsNewInterruption) { + logger.fine("The orchestrator has continued as new."); + context.complete(null); + } catch (Exception e) { + // The orchestrator threw an unhandled exception - fail it + // TODO: What's the right way to log this? + logger.warning("The orchestrator failed with an unhandled exception: " + e.toString()); + context.fail(new FailureDetails(e)); + } + + if ((context.continuedAsNew && !context.isComplete) || (completed && context.pendingActions.isEmpty() + && !context.waitingForEvents())) { + // There are no further actions for the orchestrator to take so auto-complete the orchestration. + context.complete(null); + } + + return new TaskOrchestratorResult(context.pendingActions.values(), context.getCustomStatus()); + } + + private class ContextImplTask implements TaskOrchestrationContext { + + private String orchestratorName; + private String rawInput; + private String instanceId; + private Instant currentInstant; + private boolean isComplete; + private boolean isSuspended; + private boolean isReplaying = true; + private int newUuidCounter; + private String appId; + + // LinkedHashMap to maintain insertion order when returning the list of pending actions + private final Map pendingActions = new LinkedHashMap<>(); + private final Map> openTasks = new HashMap<>(); + private final Map>> outstandingEvents = new LinkedHashMap<>(); + private final List unprocessedEvents = new LinkedList<>(); + private final Queue eventsWhileSuspended = new ArrayDeque<>(); + private final DataConverter dataConverter = TaskOrchestrationExecutor.this.dataConverter; + private final Duration maximumTimerInterval = TaskOrchestrationExecutor.this.maximumTimerInterval; + private final Logger logger = TaskOrchestrationExecutor.this.logger; + private final OrchestrationHistoryIterator historyEventPlayer; + private int sequenceNumber; + private boolean continuedAsNew; + private Object continuedAsNewInput; + private boolean preserveUnprocessedEvents; + private Object customStatus; + + public ContextImplTask(List pastEvents, + List newEvents) { + this.historyEventPlayer = new OrchestrationHistoryIterator(pastEvents, newEvents); + } + + @Override + public String getName() { + // TODO: Throw if name is null + return this.orchestratorName; + } + + private void setName(String name) { + // TODO: Throw if name is not null + this.orchestratorName = name; + } + + private void setInput(String rawInput) { + this.rawInput = rawInput; + } + + @Override + public T getInput(Class targetType) { + if (this.rawInput == null || this.rawInput.length() == 0) { + return null; + } + + return this.dataConverter.deserialize(this.rawInput, targetType); + } + + @Override + public String getInstanceId() { + // TODO: Throw if instance ID is null + return this.instanceId; + } + + private void setInstanceId(String instanceId) { + // TODO: Throw if instance ID is not null + this.instanceId = instanceId; + } + + @Override + public String getAppId() { + return this.appId; + } + + private void setAppId(String appId) { + this.appId = appId; + } + + @Override + public Instant getCurrentInstant() { + // TODO: Throw if instant is null + return this.currentInstant; + } + + private void setCurrentInstant(Instant instant) { + // This will be set multiple times as the orchestration progresses + this.currentInstant = instant; + } + + private String getCustomStatus() { + return this.customStatus != 
null ? this.dataConverter.serialize(this.customStatus) : EMPTY_STRING; + } + + @Override + public void setCustomStatus(Object customStatus) { + this.customStatus = customStatus; + } + + @Override + public void clearCustomStatus() { + this.setCustomStatus(null); + } + + @Override + public boolean getIsReplaying() { + return this.isReplaying; + } + + private void setDoneReplaying() { + this.isReplaying = false; + } + + public Task completedTask(V value) { + CompletableTask task = new CompletableTask<>(); + task.complete(value); + return task; + } + + @Override + public Task> allOf(List> tasks) { + Helpers.throwIfArgumentNull(tasks, "tasks"); + + CompletableFuture[] futures = tasks.stream() + .map(t -> t.future) + .toArray((IntFunction[]>) CompletableFuture[]::new); + + Function> resultPath = x -> { + List results = new ArrayList<>(futures.length); + + // All futures are expected to be completed at this point + for (CompletableFuture cf : futures) { + try { + results.add(cf.get()); + } catch (Exception ex) { + results.add(null); + } + } + return results; + }; + + Function> exceptionPath = throwable -> { + ArrayList exceptions = new ArrayList<>(futures.length); + for (CompletableFuture cf : futures) { + try { + cf.get(); + } catch (ExecutionException ex) { + exceptions.add((Exception) ex.getCause()); + } catch (Exception ex) { + exceptions.add(ex); + } + } + throw new CompositeTaskFailedException( + String.format( + "%d out of %d tasks failed with an exception. See the exceptions list for details.", + exceptions.size(), + futures.length), + exceptions); + }; + CompletableFuture> future = CompletableFuture.allOf(futures) + .thenApply(resultPath) + .exceptionally(exceptionPath); + + return new CompoundTask<>(tasks, future); + } + + @Override + public Task> anyOf(List> tasks) { + Helpers.throwIfArgumentNull(tasks, "tasks"); + + CompletableFuture[] futures = tasks.stream() + .map(t -> t.future) + .toArray((IntFunction[]>) CompletableFuture[]::new); + + CompletableFuture> future = CompletableFuture.anyOf(futures).thenApply(x -> { + // Return the first completed task in the list. Unlike the implementation in other languages, + // this might not necessarily be the first task that completed, so calling code shouldn't make + // assumptions about this. Note that changing this behavior later could be breaking. + for (Task task : tasks) { + if (task.isDone()) { + return task; + } + } + + // Should never get here + return completedTask(null); + }); + + return new CompoundTask(tasks, future); + } + + @Override + public Task callActivity( + String name, + @Nullable Object input, + @Nullable TaskOptions options, + Class returnType) { + Helpers.throwIfOrchestratorComplete(this.isComplete); + Helpers.throwIfArgumentNull(name, "name"); + Helpers.throwIfArgumentNull(returnType, "returnType"); + + if (input instanceof TaskOptions) { + throw new IllegalArgumentException("TaskOptions cannot be used as an input. 
" + + "Did you call the wrong method overload?"); + } + + String serializedInput = this.dataConverter.serialize(input); + Builder scheduleTaskBuilder = OrchestratorService.ScheduleTaskAction.newBuilder().setName(name) + .setTaskExecutionId(newUuid().toString()); + if (serializedInput != null) { + scheduleTaskBuilder.setInput(StringValue.of(serializedInput)); + } + + // Add router information for cross-app routing + // Router always has a source app ID from EXECUTIONSTARTED event + OrchestratorService.TaskRouter.Builder routerBuilder = OrchestratorService.TaskRouter.newBuilder() + .setSourceAppID(this.appId); + + // Add target app ID if specified in options + if (options != null && options.hasAppID()) { + String targetAppId = options.getAppID(); + OrchestratorService.TaskRouter router = OrchestratorService.TaskRouter.newBuilder() + .setSourceAppID(this.appId) + .setTargetAppID(targetAppId) + .build(); + scheduleTaskBuilder.setRouter(router); + this.logger.fine(() -> String.format( + "cross app routing detected: source=%s, target=%s", + this.appId, targetAppId)); + } + TaskFactory taskFactory = () -> { + int id = this.sequenceNumber++; + OrchestratorService.ScheduleTaskAction scheduleTaskAction = scheduleTaskBuilder.build(); + OrchestratorService.OrchestratorAction.Builder actionBuilder = OrchestratorService.OrchestratorAction + .newBuilder() + .setId(id) + .setScheduleTask(scheduleTaskBuilder); + if (options != null && options.hasAppID()) { + String targetAppId = options.getAppID(); + OrchestratorService.TaskRouter actionRouter = OrchestratorService.TaskRouter.newBuilder() + .setSourceAppID(this.appId) + .setTargetAppID(targetAppId) + .build(); + actionBuilder.setRouter(actionRouter); + } + this.pendingActions.put(id, actionBuilder.build()); + + if (!this.isReplaying) { + this.logger.fine(() -> String.format( + "%s: calling activity '%s' (#%d) with serialized input: %s", + this.instanceId, + name, + id, + serializedInput != null ? serializedInput : "(null)")); + } + + CompletableTask task = new CompletableTask<>(); + TaskRecord record = new TaskRecord<>(task, name, returnType); + this.openTasks.put(id, record); + return task; + }; + + return this.createAppropriateTask(taskFactory, options); + } + + @Override + public void continueAsNew(Object input, boolean preserveUnprocessedEvents) { + Helpers.throwIfOrchestratorComplete(this.isComplete); + + this.continuedAsNew = true; + this.continuedAsNewInput = input; + this.preserveUnprocessedEvents = preserveUnprocessedEvents; + + // The ContinueAsNewInterruption exception allows the orchestration to complete immediately and return back + // to the sidecar. + // We can send the current set of actions back to the worker and wait for new events to come in. + // This is *not* an exception - it's a normal part of orchestrator control flow. + throw new ContinueAsNewInterruption( + "The orchestrator invoked continueAsNew. 
This Throwable should never be caught by user code."); + } + + @Override + public UUID newUuid() { + final int version = 5; + final String hashV5 = "SHA-1"; + final String dnsNameSpace = "9e952958-5e33-4daf-827f-2fa12937b875"; + final String name = new StringBuilder(this.instanceId) + .append("-") + .append(this.currentInstant) + .append("-") + .append(this.newUuidCounter).toString(); + this.newUuidCounter++; + return UuidGenerator.generate(version, hashV5, UUID.fromString(dnsNameSpace), name); + } + + @Override + public void sendEvent(String instanceId, String eventName, Object eventData) { + Helpers.throwIfOrchestratorComplete(this.isComplete); + Helpers.throwIfArgumentNullOrWhiteSpace(instanceId, "instanceId"); + + int id = this.sequenceNumber++; + String serializedEventData = this.dataConverter.serialize(eventData); + OrchestratorService.OrchestrationInstance.Builder orchestrationInstanceBuilder = + OrchestratorService.OrchestrationInstance.newBuilder() + .setInstanceId(instanceId); + OrchestratorService.SendEventAction.Builder builder = OrchestratorService + .SendEventAction.newBuilder().setInstance(orchestrationInstanceBuilder) + .setName(eventName); + if (serializedEventData != null) { + builder.setData(StringValue.of(serializedEventData)); + } + OrchestratorService.OrchestratorAction.Builder actionBuilder = OrchestratorService.OrchestratorAction.newBuilder() + .setId(id) + .setSendEvent(builder); + + this.pendingActions.put(id, actionBuilder.build()); + + if (!this.isReplaying) { + this.logger.fine(() -> String.format( + "%s: sending event '%s' (#%d) with serialized event data: %s", + this.instanceId, + eventName, + id, + serializedEventData != null ? serializedEventData : "(null)")); + } + } + + @Override + public Task callSubOrchestrator( + String name, + @Nullable Object input, + @Nullable String instanceId, + @Nullable TaskOptions options, + Class returnType) { + Helpers.throwIfOrchestratorComplete(this.isComplete); + Helpers.throwIfArgumentNull(name, "name"); + Helpers.throwIfArgumentNull(returnType, "returnType"); + + if (input instanceof TaskOptions) { + throw new IllegalArgumentException("TaskOptions cannot be used as an input. " + + "Did you call the wrong method overload?"); + } + + String serializedInput = this.dataConverter.serialize(input); + OrchestratorService.CreateSubOrchestrationAction.Builder createSubOrchestrationActionBuilder = + OrchestratorService.CreateSubOrchestrationAction + .newBuilder().setName(name); + if (serializedInput != null) { + createSubOrchestrationActionBuilder.setInput(StringValue.of(serializedInput)); + } + + if (instanceId == null) { + instanceId = this.newUuid().toString(); + } + createSubOrchestrationActionBuilder.setInstanceId(instanceId); + + // TODO: @cicoyle - add suborchestration cross app logic here when its supported + TaskFactory taskFactory = () -> { + int id = this.sequenceNumber++; + this.pendingActions.put(id, OrchestratorService.OrchestratorAction.newBuilder() + .setId(id) + .setCreateSubOrchestration(createSubOrchestrationActionBuilder) + .build()); + + if (!this.isReplaying) { + this.logger.fine(() -> String.format( + "%s: calling sub-orchestration '%s' (#%d) with serialized input: %s", + this.instanceId, + name, + id, + serializedInput != null ? 
serializedInput : "(null)")); + } + + CompletableTask task = new CompletableTask<>(); + TaskRecord record = new TaskRecord<>(task, name, returnType); + this.openTasks.put(id, record); + return task; + }; + + return this.createAppropriateTask(taskFactory, options); + } + + private Task createAppropriateTask(TaskFactory taskFactory, TaskOptions options) { + // Retry policies and retry handlers will cause us to return a RetriableTask + if (options != null && (options.hasRetryPolicy() || options.hasRetryHandler())) { + return new RetriableTask(this, taskFactory, options.getRetryPolicy(), options.getRetryHandler()); + } else { + // Return a single vanilla task without any wrapper + return taskFactory.create(); + } + } + + public Task waitForExternalEvent(String name, Duration timeout, Class dataType) { + Helpers.throwIfOrchestratorComplete(this.isComplete); + Helpers.throwIfArgumentNull(name, "name"); + Helpers.throwIfArgumentNull(dataType, "dataType"); + + int id = this.sequenceNumber++; + + CompletableTask eventTask = new ExternalEventTask<>(name, id, timeout); + + // Check for a previously received event with the same name + for (OrchestratorService.HistoryEvent e : this.unprocessedEvents) { + OrchestratorService.EventRaisedEvent existing = e.getEventRaised(); + if (name.equalsIgnoreCase(existing.getName())) { + String rawEventData = existing.getInput().getValue(); + V data = this.dataConverter.deserialize(rawEventData, dataType); + eventTask.complete(data); + this.unprocessedEvents.remove(e); + return eventTask; + } + } + + boolean hasTimeout = !Helpers.isInfiniteTimeout(timeout); + + // Immediately cancel the task and return if the timeout is zero. + if (hasTimeout && timeout.isZero()) { + eventTask.cancel(); + return eventTask; + } + + // Add this task to the list of tasks waiting for an external event. + TaskRecord record = new TaskRecord<>(eventTask, name, dataType); + Queue> eventQueue = this.outstandingEvents.computeIfAbsent(name, k -> new LinkedList<>()); + eventQueue.add(record); + + // If a non-infinite timeout is specified, schedule an internal durable timer. + // If the timer expires and the external event task hasn't yet completed, we'll cancel the task. + if (hasTimeout) { + this.createTimer(timeout).future.thenRun(() -> { + if (!eventTask.isDone()) { + // Book-keeping - remove the task record for the canceled task + eventQueue.removeIf(t -> t.task == eventTask); + if (eventQueue.isEmpty()) { + this.outstandingEvents.remove(name); + } + + eventTask.cancel(); + } + }); + } + + return eventTask; + } + + private void handleTaskScheduled(OrchestratorService.HistoryEvent e) { + int taskId = e.getEventId(); + + OrchestratorService.TaskScheduledEvent taskScheduled = e.getTaskScheduled(); + + // The history shows that this orchestrator created a durable task in a previous execution. + // We can therefore remove it from the map of pending actions. If we can't find the pending + // action, then we assume a non-deterministic code violation in the orchestrator. + OrchestratorService.OrchestratorAction taskAction = this.pendingActions.remove(taskId); + if (taskAction == null) { + String message = String.format( + "Non-deterministic orchestrator detected: a history event scheduling an activity task with sequence " + + "ID %d and name '%s' was replayed but the current orchestrator implementation didn't actually " + + "schedule this task. 
Was a change made to the orchestrator code after this instance " + + "had already started running?", + taskId, + taskScheduled.getName()); + throw new NonDeterministicOrchestratorException(message); + } + } + + @SuppressWarnings("unchecked") + private void handleTaskCompleted(OrchestratorService.HistoryEvent e) { + OrchestratorService.TaskCompletedEvent completedEvent = e.getTaskCompleted(); + int taskId = completedEvent.getTaskScheduledId(); + TaskRecord record = this.openTasks.remove(taskId); + if (record == null) { + this.logger.warning("Discarding a potentially duplicate TaskCompleted event with ID = " + taskId); + return; + } + + String rawResult = completedEvent.getResult().getValue(); + + if (!this.isReplaying) { + // TODO: Structured logging + // TODO: Would it make more sense to put this log in the activity executor? + this.logger.fine(() -> String.format( + "%s: Activity '%s' (#%d) completed with serialized output: %s", + this.instanceId, + record.getTaskName(), + taskId, + rawResult != null ? rawResult : "(null)")); + + } + CompletableTask task = record.getTask(); + try { + Object result = this.dataConverter.deserialize(rawResult, record.getDataType()); + task.complete(result); + } catch (Exception ex) { + task.completeExceptionally(ex); + } + } + + private void handleTaskFailed(OrchestratorService.HistoryEvent e) { + OrchestratorService.TaskFailedEvent failedEvent = e.getTaskFailed(); + int taskId = failedEvent.getTaskScheduledId(); + TaskRecord record = this.openTasks.remove(taskId); + if (record == null) { + // TODO: Log a warning about a potential duplicate task completion event + return; + } + + FailureDetails details = new FailureDetails(failedEvent.getFailureDetails()); + + if (!this.isReplaying) { + // TODO: Log task failure, including the number of bytes in the result + } + + CompletableTask task = record.getTask(); + TaskFailedException exception = new TaskFailedException( + record.taskName, + taskId, + details); + task.completeExceptionally(exception); + } + + @SuppressWarnings("unchecked") + private void handleEventRaised(OrchestratorService.HistoryEvent e) { + OrchestratorService.EventRaisedEvent eventRaised = e.getEventRaised(); + String eventName = eventRaised.getName(); + + Queue> outstandingEventQueue = this.outstandingEvents.get(eventName); + if (outstandingEventQueue == null) { + // No code is waiting for this event. Buffer it in case user-code waits for it later. + this.unprocessedEvents.add(e); + return; + } + + // Signal the first waiter in the queue with this event payload. 
+ TaskRecord matchingTaskRecord = outstandingEventQueue.remove(); + if (outstandingEventQueue.isEmpty()) { + this.outstandingEvents.remove(eventName); + } + String rawResult = eventRaised.getInput().getValue(); + CompletableTask task = matchingTaskRecord.getTask(); + try { + Object result = this.dataConverter.deserialize( + rawResult, + matchingTaskRecord.getDataType()); + task.complete(result); + } catch (Exception ex) { + task.completeExceptionally(ex); + } + } + + private void handleEventWhileSuspended(OrchestratorService.HistoryEvent historyEvent) { + if (historyEvent.getEventTypeCase() != OrchestratorService.HistoryEvent.EventTypeCase.EXECUTIONSUSPENDED) { + eventsWhileSuspended.offer(historyEvent); + } + } + + private void handleExecutionSuspended(OrchestratorService.HistoryEvent historyEvent) { + this.isSuspended = true; + } + + private void handleExecutionResumed(OrchestratorService.HistoryEvent historyEvent) { + this.isSuspended = false; + while (!eventsWhileSuspended.isEmpty()) { + this.processEvent(eventsWhileSuspended.poll()); + } + } + + public Task createTimer(Duration duration) { + Helpers.throwIfOrchestratorComplete(this.isComplete); + Helpers.throwIfArgumentNull(duration, "duration"); + + Instant finalFireAt = this.currentInstant.plus(duration); + return createTimer(finalFireAt); + } + + @Override + public Task createTimer(ZonedDateTime zonedDateTime) { + Helpers.throwIfOrchestratorComplete(this.isComplete); + Helpers.throwIfArgumentNull(zonedDateTime, "zonedDateTime"); + + Instant finalFireAt = zonedDateTime.toInstant(); + return createTimer(finalFireAt); + } + + private Task createTimer(Instant finalFireAt) { + return new TimerTask(finalFireAt); + } + + private CompletableTask createInstantTimer(int id, Instant fireAt) { + Timestamp ts = DataConverter.getTimestampFromInstant(fireAt); + this.pendingActions.put(id, OrchestratorService.OrchestratorAction.newBuilder() + .setId(id) + .setCreateTimer(OrchestratorService.CreateTimerAction.newBuilder().setFireAt(ts)) + .build()); + + if (!this.isReplaying) { + logger.finer(() -> String.format("Creating Instant Timer with id: %s, fireAt: %s ", id, fireAt)); + } + + CompletableTask timerTask = new CompletableTask<>(); + TaskRecord record = new TaskRecord<>(timerTask, "(timer)", Void.class); + this.openTasks.put(id, record); + return timerTask; + } + + private void handleTimerCreated(OrchestratorService.HistoryEvent e) { + int timerEventId = e.getEventId(); + if (timerEventId == -100) { + // Infrastructure timer used by the dispatcher to break transactions into multiple batches + return; + } + + OrchestratorService.TimerCreatedEvent timerCreatedEvent = e.getTimerCreated(); + + // The history shows that this orchestrator created a durable timer in a previous execution. + // We can therefore remove it from the map of pending actions. If we can't find the pending + // action, then we assume a non-deterministic code violation in the orchestrator. + OrchestratorService.OrchestratorAction timerAction = this.pendingActions.remove(timerEventId); + if (timerAction == null) { + String message = String.format( + "Non-deterministic orchestrator detected: a history event creating a timer with ID %d and " + + "fire-at time %s was replayed but the current orchestrator implementation didn't actually create " + + "this timer. 
Was a change made to the orchestrator code after this instance " + + "had already started running?", + timerEventId, + DataConverter.getInstantFromTimestamp(timerCreatedEvent.getFireAt())); + throw new NonDeterministicOrchestratorException(message); + } + } + + public void handleTimerFired(OrchestratorService.HistoryEvent e) { + OrchestratorService.TimerFiredEvent timerFiredEvent = e.getTimerFired(); + int timerEventId = timerFiredEvent.getTimerId(); + TaskRecord record = this.openTasks.remove(timerEventId); + if (record == null) { + // TODO: Log a warning about a potential duplicate timer fired event + return; + } + + if (!this.isReplaying) { + this.logger.finer(() -> + String.format("Firing timer by completing task: %s expected fire at time: %s", timerEventId, + Instant.ofEpochSecond(timerFiredEvent.getFireAt().getSeconds(), + timerFiredEvent.getFireAt().getNanos()))); + } + + CompletableTask task = record.getTask(); + task.complete(null); + } + + private void handleSubOrchestrationCreated(OrchestratorService.HistoryEvent e) { + int taskId = e.getEventId(); + OrchestratorService.SubOrchestrationInstanceCreatedEvent subOrchestrationInstanceCreated = + e.getSubOrchestrationInstanceCreated(); + OrchestratorService.OrchestratorAction taskAction = this.pendingActions.remove(taskId); + if (taskAction == null) { + String message = String.format( + "Non-deterministic orchestrator detected: a history event scheduling an sub-orchestration task " + + "with sequence ID %d and name '%s' was replayed but the current orchestrator implementation didn't " + + "actually schedule this task. Was a change made to the orchestrator code after this instance had " + + "already started running?", + taskId, + subOrchestrationInstanceCreated.getName()); + throw new NonDeterministicOrchestratorException(message); + } + } + + private void handleSubOrchestrationCompleted(OrchestratorService.HistoryEvent e) { + OrchestratorService.SubOrchestrationInstanceCompletedEvent subOrchestrationInstanceCompletedEvent = + e.getSubOrchestrationInstanceCompleted(); + int taskId = subOrchestrationInstanceCompletedEvent.getTaskScheduledId(); + TaskRecord record = this.openTasks.remove(taskId); + if (record == null) { + this.logger.warning("Discarding a potentially duplicate SubOrchestrationInstanceCompleted " + + "event with ID = " + taskId); + return; + } + String rawResult = subOrchestrationInstanceCompletedEvent.getResult().getValue(); + + if (!this.isReplaying) { + // TODO: Structured logging + // TODO: Would it make more sense to put this log in the activity executor? + this.logger.fine(() -> String.format( + "%s: Sub-orchestrator '%s' (#%d) completed with serialized output: %s", + this.instanceId, + record.getTaskName(), + taskId, + rawResult != null ? 
rawResult : "(null)")); + + } + CompletableTask task = record.getTask(); + try { + Object result = this.dataConverter.deserialize(rawResult, record.getDataType()); + task.complete(result); + } catch (Exception ex) { + task.completeExceptionally(ex); + } + } + + private void handleSubOrchestrationFailed(OrchestratorService.HistoryEvent e) { + OrchestratorService.SubOrchestrationInstanceFailedEvent subOrchestrationInstanceFailedEvent = + e.getSubOrchestrationInstanceFailed(); + int taskId = subOrchestrationInstanceFailedEvent.getTaskScheduledId(); + TaskRecord record = this.openTasks.remove(taskId); + if (record == null) { + // TODO: Log a warning about a potential duplicate task completion event + return; + } + + FailureDetails details = new FailureDetails(subOrchestrationInstanceFailedEvent.getFailureDetails()); + + if (!this.isReplaying) { + // TODO: Log task failure, including the number of bytes in the result + } + + CompletableTask task = record.getTask(); + TaskFailedException exception = new TaskFailedException( + record.taskName, + taskId, + details); + task.completeExceptionally(exception); + } + + private void handleExecutionTerminated(OrchestratorService.HistoryEvent e) { + OrchestratorService.ExecutionTerminatedEvent executionTerminatedEvent = e.getExecutionTerminated(); + this.completeInternal(executionTerminatedEvent.getInput().getValue(), null, + OrchestratorService.OrchestrationStatus.ORCHESTRATION_STATUS_TERMINATED); + } + + @Override + public void complete(Object output) { + if (this.continuedAsNew) { + this.completeInternal(this.continuedAsNewInput, + OrchestratorService.OrchestrationStatus.ORCHESTRATION_STATUS_CONTINUED_AS_NEW); + } else { + this.completeInternal(output, OrchestratorService.OrchestrationStatus.ORCHESTRATION_STATUS_COMPLETED); + } + } + + public void fail(FailureDetails failureDetails) { + // TODO: How does a parent orchestration use the output to construct an exception? 
+ this.completeInternal(null, failureDetails, + OrchestratorService.OrchestrationStatus.ORCHESTRATION_STATUS_FAILED); + } + + private void completeInternal(Object output, OrchestratorService.OrchestrationStatus runtimeStatus) { + String resultAsJson = TaskOrchestrationExecutor.this.dataConverter.serialize(output); + this.completeInternal(resultAsJson, null, runtimeStatus); + } + + private void completeInternal( + @Nullable String rawOutput, + @Nullable FailureDetails failureDetails, + OrchestratorService.OrchestrationStatus runtimeStatus) { + Helpers.throwIfOrchestratorComplete(this.isComplete); + + + OrchestratorService.CompleteOrchestrationAction.Builder builder = OrchestratorService.CompleteOrchestrationAction + .newBuilder(); + builder.setOrchestrationStatus(runtimeStatus); + + if (rawOutput != null) { + builder.setResult(StringValue.of(rawOutput)); + } + + if (failureDetails != null) { + builder.setFailureDetails(failureDetails.toProto()); + } + + if (this.continuedAsNew && this.preserveUnprocessedEvents) { + addCarryoverEvents(builder); + } + + if (!this.isReplaying) { + // TODO: Log completion, including the number of bytes in the output + } + + int id = this.sequenceNumber++; + OrchestratorService.OrchestratorAction action = OrchestratorService.OrchestratorAction.newBuilder() + .setId(id) + .setCompleteOrchestration(builder.build()) + .build(); + this.pendingActions.put(id, action); + this.isComplete = true; + } + + private void addCarryoverEvents(OrchestratorService.CompleteOrchestrationAction.Builder builder) { + // Add historyEvent in the unprocessedEvents buffer + // Add historyEvent in the new event list that haven't been added to the buffer. + // We don't check the event in the pass event list to avoid duplicated events. + Set externalEvents = new HashSet<>(this.unprocessedEvents); + List newEvents = this.historyEventPlayer.getNewEvents(); + int currentHistoryIndex = this.historyEventPlayer.getCurrentHistoryIndex(); + + // Only add events that haven't been processed to the carryOverEvents + // currentHistoryIndex will point to the first unprocessed event + for (int i = currentHistoryIndex; i < newEvents.size(); i++) { + OrchestratorService.HistoryEvent historyEvent = newEvents.get(i); + if (historyEvent.getEventTypeCase() == OrchestratorService.HistoryEvent.EventTypeCase.EVENTRAISED) { + externalEvents.add(historyEvent); + } + } + + externalEvents.forEach(builder::addCarryoverEvents); + } + + private boolean waitingForEvents() { + return this.outstandingEvents.size() > 0; + } + + private boolean processNextEvent() { + return this.historyEventPlayer.moveNext(); + } + + private void processEvent(OrchestratorService.HistoryEvent e) { + boolean overrideSuspension = e.getEventTypeCase() + == OrchestratorService.HistoryEvent.EventTypeCase.EXECUTIONRESUMED + || e.getEventTypeCase() == OrchestratorService.HistoryEvent.EventTypeCase.EXECUTIONTERMINATED; + if (this.isSuspended && !overrideSuspension) { + this.handleEventWhileSuspended(e); + } else { + this.logger.fine(() -> this.instanceId + ": Processing event: " + e.getEventTypeCase()); + switch (e.getEventTypeCase()) { + case ORCHESTRATORSTARTED: + Instant instant = DataConverter.getInstantFromTimestamp(e.getTimestamp()); + this.setCurrentInstant(instant); + this.logger.fine(() -> this.instanceId + ": Workflow orchestrator started"); + break; + case ORCHESTRATORCOMPLETED: + // No action needed + this.logger.fine(() -> this.instanceId + ": Workflow orchestrator completed"); + break; + case EXECUTIONSTARTED: + 
OrchestratorService.ExecutionStartedEvent executionStarted = e.getExecutionStarted(); + this.setName(executionStarted.getName()); + this.setInput(executionStarted.getInput().getValue()); + this.setInstanceId(executionStarted.getOrchestrationInstance().getInstanceId()); + this.logger.fine(() -> this.instanceId + ": Workflow execution started"); + this.setAppId(e.getRouter().getSourceAppID()); + + // Create and invoke the workflow orchestrator + TaskOrchestrationFactory factory = TaskOrchestrationExecutor.this.orchestrationFactories + .get(executionStarted.getName()); + if (factory == null) { + // Try getting the default orchestrator + factory = TaskOrchestrationExecutor.this.orchestrationFactories.get("*"); + } + // TODO: Throw if the factory is null (orchestration by that name doesn't exist) + if (factory == null) { + throw new IllegalStateException("No factory found for orchestrator: " + executionStarted.getName()); + } + + TaskOrchestration orchestrator = factory.create(); + orchestrator.run(this); + break; + case EXECUTIONCOMPLETED: + this.logger.fine(() -> this.instanceId + ": Workflow execution completed"); + break; + case EXECUTIONTERMINATED: + this.handleExecutionTerminated(e); + break; + case TASKSCHEDULED: + this.handleTaskScheduled(e); + break; + case TASKCOMPLETED: + this.handleTaskCompleted(e); + break; + case TASKFAILED: + this.handleTaskFailed(e); + break; + case TIMERCREATED: + this.handleTimerCreated(e); + break; + case TIMERFIRED: + this.handleTimerFired(e); + break; + case SUBORCHESTRATIONINSTANCECREATED: + this.handleSubOrchestrationCreated(e); + break; + case SUBORCHESTRATIONINSTANCECOMPLETED: + this.handleSubOrchestrationCompleted(e); + break; + case SUBORCHESTRATIONINSTANCEFAILED: + this.handleSubOrchestrationFailed(e); + break; + case EVENTRAISED: + this.handleEventRaised(e); + break; + case EXECUTIONSUSPENDED: + this.handleExecutionSuspended(e); + break; + case EXECUTIONRESUMED: + this.handleExecutionResumed(e); + break; + default: + throw new IllegalStateException("Don't know how to handle history type " + e.getEventTypeCase()); + } + } + } + + private class TaskRecord { + private final CompletableTask task; + private final String taskName; + private final Class dataType; + + public TaskRecord(CompletableTask task, String taskName, Class dataType) { + this.task = task; + this.taskName = taskName; + this.dataType = dataType; + } + + public CompletableTask getTask() { + return this.task; + } + + public String getTaskName() { + return this.taskName; + } + + public Class getDataType() { + return this.dataType; + } + } + + private class OrchestrationHistoryIterator { + private final List pastEvents; + private final List newEvents; + + private List currentHistoryList; + private int currentHistoryIndex; + + public OrchestrationHistoryIterator(List pastEvents, + List newEvents) { + this.pastEvents = pastEvents; + this.newEvents = newEvents; + this.currentHistoryList = pastEvents; + } + + public boolean moveNext() { + if (this.currentHistoryList == pastEvents && this.currentHistoryIndex >= pastEvents.size()) { + // Move forward to the next list + this.currentHistoryList = this.newEvents; + this.currentHistoryIndex = 0; + + ContextImplTask.this.setDoneReplaying(); + } + + if (this.currentHistoryList == this.newEvents && this.currentHistoryIndex >= this.newEvents.size()) { + // We're done enumerating the history + return false; + } + + // Process the next event in the history + OrchestratorService.HistoryEvent next = this.currentHistoryList.get(this.currentHistoryIndex++); + 
ContextImplTask.this.processEvent(next); + return true; + } + + List getNewEvents() { + return this.newEvents; + } + + int getCurrentHistoryIndex() { + return this.currentHistoryIndex; + } + } + + private class TimerTask extends CompletableTask { + private Instant finalFireAt; + CompletableTask task; + + public TimerTask(Instant finalFireAt) { + super(); + CompletableTask firstTimer = createTimerTask(finalFireAt); + CompletableFuture timerChain = createTimerChain(finalFireAt, firstTimer.future); + this.task = new CompletableTask<>(timerChain); + this.finalFireAt = finalFireAt; + } + + // For a short timer (less than maximumTimerInterval), once the currentFuture completes, + // we must have reached finalFireAt, so we return and no more sub-timers are created. For a long timer + // (more than maximumTimerInterval), once a given currentFuture completes, we check if we have not yet + // reached finalFireAt. If that is the case, we create a new sub-timer task and make a recursive call on + // that new sub-timer task so that once it completes, another sub-timer task is created + // if necessary. Otherwise, we return and no more sub-timers are created. + private CompletableFuture createTimerChain(Instant finalFireAt, CompletableFuture currentFuture) { + return currentFuture.thenRun(() -> { + Instant currentInstsanceMinusNanos = currentInstant.minusNanos(currentInstant.getNano()); + Instant finalFireAtMinusNanos = finalFireAt.minusNanos(finalFireAt.getNano()); + if (currentInstsanceMinusNanos.compareTo(finalFireAtMinusNanos) >= 0) { + return; + } + Task nextTimer = createTimerTask(finalFireAt); + createTimerChain(finalFireAt, nextTimer.future); + }); + } + + private CompletableTask createTimerTask(Instant finalFireAt) { + CompletableTask nextTimer; + Duration remainingTime = Duration.between(currentInstant, finalFireAt); + if (remainingTime.compareTo(maximumTimerInterval) > 0) { + Instant nextFireAt = currentInstant.plus(maximumTimerInterval); + nextTimer = createInstantTimer(sequenceNumber++, nextFireAt); + } else { + nextTimer = createInstantTimer(sequenceNumber++, finalFireAt); + } + nextTimer.setParentTask(this); + return nextTimer; + } + + private void handleSubTimerSuccess() { + // check if it is the last timer + Instant currentInstantMinusNanos = currentInstant.minusNanos(currentInstant.getNano()); + Instant finalFireAtMinusNanos = finalFireAt.minusNanos(finalFireAt.getNano()); + if (currentInstantMinusNanos.compareTo(finalFireAtMinusNanos) >= 0) { + this.complete(null); + } + } + + @Override + public Void await() { + return this.task.await(); + } + + } + + private class ExternalEventTask extends CompletableTask { + private final String eventName; + private final Duration timeout; + private final int taskId; + + public ExternalEventTask(String eventName, int taskId, Duration timeout) { + this.eventName = eventName; + this.taskId = taskId; + this.timeout = timeout; + } + + // TODO: Shouldn't this be throws TaskCanceledException? 
+ @Override + protected void handleException(Throwable e) { + // Cancellation is caused by user-specified timeouts + if (e instanceof CancellationException) { + String message = String.format( + "Timeout of %s expired while waiting for an event named '%s' (ID = %d).", + this.timeout, + this.eventName, + this.taskId); + throw new TaskCanceledException(message, this.eventName, this.taskId); + } + + super.handleException(e); + } + } + + // Task implementation that implements a retry policy + private class RetriableTask extends CompletableTask { + private final RetryPolicy policy; + private final RetryHandler handler; + private final TaskOrchestrationContext context; + private final Instant firstAttempt; + private final TaskFactory taskFactory; + + private FailureDetails lastFailure; + private Duration totalRetryTime; + private Instant startTime; + private int attemptNumber; + private Task childTask; + + public RetriableTask(TaskOrchestrationContext context, TaskFactory taskFactory, RetryPolicy policy) { + this(context, taskFactory, policy, null); + } + + public RetriableTask(TaskOrchestrationContext context, TaskFactory taskFactory, RetryHandler handler) { + this(context, taskFactory, null, handler); + } + + private RetriableTask( + TaskOrchestrationContext context, + TaskFactory taskFactory, + @Nullable RetryPolicy retryPolicy, + @Nullable RetryHandler retryHandler) { + this.context = context; + this.taskFactory = taskFactory; + this.policy = retryPolicy; + this.handler = retryHandler; + this.firstAttempt = context.getCurrentInstant(); + this.totalRetryTime = Duration.ZERO; + this.createChildTask(taskFactory); + } + + // Every RetriableTask will have a CompletableTask as a child task. + private void createChildTask(TaskFactory taskFactory) { + CompletableTask childTask = (CompletableTask) taskFactory.create(); + this.setChildTask(childTask); + childTask.setParentTask(this); + } + + public void setChildTask(Task childTask) { + this.childTask = childTask; + } + + public Task getChildTask() { + return this.childTask; + } + + void handleChildSuccess(V result) { + this.complete(result); + } + + void handleChildException(Throwable ex) { + tryRetry((TaskFailedException) ex); + } + + void init() { + this.startTime = this.startTime == null ? this.context.getCurrentInstant() : this.startTime; + this.attemptNumber++; + } + + public void tryRetry(TaskFailedException ex) { + this.lastFailure = ex.getErrorDetails(); + if (!this.shouldRetry()) { + this.completeExceptionally(ex); + return; + } + + // Overflow/runaway retry protection + if (this.attemptNumber == Integer.MAX_VALUE) { + this.completeExceptionally(ex); + return; + } + + Duration delay = this.getNextDelay(); + if (!delay.isZero() && !delay.isNegative()) { + // Use a durable timer to create the delay between retries + this.context.createTimer(delay).await(); + } + + this.totalRetryTime = Duration.between(this.startTime, this.context.getCurrentInstant()); + this.createChildTask(this.taskFactory); + this.await(); + } + + @Override + public V await() { + this.init(); + // when awaiting the first child task, we will continue iterating over the history until a result is found + // for that task. If the result is an exception, the child task will invoke "handleChildException" on this + // object, which awaits a timer, *re-sets the current child task to correspond to a retry of this task*, + // and then awaits that child. + // This logic continues until either the operation succeeds, or are our retry quota is met. 
+ // At that point, we break the `await()` on the child task. + // Therefore, once we return from the following `await`, + // we just need to await again on the *current* child task to obtain the result of this task + try { + this.getChildTask().await(); + } catch (OrchestratorBlockedException ex) { + throw ex; + } catch (Exception ignored) { + // ignore the exception from previous child tasks. + // Only needs to return result from the last child task, which is on next line. + } + // Always return the last child task result. + return this.getChildTask().await(); + } + + private boolean shouldRetry() { + if (this.lastFailure.isNonRetriable()) { + logger.warning("Not performing any retries because the error is non retriable"); + + return false; + } + + if (this.policy == null && this.handler == null) { + // We should never get here, but if we do, returning false is the natural behavior. + return false; + } + + RetryContext retryContext = new RetryContext( + this.context, + this.attemptNumber, + this.lastFailure, + this.totalRetryTime); + + // These must default to true if not provided, so it is possible to use only one of them at a time + boolean shouldRetryBasedOnPolicy = this.policy != null ? this.shouldRetryBasedOnPolicy() : true; + boolean shouldRetryBasedOnHandler = this.handler != null ? this.handler.handle(retryContext) : true; + + // Only log when not replaying, so only the current attempt is logged and not all previous attempts. + if (!this.context.getIsReplaying()) { + if (this.policy != null) { + logger.fine(() -> String.format("shouldRetryBasedOnPolicy: %s", shouldRetryBasedOnPolicy)); + } + + if (this.handler != null) { + logger.fine(() -> String.format("shouldRetryBasedOnHandler: %s", shouldRetryBasedOnHandler)); + } + } + + return shouldRetryBasedOnPolicy && shouldRetryBasedOnHandler; + } + + private boolean shouldRetryBasedOnPolicy() { + // Only log when not replaying, so only the current attempt is logged and not all previous attempts. 
+ if (!this.context.getIsReplaying()) { + logger.fine(() -> String.format("Retry Policy: %d retries out of total %d performed ", this.attemptNumber, + this.policy.getMaxNumberOfAttempts())); + } + + if (this.attemptNumber >= this.policy.getMaxNumberOfAttempts()) { + // Max number of attempts exceeded + return false; + } + + // Duration.ZERO is interpreted as no maximum timeout + Duration retryTimeout = this.policy.getRetryTimeout(); + if (retryTimeout.compareTo(Duration.ZERO) > 0) { + Instant retryExpiration = this.firstAttempt.plus(retryTimeout); + if (this.context.getCurrentInstant().compareTo(retryExpiration) >= 0) { + // Max retry timeout exceeded + return false; + } + } + + // Keep retrying + return true; + } + + private Duration getNextDelay() { + if (this.policy != null) { + long maxDelayInMillis = this.policy.getMaxRetryInterval().toMillis(); + + long nextDelayInMillis; + try { + nextDelayInMillis = Math.multiplyExact( + this.policy.getFirstRetryInterval().toMillis(), + (long) Helpers.powExact(this.policy.getBackoffCoefficient(), this.attemptNumber)); + } catch (ArithmeticException overflowException) { + if (maxDelayInMillis > 0) { + return this.policy.getMaxRetryInterval(); + } else { + // If no maximum is specified, just throw + throw new ArithmeticException("The retry policy calculation resulted in an arithmetic " + + "overflow and no max retry interval was configured."); + } + } + + // NOTE: A max delay of zero or less is interpreted to mean no max delay + if (nextDelayInMillis > maxDelayInMillis && maxDelayInMillis > 0) { + return this.policy.getMaxRetryInterval(); + } else { + return Duration.ofMillis(nextDelayInMillis); + } + } + + // If there's no declarative retry policy defined, then the custom code retry handler + // is responsible for implementing any delays between retry attempts. + return Duration.ZERO; + } + } + + private class CompoundTask extends CompletableTask { + + List> subTasks; + + CompoundTask(List> subtasks, CompletableFuture future) { + super(future); + this.subTasks = subtasks; + } + + @Override + public U await() { + this.initSubTasks(); + return super.await(); + } + + private void initSubTasks() { + for (Task subTask : this.subTasks) { + if (subTask instanceof RetriableTask) { + ((RetriableTask) subTask).init(); + } + } + } + } + + private class CompletableTask extends Task { + private Task parentTask; + + public CompletableTask() { + this(new CompletableFuture<>()); + } + + CompletableTask(CompletableFuture future) { + super(future); + } + + public void setParentTask(Task parentTask) { + this.parentTask = parentTask; + } + + public Task getParentTask() { + return this.parentTask; + } + + @Override + public V await() { + do { + // If the future is done, return its value right away + if (this.future.isDone()) { + try { + return this.future.get(); + } catch (ExecutionException e) { + // rethrow if it's ContinueAsNewInterruption + if (e.getCause() instanceof ContinueAsNewInterruption) { + throw (ContinueAsNewInterruption) e.getCause(); + } + this.handleException(e.getCause()); + } catch (Exception e) { + this.handleException(e); + } + } + } while (processNextEvent()); + + // There's no more history left to replay and the current task is still not completed. This is normal. + // The OrchestratorBlockedException exception allows us to yield the current thread back to the executor so + // that we can send the current set of actions back to the worker and wait for new events to come in. 
+ // This is *not* an exception - it's a normal part of orchestrator control flow. + throw new OrchestratorBlockedException( + "The orchestrator is blocked and waiting for new inputs. " + + "This Throwable should never be caught by user code."); + } + + private boolean processNextEvent() { + try { + return ContextImplTask.this.processNextEvent(); + } catch (OrchestratorBlockedException | ContinueAsNewInterruption exception) { + throw exception; + } catch (Exception e) { + // ignore + // + // We ignore the exception. Any Durable Task exceptions thrown here can be obtained when calling + //{code#future.get()} in the implementation of 'await'. We defer to that loop to handle the exception. + // + } + // Any exception happen we return true so that we will enter to the do-while block for the last time. + return true; + } + + @Override + public CompletableTask thenApply(Function fn) { + CompletableFuture newFuture = this.future.thenApply(fn); + return new CompletableTask<>(newFuture); + } + + @Override + public Task thenAccept(Consumer fn) { + CompletableFuture newFuture = this.future.thenAccept(fn); + return new CompletableTask<>(newFuture); + } + + protected void handleException(Throwable e) { + if (e instanceof TaskFailedException) { + throw (TaskFailedException) e; + } + + if (e instanceof CompositeTaskFailedException) { + throw (CompositeTaskFailedException) e; + } + + if (e instanceof DataConverter.DataConverterException) { + throw (DataConverter.DataConverterException) e; + } + + throw new RuntimeException("Unexpected failure in the task execution", e); + } + + @Override + public boolean isDone() { + return this.future.isDone(); + } + + public boolean complete(V value) { + Task parentTask = this.getParentTask(); + boolean result = this.future.complete(value); + if (parentTask instanceof RetriableTask) { + // notify parent task + ((RetriableTask) parentTask).handleChildSuccess(value); + } + if (parentTask instanceof TimerTask) { + // notify parent task + ((TimerTask) parentTask).handleSubTimerSuccess(); + } + return result; + } + + private boolean cancel() { + return this.future.cancel(true); + } + + public boolean completeExceptionally(Throwable ex) { + Task parentTask = this.getParentTask(); + boolean result = this.future.completeExceptionally(ex); + if (parentTask instanceof RetriableTask) { + // notify parent task + ((RetriableTask) parentTask).handleChildException(ex); + } + return result; + } + } + } + + @FunctionalInterface + private interface TaskFactory { + Task create(); + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestrationFactory.java b/durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestrationFactory.java new file mode 100644 index 0000000000..274813b69f --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestrationFactory.java @@ -0,0 +1,33 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package io.dapr.durabletask; + +/** + * Factory interface for producing {@link TaskOrchestration} implementations. + */ +public interface TaskOrchestrationFactory { + /** + * Gets the name of the orchestration this factory creates. + * + * @return the name of the orchestration + */ + String getName(); + + /** + * Creates a new instance of {@link TaskOrchestration}. + * + * @return the created orchestration instance + */ + TaskOrchestration create(); +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestratorResult.java b/durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestratorResult.java new file mode 100644 index 0000000000..705a41d5c0 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/TaskOrchestratorResult.java @@ -0,0 +1,40 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import io.dapr.durabletask.implementation.protobuf.OrchestratorService; + +import java.util.Collection; +import java.util.Collections; + +final class TaskOrchestratorResult { + + private final Collection actions; + + private final String customStatus; + + public TaskOrchestratorResult(Collection actions, String customStatus) { + this.actions = Collections.unmodifiableCollection(actions); + ; + this.customStatus = customStatus; + } + + public Collection getActions() { + return this.actions; + } + + public String getCustomStatus() { + return this.customStatus; + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/interruption/ContinueAsNewInterruption.java b/durabletask-client/src/main/java/io/dapr/durabletask/interruption/ContinueAsNewInterruption.java new file mode 100644 index 0000000000..e95c511573 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/interruption/ContinueAsNewInterruption.java @@ -0,0 +1,32 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask.interruption; + +import io.dapr.durabletask.TaskOrchestrationContext; + +/** + * Control flow {@code Throwable} class for orchestrator when invoke {@link TaskOrchestrationContext#continueAsNew}. + * This {@code Throwable} must never be caught by user + * code. + * + *
+ * <p>{@code ContinueAsNewInterruption} is thrown when an orchestrator calls + * {@link TaskOrchestrationContext#continueAsNew}. + * Catching {@code ContinueAsNewInterruption} in user code could prevent the orchestration from saving + * state and scheduling new tasks, resulting in the orchestration getting stuck.</p>
+ */ +public class ContinueAsNewInterruption extends RuntimeException { + public ContinueAsNewInterruption(String message) { + super(message); + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/interruption/OrchestratorBlockedException.java b/durabletask-client/src/main/java/io/dapr/durabletask/interruption/OrchestratorBlockedException.java new file mode 100644 index 0000000000..7eff5248f6 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/interruption/OrchestratorBlockedException.java @@ -0,0 +1,31 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask.interruption; + +import io.dapr.durabletask.Task; + +/** + * Control flow {@code Throwable} class for orchestrator functions. This {@code Throwable} must never be caught by user + * code. + * + *
+ * <p>{@code OrchestratorBlockedException} is thrown when an orchestrator calls {@link Task#await} on an uncompleted + * task. The purpose of throwing in this way is to halt execution of the orchestrator to save the current state and + * commit any side effects. Catching {@code OrchestratorBlockedException} in user code could prevent the orchestration + * from saving state and scheduling new tasks, resulting in the orchestration getting stuck.</p>
+ */ +public final class OrchestratorBlockedException extends RuntimeException { + public OrchestratorBlockedException(String message) { + super(message); + } +} diff --git a/durabletask-client/src/main/java/io/dapr/durabletask/util/UuidGenerator.java b/durabletask-client/src/main/java/io/dapr/durabletask/util/UuidGenerator.java new file mode 100644 index 0000000000..a55ed5fb12 --- /dev/null +++ b/durabletask-client/src/main/java/io/dapr/durabletask/util/UuidGenerator.java @@ -0,0 +1,63 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask.util; + +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.UUID; + +/** + * Utility class for generating UUIDs. + * + */ +public class UuidGenerator { + + /** + * Generates a UUID. + * @param version for the UUID generation + * @param algorithm to be used + * @param namespace for the UUID generation + * @param name for the UUID generation + * @return the generated UUID + */ + public static UUID generate(int version, String algorithm, UUID namespace, String name) { + + MessageDigest hasher = hasher(algorithm); + + if (namespace != null) { + ByteBuffer ns = ByteBuffer.allocate(16); + ns.putLong(namespace.getMostSignificantBits()); + ns.putLong(namespace.getLeastSignificantBits()); + hasher.update(ns.array()); + } + + hasher.update(name.getBytes(StandardCharsets.UTF_8)); + ByteBuffer hash = ByteBuffer.wrap(hasher.digest()); + + final long msb = (hash.getLong() & 0xffffffffffff0fffL) | (version & 0x0f) << 12; + final long lsb = (hash.getLong() & 0x3fffffffffffffffL) | 0x8000000000000000L; + + return new UUID(msb, lsb); + } + + private static MessageDigest hasher(String algorithm) { + try { + return MessageDigest.getInstance(algorithm); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException(String.format("%s not supported.", algorithm)); + } + } +} diff --git a/durabletask-client/src/test/java/io/dapr/durabletask/DurableTaskClientIT.java b/durabletask-client/src/test/java/io/dapr/durabletask/DurableTaskClientIT.java new file mode 100644 index 0000000000..85c7de0e41 --- /dev/null +++ b/durabletask-client/src/test/java/io/dapr/durabletask/DurableTaskClientIT.java @@ -0,0 +1,1785 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package io.dapr.durabletask; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import java.io.IOException; +import java.time.Duration; +import java.time.Instant; +import java.time.LocalDateTime; +import java.time.ZoneId; +import java.time.ZonedDateTime; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReferenceArray; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +/** + * These integration tests are designed to exercise the core, high-level features of + * the Durable Task programming model. + *
+ * <p>
+ * These tests currently require a sidecar process to be + * running on the local machine (the sidecar is what accepts the client operations and + * sends invocation instructions to the DurableTaskWorker). + */ +@Tag("integration") +public class DurableTaskClientIT extends IntegrationTestBase { + static final Duration defaultTimeout = Duration.ofSeconds(100); + // All tests that create a server should save it to this variable for proper shutdown + private DurableTaskGrpcWorker server; + + + @Test + void emptyOrchestration() throws TimeoutException { + final String orchestratorName = "EmptyOrchestration"; + final String input = "Hello " + Instant.now(); + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> ctx.complete(ctx.getInput(String.class))) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, input); + OrchestrationMetadata instance = client.waitForInstanceCompletion( + instanceId, + defaultTimeout, + true); + + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + assertEquals(input, instance.readInputAs(String.class)); + assertEquals(input, instance.readOutputAs(String.class)); + } + } + + @Test + void singleTimer() throws IOException, TimeoutException { + final String orchestratorName = "SingleTimer"; + final Duration delay = Duration.ofSeconds(3); + AtomicReferenceArray timestamps = new AtomicReferenceArray<>(2); + AtomicInteger counter = new AtomicInteger(); + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + timestamps.set(counter.get(), LocalDateTime.now()); + counter.incrementAndGet(); + ctx.createTimer(delay).await(); + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + Duration timeout = delay.plus(defaultTimeout); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, timeout, false); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + + // Verify that the delay actually happened + long expectedCompletionSecond = instance.getCreatedAt().plus(delay).getEpochSecond(); + long actualCompletionSecond = instance.getLastUpdatedAt().getEpochSecond(); + assertTrue(expectedCompletionSecond <= actualCompletionSecond); + + // Verify that the correct number of timers were created + // This should yield 2 (first invocation + replay invocations for internal timers) + assertEquals(2, counter.get()); + + // Verify that each timer is the expected length + int[] secondsElapsed = new int[1]; + for (int i = 0; i < timestamps.length() - 1; i++) { + secondsElapsed[i] = timestamps.get(i + 1).getSecond() - timestamps.get(i).getSecond(); + } + assertEquals(3, secondsElapsed[0]); + + } + } + + + @Test + void loopWithTimer() throws IOException, TimeoutException { + final String orchestratorName = "LoopWithTimer"; + final Duration delay = Duration.ofSeconds(2); + AtomicReferenceArray timestamps = new AtomicReferenceArray<>(4); + AtomicInteger counter = new AtomicInteger(); + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + for (int i = 0; i < 3; i++) { + if (!ctx.getIsReplaying()) { + 
timestamps.set(counter.get(), LocalDateTime.now()); + counter.incrementAndGet(); + } + ctx.createTimer(delay).await(); + } + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + Duration timeout = delay.plus(defaultTimeout); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, timeout, false); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + + // Verify that the delay actually happened + long expectedCompletionSecond = instance.getCreatedAt().plus(delay).getEpochSecond(); + long actualCompletionSecond = instance.getLastUpdatedAt().getEpochSecond(); + assertTrue(expectedCompletionSecond <= actualCompletionSecond); + + // Verify that the correct number of timers were created + assertEquals(3, counter.get()); + + // Verify that each timer is the expected length + int[] secondsElapsed = new int[timestamps.length()]; + for (int i = 0; i < timestamps.length() - 1; i++) { + if (timestamps.get(i + 1) != null && timestamps.get(i) != null) { + secondsElapsed[i] = timestamps.get(i + 1).getSecond() - timestamps.get(i).getSecond(); + } else { + secondsElapsed[i] = -1; + } + } + assertEquals(2, secondsElapsed[0]); + assertEquals(2, secondsElapsed[1]); + assertEquals(-1, secondsElapsed[2]); + + + } + } + + @Test + void loopWithWaitForEvent() throws IOException, TimeoutException { + final String orchestratorName = "LoopWithTimer"; + final Duration delay = Duration.ofSeconds(2); + AtomicReferenceArray timestamps = new AtomicReferenceArray<>(4); + AtomicInteger counter = new AtomicInteger(); + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + for (int i = 0; i < 4; i++) { + try { + ctx.waitForExternalEvent("HELLO", delay).await(); + } catch (TaskCanceledException tce) { + if (!ctx.getIsReplaying()) { + timestamps.set(counter.get(), LocalDateTime.now()); + counter.incrementAndGet(); + } + + } + } + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + Duration timeout = delay.plus(defaultTimeout); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, timeout, false); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + + // Verify that the delay actually happened + long expectedCompletionSecond = instance.getCreatedAt().plus(delay).getEpochSecond(); + long actualCompletionSecond = instance.getLastUpdatedAt().getEpochSecond(); + assertTrue(expectedCompletionSecond <= actualCompletionSecond); + + // Verify that the correct number of timers were created + assertEquals(4, counter.get()); + + // Verify that each timer is the expected length + int[] secondsElapsed = new int[timestamps.length()]; + for (int i = 0; i < timestamps.length() - 1; i++) { + if (timestamps.get(i + 1) != null && timestamps.get(i) != null) { + secondsElapsed[i] = timestamps.get(i + 1).getSecond() - timestamps.get(i).getSecond(); + } else { + secondsElapsed[i] = -1; + } + } + assertEquals(2, secondsElapsed[0]); + assertEquals(2, secondsElapsed[1]); + assertEquals(2, secondsElapsed[2]); + assertEquals(0, secondsElapsed[3]); + + + } + } + + @Test + void longTimer() throws TimeoutException { + final String 
orchestratorName = "LongTimer"; + final Duration delay = Duration.ofSeconds(7); + AtomicInteger counter = new AtomicInteger(); + AtomicReferenceArray timestamps = new AtomicReferenceArray<>(4); + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + timestamps.set(counter.get(), LocalDateTime.now()); + counter.incrementAndGet(); + ctx.createTimer(delay).await(); + }) + .setMaximumTimerInterval(Duration.ofSeconds(3)) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + Duration timeout = delay.plus(defaultTimeout); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, timeout, false); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus(), + String.format("Orchestration failed with error: %s", instance.getFailureDetails().getErrorMessage())); + + // Verify that the delay actually happened + long expectedCompletionSecond = instance.getCreatedAt().plus(delay).getEpochSecond(); + long actualCompletionSecond = instance.getLastUpdatedAt().getEpochSecond(); + assertTrue(expectedCompletionSecond <= actualCompletionSecond); + + // Verify that the correct number of timers were created + // This should yield 4 (first invocation + replay invocations for internal timers 3s + 3s + 1s) + assertEquals(4, counter.get()); + + // Verify that each timer is the expected length + int[] secondsElapsed = new int[3]; + for (int i = 0; i < timestamps.length() - 1; i++) { + secondsElapsed[i] = timestamps.get(i + 1).getSecond() - timestamps.get(i).getSecond(); + } + assertEquals(secondsElapsed[0], 3); + assertEquals(secondsElapsed[1], 3); + assertEquals(secondsElapsed[2], 1); + } + } + + @Test + void longTimerNonblocking() throws TimeoutException { + final String orchestratorName = "ActivityAnyOf"; + final String externalEventActivityName = "externalEvent"; + final String externalEventWinner = "The external event completed first"; + final String timerEventWinner = "The timer event completed first"; + final Duration timerDuration = Duration.ofSeconds(20); + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + Task externalEvent = ctx.waitForExternalEvent(externalEventActivityName, String.class); + Task longTimer = ctx.createTimer(timerDuration); + Task winnerEvent = ctx.anyOf(externalEvent, longTimer).await(); + if (winnerEvent == externalEvent) { + ctx.complete(externalEventWinner); + } else { + ctx.complete(timerEventWinner); + } + }).setMaximumTimerInterval(Duration.ofSeconds(3)).buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + client.raiseEvent(instanceId, externalEventActivityName, "Hello world"); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + + String output = instance.readOutputAs(String.class); + assertNotNull(output); + assertTrue(output.equals(externalEventWinner)); + + long createdTime = instance.getCreatedAt().getEpochSecond(); + long completedTime = instance.getLastUpdatedAt().getEpochSecond(); + // Timer did not block execution + assertTrue(completedTime - 
createdTime < 5); + } + } + + @Test + void longTimerNonblockingNoExternal() throws TimeoutException { + final String orchestratorName = "ActivityAnyOf"; + final String externalEventActivityName = "externalEvent"; + final String externalEventWinner = "The external event completed first"; + final String timerEventWinner = "The timer event completed first"; + final Duration timerDuration = Duration.ofSeconds(20); + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + Task externalEvent = ctx.waitForExternalEvent(externalEventActivityName, String.class); + Task longTimer = ctx.createTimer(timerDuration); + Task winnerEvent = ctx.anyOf(externalEvent, longTimer).await(); + if (winnerEvent == externalEvent) { + ctx.complete(externalEventWinner); + } else { + ctx.complete(timerEventWinner); + } + }).setMaximumTimerInterval(Duration.ofSeconds(3)).buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + + String output = instance.readOutputAs(String.class); + assertNotNull(output); + assertTrue(output.equals(timerEventWinner)); + + long expectedCompletionSecond = instance.getCreatedAt().plus(timerDuration).getEpochSecond(); + long actualCompletionSecond = instance.getLastUpdatedAt().getEpochSecond(); + assertTrue(expectedCompletionSecond <= actualCompletionSecond); + } + } + + + @Test + void longTimeStampTimer() throws TimeoutException { + final String orchestratorName = "LongTimeStampTimer"; + final Duration delay = Duration.ofSeconds(7); + final ZonedDateTime zonedDateTime = ZonedDateTime.of(LocalDateTime.now().plusSeconds(delay.getSeconds()), ZoneId.systemDefault()); + + AtomicInteger counter = new AtomicInteger(); + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + counter.incrementAndGet(); + ctx.createTimer(zonedDateTime).await(); + }) + .setMaximumTimerInterval(Duration.ofSeconds(3)) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + Duration timeout = delay.plus(defaultTimeout); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, timeout, false); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + + // Verify that the delay actually happened + long expectedCompletionSecond = zonedDateTime.toInstant().getEpochSecond(); + long actualCompletionSecond = instance.getLastUpdatedAt().getEpochSecond(); + assertTrue(expectedCompletionSecond <= actualCompletionSecond); + + // Verify that the correct number of timers were created + // This should yield 4 (first invocation + replay invocations for internal timers 3s + 3s + 2s) + // The timer can be created at 7s or 8s as clock is not precise, so we need to allow for that + assertTrue(counter.get() >= 4 && counter.get() <= 5); + } + } + + @Test + void singleTimeStampTimer() throws IOException, TimeoutException { + final String orchestratorName = "SingleTimeStampTimer"; + final Duration delay = Duration.ofSeconds(3); + final ZonedDateTime zonedDateTime = 
ZonedDateTime.of(LocalDateTime.now().plusSeconds(delay.getSeconds()), ZoneId.systemDefault()); + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> ctx.createTimer(zonedDateTime).await()) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + Duration timeout = delay.plus(defaultTimeout); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, timeout, false); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + + // Verify that the delay actually happened + long expectedCompletionSecond = zonedDateTime.toInstant().getEpochSecond(); + long actualCompletionSecond = instance.getLastUpdatedAt().getEpochSecond(); + assertTrue(expectedCompletionSecond <= actualCompletionSecond); + } + } + + + @Test + void singleTimeStampCreateTimer() throws IOException, TimeoutException { + final String orchestratorName = "SingleTimeStampTimer"; + final Duration delay = Duration.ofSeconds(3); + final ZonedDateTime zonedDateTime = ZonedDateTime.of(LocalDateTime.now().plusSeconds(delay.getSeconds()), ZoneId.systemDefault()); + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> ctx.createTimer(zonedDateTime).await()) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + Duration timeout = delay.plus(defaultTimeout); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, timeout, false); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + + // Verify that the delay actually happened + long expectedCompletionSecond = zonedDateTime.toInstant().getEpochSecond(); + long actualCompletionSecond = instance.getLastUpdatedAt().getEpochSecond(); + assertTrue(expectedCompletionSecond <= actualCompletionSecond); + } + } + + @Test + void isReplaying() throws IOException, InterruptedException, TimeoutException { + final String orchestratorName = "SingleTimer"; + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + ArrayList list = new ArrayList(); + list.add(ctx.getIsReplaying()); + ctx.createTimer(Duration.ofSeconds(0)).await(); + list.add(ctx.getIsReplaying()); + ctx.createTimer(Duration.ofSeconds(0)).await(); + list.add(ctx.getIsReplaying()); + ctx.complete(list); + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + OrchestrationMetadata instance = client.waitForInstanceCompletion( + instanceId, + defaultTimeout, + true); + + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + + // Verify that the orchestrator reported the correct isReplaying values. + // Note that only the values of the *final* replay are returned. 
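// Editorial note, illustrative only (not part of this patch): awaiting an uncompleted timer throws
// OrchestratorBlockedException and the orchestrator is later re-executed from the top against the recorded
// history. On the final execution the first two timer results are already present in the history, so
// getIsReplaying() is true at those points and false once execution moves past the last recorded event,
// which is why the expected output is [true, true, false].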
+ List results = instance.readOutputAs(List.class); + assertEquals(3, results.size()); + assertTrue((Boolean) results.get(0)); + assertTrue((Boolean) results.get(1)); + assertFalse((Boolean) results.get(2)); + } + } + + @Test + void singleActivity() throws IOException, InterruptedException, TimeoutException { + final String orchestratorName = "SingleActivity"; + final String activityName = "Echo"; + final String input = Instant.now().toString(); + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + String activityInput = ctx.getInput(String.class); + String output = ctx.callActivity(activityName, activityInput, String.class).await(); + ctx.complete(output); + }) + .addActivity(activityName, ctx -> { + return String.format("Hello, %s!", ctx.getInput(String.class)); + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, input); + OrchestrationMetadata instance = client.waitForInstanceCompletion( + instanceId, + defaultTimeout, + true); + + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + String output = instance.readOutputAs(String.class); + String expected = String.format("Hello, %s!", input); + assertEquals(expected, output); + } + } + + @Test + void currentDateTimeUtc() throws IOException, TimeoutException { + final String orchestratorName = "CurrentDateTimeUtc"; + final String echoActivityName = "Echo"; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + Instant currentInstant1 = ctx.getCurrentInstant(); + Instant originalInstant1 = ctx.callActivity(echoActivityName, currentInstant1, Instant.class).await(); + if (!currentInstant1.equals(originalInstant1)) { + ctx.complete(false); + return; + } + + Instant currentInstant2 = ctx.getCurrentInstant(); + Instant originalInstant2 = ctx.callActivity(echoActivityName, currentInstant2, Instant.class).await(); + if (!currentInstant2.equals(originalInstant2)) { + ctx.complete(false); + return; + } + + ctx.complete(!currentInstant1.equals(currentInstant2)); + }) + .addActivity(echoActivityName, ctx -> { + // Return the input back to the caller, regardless of its type + return ctx.getInput(Object.class); + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + assertTrue(instance.readOutputAs(boolean.class)); + } + } + + @Test + void activityChain() throws IOException, TimeoutException { + final String orchestratorName = "ActivityChain"; + final String plusOneActivityName = "PlusOne"; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + int value = ctx.getInput(int.class); + for (int i = 0; i < 10; i++) { + value = ctx.callActivity(plusOneActivityName, i, int.class).await(); + } + + ctx.complete(value); + }) + .addActivity(plusOneActivityName, ctx -> ctx.getInput(int.class) + 1) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = 
client.scheduleNewOrchestrationInstance(orchestratorName, 0); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + assertEquals(10, instance.readOutputAs(int.class)); + } + } + + @Test + void subOrchestration() throws TimeoutException { + final String orchestratorName = "SubOrchestration"; + DurableTaskGrpcWorker worker = this.createWorkerBuilder().addOrchestrator(orchestratorName, ctx -> { + int result = 5; + int input = ctx.getInput(int.class); + if (input < 3) { + result += ctx.callSubOrchestrator(orchestratorName, input + 1, int.class).await(); + } + ctx.complete(result); + }).buildAndStart(); + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, 1); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + assertEquals(15, instance.readOutputAs(int.class)); + } + } + + @Test + void continueAsNew() throws TimeoutException { + final String orchestratorName = "continueAsNew"; + final Duration delay = Duration.ofSeconds(0); + DurableTaskGrpcWorker worker = this.createWorkerBuilder().addOrchestrator(orchestratorName, ctx -> { + int input = ctx.getInput(int.class); + if (input < 10) { + ctx.createTimer(delay).await(); + ctx.continueAsNew(input + 1); + } else { + ctx.complete(input); + } + }).buildAndStart(); + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, 1); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + assertEquals(10, instance.readOutputAs(int.class)); + } + } + + @Test + void continueAsNewWithExternalEvents() throws TimeoutException, InterruptedException { + final String orchestratorName = "continueAsNewWithExternalEvents"; + final String eventName = "MyEvent"; + final int expectedEventCount = 10; + final Duration delay = Duration.ofSeconds(0); + DurableTaskGrpcWorker worker = this.createWorkerBuilder().addOrchestrator(orchestratorName, ctx -> { + int receivedEventCount = ctx.getInput(int.class); + + if (receivedEventCount < expectedEventCount) { + ctx.waitForExternalEvent(eventName, int.class).await(); + ctx.continueAsNew(receivedEventCount + 1, true); + } else { + ctx.complete(receivedEventCount); + } + }).buildAndStart(); + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, 0); + + for (int i = 0; i < expectedEventCount; i++) { + client.raiseEvent(instanceId, eventName, i); + } + + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + assertEquals(expectedEventCount, instance.readOutputAs(int.class)); + } + } + + @Test + void termination() throws TimeoutException { + final String orchestratorName = "Termination"; + final Duration delay = Duration.ofSeconds(3); + + 
DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> ctx.createTimer(delay).await()) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + String expectOutput = "I'll be back."; + client.terminate(instanceId, expectOutput); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(instanceId, instance.getInstanceId()); + assertEquals(OrchestrationRuntimeStatus.TERMINATED, instance.getRuntimeStatus()); + assertEquals(expectOutput, instance.readOutputAs(String.class)); + } + } + + + @ParameterizedTest + @ValueSource(booleans = {true}) + void restartOrchestrationWithNewInstanceId(boolean restartWithNewInstanceId) throws TimeoutException { + final String orchestratorName = "restart"; + final Duration delay = Duration.ofSeconds(3); + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> ctx.createTimer(delay).await()) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, "RestartTest"); + client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + String newInstanceId = client.restartInstance(instanceId, restartWithNewInstanceId); + OrchestrationMetadata instance = client.waitForInstanceCompletion(newInstanceId, defaultTimeout, true); + + if (restartWithNewInstanceId) { + assertNotEquals(instanceId, newInstanceId); + } else { + assertEquals(instanceId, newInstanceId); + } + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + assertEquals("\"RestartTest\"", instance.getSerializedInput()); + } + } + + @Test + void restartOrchestrationThrowsException() { + final String orchestratorName = "restart"; + final Duration delay = Duration.ofSeconds(3); + final String nonExistentId = "123"; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> ctx.createTimer(delay).await()) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + client.scheduleNewOrchestrationInstance(orchestratorName, "RestartTest"); + + assertThrows( + IllegalArgumentException.class, + () -> client.restartInstance(nonExistentId, true) + ); + } + + } + + @Test + @Disabled("Test is disabled for investigation, fixing the test retry pattern exposed the failure") + void suspendResumeOrchestration() throws TimeoutException, InterruptedException { + final String orchestratorName = "suspend"; + final String eventName = "MyEvent"; + final String eventPayload = "testPayload"; + final Duration suspendTimeout = Duration.ofSeconds(5); + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + String payload = ctx.waitForExternalEvent(eventName, String.class).await(); + ctx.complete(payload); + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + client.suspendInstance(instanceId); + OrchestrationMetadata instance = client.waitForInstanceStart(instanceId, defaultTimeout); + assertNotNull(instance); + 
assertEquals(OrchestrationRuntimeStatus.SUSPENDED, instance.getRuntimeStatus()); + + client.raiseEvent(instanceId, eventName, eventPayload); + + assertThrows( + TimeoutException.class, + () -> client.waitForInstanceCompletion(instanceId, suspendTimeout, false), + "Expected to throw TimeoutException, but it didn't" + ); + + String resumeReason = "Resume for testing."; + client.resumeInstance(instanceId, resumeReason); + instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(instanceId, instance.getInstanceId()); + assertEquals(eventPayload, instance.readOutputAs(String.class)); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + } + } + + @Test + @Disabled("Test is disabled for investigation)") + void terminateSuspendOrchestration() throws TimeoutException, InterruptedException { + final String orchestratorName = "suspendResume"; + final String eventName = "MyEvent"; + final String eventPayload = "testPayload"; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + String payload = ctx.waitForExternalEvent(eventName, String.class).await(); + ctx.complete(payload); + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + String suspendReason = "Suspend for testing."; + client.suspendInstance(instanceId, suspendReason); + client.terminate(instanceId, null); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, false); + assertNotNull(instance); + assertEquals(instanceId, instance.getInstanceId()); + assertEquals(OrchestrationRuntimeStatus.TERMINATED, instance.getRuntimeStatus()); + } + } + + @Test + void activityFanOut() throws IOException, TimeoutException { + final String orchestratorName = "ActivityFanOut"; + final String activityName = "ToString"; + final int activityCount = 10; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + // Schedule each task to run in parallel + List> parallelTasks = IntStream.range(0, activityCount) + .mapToObj(i -> ctx.callActivity(activityName, i, String.class)) + .collect(Collectors.toList()); + + // Wait for all tasks to complete, then sort and reverse the results + List results = ctx.allOf(parallelTasks).await(); + Collections.sort(results); + Collections.reverse(results); + ctx.complete(results); + }) + .addActivity(activityName, ctx -> ctx.getInput(Object.class).toString()) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, 0); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + + List output = instance.readOutputAs(List.class); + assertNotNull(output); + assertEquals(activityCount, output.size()); + assertEquals(String.class, output.get(0).getClass()); + + // Expected: ["9", "8", "7", "6", "5", "4", "3", "2", "1", "0"] + for (int i = 0; i < activityCount; i++) { + String expected = String.valueOf(activityCount - i - 1); + assertEquals(expected, output.get(i).toString()); + } + } + } + + @Test + void externalEvents() throws IOException, 
TimeoutException { + final String orchestratorName = "ExternalEvents"; + final String eventName = "MyEvent"; + final int eventCount = 10; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + int i; + for (i = 0; i < eventCount; i++) { + // block until the event is received + int payload = ctx.waitForExternalEvent(eventName, int.class).await(); + if (payload != i) { + ctx.complete(-1); + return; + } + } + + ctx.complete(i); + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + + for (int i = 0; i < eventCount; i++) { + client.raiseEvent(instanceId, eventName, i); + } + + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + + int output = instance.readOutputAs(int.class); + assertEquals(eventCount, output); + } + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + void externalEventsWithTimeouts(boolean raiseEvent) throws IOException, TimeoutException { + final String orchestratorName = "ExternalEventsWithTimeouts"; + final String eventName = "MyEvent"; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + try { + ctx.waitForExternalEvent(eventName, Duration.ofSeconds(3)).await(); + ctx.complete("received"); + } catch (TaskCanceledException e) { + ctx.complete(e.getMessage()); + } + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + + client.waitForInstanceStart(instanceId, defaultTimeout); + if (raiseEvent) { + client.raiseEvent(instanceId, eventName); + } + + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + + String output = instance.readOutputAs(String.class); + if (raiseEvent) { + assertEquals("received", output); + } else { + assertEquals("Timeout of PT3S expired while waiting for an event named '" + eventName + "' (ID = 0).", output); + } + } + } + + @Test + void setCustomStatus() throws TimeoutException { + final String orchestratorName = "SetCustomStatus"; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + ctx.setCustomStatus("Started!"); + Object customStatus = ctx.waitForExternalEvent("StatusEvent", Object.class).await(); + ctx.setCustomStatus(customStatus); + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + + OrchestrationMetadata metadata = client.waitForInstanceStart(instanceId, defaultTimeout, true); + assertNotNull(metadata); + assertEquals("Started!", metadata.readCustomStatusAs(String.class)); + + Map payload = new HashMap() {{ + put("Hello", 45); + }}; + client.raiseEvent(metadata.getInstanceId(), "StatusEvent", payload); + + metadata = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(metadata); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, metadata.getRuntimeStatus()); + 
assertTrue(metadata.isCustomStatusFetched()); + assertEquals(payload, metadata.readCustomStatusAs(HashMap.class)); + } + } + + @Test + void clearCustomStatus() throws TimeoutException { + final String orchestratorName = "ClearCustomStatus"; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + ctx.setCustomStatus("Started!"); + ctx.waitForExternalEvent("StatusEvent").await(); + ctx.clearCustomStatus(); + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + + OrchestrationMetadata metadata = client.waitForInstanceStart(instanceId, defaultTimeout, true); + assertNotNull(metadata); + assertEquals("Started!", metadata.readCustomStatusAs(String.class)); + + client.raiseEvent(metadata.getInstanceId(), "StatusEvent"); + + metadata = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(metadata); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, metadata.getRuntimeStatus()); + assertFalse(metadata.isCustomStatusFetched()); + } + } + + // due to clock drift, client/worker and sidecar time are not exactly synchronized, this test needs to accommodate for client vs backend timestamps difference + @Test + @Disabled("Test is disabled for investigation, fixing the test retry pattern exposed the failure") + void multiInstanceQuery() throws TimeoutException { + final String plusOne = "plusOne"; + final String waitForEvent = "waitForEvent"; + final DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(plusOne, ctx -> { + int value = ctx.getInput(int.class); + for (int i = 0; i < 10; i++) { + value = ctx.callActivity(plusOne, value, int.class).await(); + } + ctx.complete(value); + }) + .addActivity(plusOne, ctx -> ctx.getInput(int.class) + 1) + .addOrchestrator(waitForEvent, ctx -> { + String name = ctx.getInput(String.class); + String output = ctx.waitForExternalEvent(name, String.class).await(); + ctx.complete(output); + }).buildAndStart(); + + try (worker; client) { + Instant startTime = Instant.now(); + String prefix = startTime.toString(); + + IntStream.range(0, 5).mapToObj(i -> { + String instanceId = String.format("%s.sequence.%d", prefix, i); + client.scheduleNewOrchestrationInstance(plusOne, 0, instanceId); + return instanceId; + }).collect(Collectors.toUnmodifiableList()).forEach(id -> { + try { + client.waitForInstanceCompletion(id, defaultTimeout, true); + } catch (TimeoutException e) { + e.printStackTrace(); + } + }); + + try { + Thread.sleep(2000); + } catch (InterruptedException e) { + } + + Instant sequencesFinishedTime = Instant.now(); + + IntStream.range(0, 5).mapToObj(i -> { + String instanceId = String.format("%s.waiter.%d", prefix, i); + client.scheduleNewOrchestrationInstance(waitForEvent, String.valueOf(i), instanceId); + return instanceId; + }).collect(Collectors.toUnmodifiableList()).forEach(id -> { + try { + client.waitForInstanceStart(id, defaultTimeout); + } catch (TimeoutException e) { + e.printStackTrace(); + } + }); + + // Create one query object and reuse it for multiple queries + OrchestrationStatusQuery query = new OrchestrationStatusQuery(); + OrchestrationStatusQueryResult result = null; + + // Return all instances + result = client.queryInstances(query); + assertEquals(10, result.getOrchestrationState().size()); + + // Test CreatedTimeTo 
filter + query.setCreatedTimeTo(startTime.minus(Duration.ofSeconds(1))); + result = client.queryInstances(query); + assertTrue(result.getOrchestrationState().isEmpty(), + "Result should be empty but found " + result.getOrchestrationState().size() + " instances: " + + "Start time: " + startTime + ", " + + result.getOrchestrationState().stream() + .map(state -> String.format("\nID: %s, Status: %s, Created: %s", + state.getInstanceId(), + state.getRuntimeStatus(), + state.getCreatedAt())) + .collect(Collectors.joining(", "))); + + query.setCreatedTimeTo(sequencesFinishedTime); + result = client.queryInstances(query); + // Verify all returned instances contain "sequence" in their IDs + assertEquals(5, result.getOrchestrationState().stream() + .filter(state -> state.getInstanceId().contains("sequence")) + .count(), + "Expected exactly 5 instances with 'sequence' in their IDs"); + + query.setCreatedTimeTo(Instant.now().plus(Duration.ofSeconds(1))); + result = client.queryInstances(query); + assertEquals(10, result.getOrchestrationState().size()); + + // Test CreatedTimeFrom filter + query.setCreatedTimeFrom(Instant.now().plus(Duration.ofSeconds(1))); + result = client.queryInstances(query); + assertTrue(result.getOrchestrationState().isEmpty()); + + query.setCreatedTimeFrom(sequencesFinishedTime.minus(Duration.ofSeconds(5))); + result = client.queryInstances(query); + assertEquals(5, result.getOrchestrationState().stream() + .filter(state -> state.getInstanceId().contains("sequence")) + .count(), + "Expected exactly 5 instances with 'sequence' in their IDs"); + + query.setCreatedTimeFrom(startTime.minus(Duration.ofSeconds(1))); + result = client.queryInstances(query); + assertEquals(10, result.getOrchestrationState().size()); + + // Test RuntimeStatus filter + HashSet statusFilters = Stream.of( + OrchestrationRuntimeStatus.PENDING, + OrchestrationRuntimeStatus.FAILED, + OrchestrationRuntimeStatus.TERMINATED + ).collect(Collectors.toCollection(HashSet::new)); + + query.setRuntimeStatusList(new ArrayList<>(statusFilters)); + result = client.queryInstances(query); + assertTrue(result.getOrchestrationState().isEmpty()); + + statusFilters.add(OrchestrationRuntimeStatus.RUNNING); + query.setRuntimeStatusList(new ArrayList<>(statusFilters)); + result = client.queryInstances(query); + assertEquals(5, result.getOrchestrationState().size()); + + statusFilters.add(OrchestrationRuntimeStatus.COMPLETED); + query.setRuntimeStatusList(new ArrayList<>(statusFilters)); + result = client.queryInstances(query); + assertEquals(10, result.getOrchestrationState().size()); + + statusFilters.remove(OrchestrationRuntimeStatus.RUNNING); + query.setRuntimeStatusList(new ArrayList<>(statusFilters)); + result = client.queryInstances(query); + assertEquals(5, result.getOrchestrationState().size()); + + statusFilters.clear(); + query.setRuntimeStatusList(new ArrayList<>(statusFilters)); + result = client.queryInstances(query); + assertEquals(10, result.getOrchestrationState().size()); + + // Test InstanceIdPrefix + query.setInstanceIdPrefix("Foo"); + result = client.queryInstances(query); + assertTrue(result.getOrchestrationState().isEmpty()); + + query.setInstanceIdPrefix(prefix); + result = client.queryInstances(query); + assertEquals(10, result.getOrchestrationState().size()); + + // Test PageSize and ContinuationToken + HashSet instanceIds = new HashSet<>(); + query.setMaxInstanceCount(0); + while (query.getMaxInstanceCount() < 10) { + query.setMaxInstanceCount(query.getMaxInstanceCount() + 1); + result = 
client.queryInstances(query); + int total = result.getOrchestrationState().size(); + assertEquals(query.getMaxInstanceCount(), total); + result.getOrchestrationState().forEach(state -> assertTrue(instanceIds.add(state.getInstanceId()))); + while (total < 10) { + query.setContinuationToken(result.getContinuationToken()); + result = client.queryInstances(query); + int count = result.getOrchestrationState().size(); + assertNotEquals(0, count); + assertTrue(count <= query.getMaxInstanceCount()); + total += count; + assertTrue(total <= 10); + result.getOrchestrationState().forEach(state -> assertTrue(instanceIds.add(state.getInstanceId()))); + } + query.setContinuationToken(null); + instanceIds.clear(); + } + + // Test ShowInput + query.setFetchInputsAndOutputs(true); + query.setCreatedTimeFrom(sequencesFinishedTime); + result = client.queryInstances(query); + result.getOrchestrationState().forEach(state -> assertNotNull(state.readInputAs(String.class))); + + query.setFetchInputsAndOutputs(false); + query.setCreatedTimeFrom(sequencesFinishedTime); + result = client.queryInstances(query); + result.getOrchestrationState().forEach(state -> assertThrows(IllegalStateException.class, () -> state.readInputAs(String.class))); + } + } + + @Test + void purgeInstanceId() throws TimeoutException { + final String orchestratorName = "PurgeInstance"; + final String plusOneActivityName = "PlusOne"; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + int value = ctx.getInput(int.class); + value = ctx.callActivity(plusOneActivityName, value, int.class).await(); + ctx.complete(value); + }) + .addActivity(plusOneActivityName, ctx -> ctx.getInput(int.class) + 1) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, 0); + OrchestrationMetadata metadata = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(metadata); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, metadata.getRuntimeStatus()); + assertEquals(1, metadata.readOutputAs(int.class)); + + PurgeResult result = client.purgeInstance(instanceId); + assertEquals(1, result.getDeletedInstanceCount()); + + metadata = client.getInstanceMetadata(instanceId, true); + assertFalse(metadata.isInstanceFound()); + } + } + + @Test + @Disabled("Test is disabled as is not supported by the sidecar") + void purgeInstanceFilter() throws TimeoutException { + final String orchestratorName = "PurgeInstance"; + final String plusOne = "PlusOne"; + final String plusTwo = "PlusTwo"; + final String terminate = "Termination"; + + final Duration delay = Duration.ofSeconds(1); + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + int value = ctx.getInput(int.class); + value = ctx.callActivity(plusOne, value, int.class).await(); + ctx.complete(value); + }) + .addActivity(plusOne, ctx -> ctx.getInput(int.class) + 1) + .addOrchestrator(plusOne, ctx -> { + int value = ctx.getInput(int.class); + value = ctx.callActivity(plusOne, value, int.class).await(); + ctx.complete(value); + }) + .addOrchestrator(plusTwo, ctx -> { + int value = ctx.getInput(int.class); + value = ctx.callActivity(plusTwo, value, int.class).await(); + ctx.complete(value); + }) + .addActivity(plusTwo, ctx -> ctx.getInput(int.class) + 2) + .addOrchestrator(terminate, ctx -> ctx.createTimer(delay).await()) + .buildAndStart(); + + 
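+ // The 'terminate' orchestrator above only waits on a short timer; the test terminates that instance mid-run
+ // so the runtime-status purge filter later has a TERMINATED instance to match alongside the COMPLETED ones.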
DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + Instant startTime = Instant.now(); + + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, 0); + OrchestrationMetadata metadata = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(metadata); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, metadata.getRuntimeStatus()); + assertEquals(1, metadata.readOutputAs(int.class)); + + // Test CreatedTimeFrom + PurgeInstanceCriteria criteria = new PurgeInstanceCriteria(); + criteria.setCreatedTimeFrom(startTime.minus(Duration.ofSeconds(1))); + + PurgeResult result = client.purgeInstances(criteria); + assertEquals(1, result.getDeletedInstanceCount()); + metadata = client.getInstanceMetadata(instanceId, true); + assertFalse(metadata.isInstanceFound()); + + // Test CreatedTimeTo + criteria.setCreatedTimeTo(Instant.now()); + + result = client.purgeInstances(criteria); + assertEquals(0, result.getDeletedInstanceCount()); + metadata = client.getInstanceMetadata(instanceId, true); + assertFalse(metadata.isInstanceFound()); + + // Test CreatedTimeFrom, CreatedTimeTo, and RuntimeStatus + String instanceId1 = client.scheduleNewOrchestrationInstance(plusOne, 0); + metadata = client.waitForInstanceCompletion(instanceId1, defaultTimeout, true); + assertNotNull(metadata); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, metadata.getRuntimeStatus()); + assertEquals(1, metadata.readOutputAs(int.class)); + + String instanceId2 = client.scheduleNewOrchestrationInstance(plusTwo, 10); + metadata = client.waitForInstanceCompletion(instanceId2, defaultTimeout, true); + assertNotNull(metadata); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, metadata.getRuntimeStatus()); + assertEquals(12, metadata.readOutputAs(int.class)); + + String instanceId3 = client.scheduleNewOrchestrationInstance(terminate); + client.terminate(instanceId3, terminate); + metadata = client.waitForInstanceCompletion(instanceId3, defaultTimeout, true); + assertNotNull(metadata); + assertEquals(OrchestrationRuntimeStatus.TERMINATED, metadata.getRuntimeStatus()); + assertEquals(terminate, metadata.readOutputAs(String.class)); + + HashSet runtimeStatusFilters = Stream.of( + OrchestrationRuntimeStatus.TERMINATED, + OrchestrationRuntimeStatus.COMPLETED + ).collect(Collectors.toCollection(HashSet::new)); + + criteria.setCreatedTimeTo(Instant.now()); + criteria.setRuntimeStatusList(new ArrayList<>(runtimeStatusFilters)); + result = client.purgeInstances(criteria); + + assertEquals(3, result.getDeletedInstanceCount()); + metadata = client.getInstanceMetadata(instanceId1, true); + assertFalse(metadata.isInstanceFound()); + metadata = client.getInstanceMetadata(instanceId2, true); + assertFalse(metadata.isInstanceFound()); + metadata = client.getInstanceMetadata(instanceId3, true); + assertFalse(metadata.isInstanceFound()); + } + } + + @Test + void purgeInstanceFilterTimeout() throws TimeoutException { + final String orchestratorName = "PurgeInstance"; + final String plusOne = "PlusOne"; + final String plusTwo = "PlusTwo"; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + int value = ctx.getInput(int.class); + value = ctx.callActivity(plusOne, value, int.class).await(); + ctx.complete(value); + }) + .addActivity(plusOne, ctx -> ctx.getInput(int.class) + 1) + .addOrchestrator(plusOne, ctx -> { + int value = ctx.getInput(int.class); + value = ctx.callActivity(plusOne, value, 
int.class).await(); + ctx.complete(value); + }) + .addOrchestrator(plusTwo, ctx -> { + int value = ctx.getInput(int.class); + value = ctx.callActivity(plusTwo, value, int.class).await(); + ctx.complete(value); + }) + .addActivity(plusTwo, ctx -> ctx.getInput(int.class) + 2) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + Instant startTime = Instant.now(); + + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, 0); + OrchestrationMetadata metadata = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(metadata); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, metadata.getRuntimeStatus()); + assertEquals(1, metadata.readOutputAs(int.class)); + + String instanceId1 = client.scheduleNewOrchestrationInstance(plusOne, 0); + metadata = client.waitForInstanceCompletion(instanceId1, defaultTimeout, true); + assertNotNull(metadata); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, metadata.getRuntimeStatus()); + assertEquals(1, metadata.readOutputAs(int.class)); + + String instanceId2 = client.scheduleNewOrchestrationInstance(plusTwo, 10); + metadata = client.waitForInstanceCompletion(instanceId2, defaultTimeout, true); + assertNotNull(metadata); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, metadata.getRuntimeStatus()); + assertEquals(12, metadata.readOutputAs(int.class)); + + PurgeInstanceCriteria criteria = new PurgeInstanceCriteria(); + criteria.setCreatedTimeFrom(startTime); + criteria.setTimeout(Duration.ofNanos(1)); + + assertThrows(TimeoutException.class, () -> client.purgeInstances(criteria)); + } + } + + @Test + void waitForInstanceStartThrowsException() { + final String orchestratorName = "orchestratorName"; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + try { + // The orchestration remains in the "Pending" state until the first await statement + TimeUnit.SECONDS.sleep(5); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + var instanceId = UUID.randomUUID().toString(); + Thread thread = new Thread(() -> { + client.scheduleNewOrchestrationInstance(orchestratorName, null, instanceId); + }); + thread.start(); + + assertThrows(TimeoutException.class, () -> client.waitForInstanceStart(instanceId, Duration.ofSeconds(2))); + } + } + + @Test + void waitForInstanceCompletionThrowsException() { + final String orchestratorName = "orchestratorName"; + final String plusOneActivityName = "PlusOne"; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + int value = ctx.getInput(int.class); + value = ctx.callActivity(plusOneActivityName, value, int.class).await(); + ctx.complete(value); + }) + .addActivity(plusOneActivityName, ctx -> { + try { + // The orchestration is started but not completed within the orchestration completion timeout due the below activity delay + TimeUnit.SECONDS.sleep(5); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + return ctx.getInput(int.class) + 1; + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, 0); + assertThrows(TimeoutException.class, () -> client.waitForInstanceCompletion(instanceId, 
Duration.ofSeconds(2), false)); + } + } + + @Test + void activityFanOutWithException() throws TimeoutException { + final String orchestratorName = "ActivityFanOut"; + final String activityName = "Divide"; + final int count = 10; + final String exceptionMessage = "2 out of 6 tasks failed with an exception. See the exceptions list for details."; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + // Schedule each task to run in parallel + List> parallelTasks = IntStream.of(1, 2, 0, 4, 0, 6) + .mapToObj(i -> ctx.callActivity(activityName, i, Integer.class)) + .collect(Collectors.toList()); + + // Wait for all tasks to complete + try { + List results = ctx.allOf(parallelTasks).await(); + ctx.complete(results); + } catch (CompositeTaskFailedException e) { + assertNotNull(e); + assertEquals(2, e.getExceptions().size()); + assertEquals(TaskFailedException.class, e.getExceptions().get(0).getClass()); + assertEquals(TaskFailedException.class, e.getExceptions().get(1).getClass()); + // taskId in the exception below is based on parallelTasks input + assertEquals(getExceptionMessage(activityName, 2, "/ by zero"), e.getExceptions().get(0).getMessage()); + assertEquals(getExceptionMessage(activityName, 4, "/ by zero"), e.getExceptions().get(1).getMessage()); + throw e; + } + }) + .addActivity(activityName, ctx -> count / ctx.getInput(Integer.class)) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, 0); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.FAILED, instance.getRuntimeStatus()); + + List output = instance.readOutputAs(List.class); + assertNull(output); + + FailureDetails details = instance.getFailureDetails(); + assertNotNull(details); + assertEquals(exceptionMessage, details.getErrorMessage()); + assertEquals("io.dapr.durabletask.CompositeTaskFailedException", details.getErrorType()); + assertNotNull(details.getStackTrace()); + } + } + + private static String getExceptionMessage(String taskName, int expectedTaskId, String expectedExceptionMessage) { + return String.format( + "Task '%s' (#%d) failed with an unhandled exception: %s", + taskName, + expectedTaskId, + expectedExceptionMessage); + } + + @Test + void thenApply() throws IOException, InterruptedException, TimeoutException { + final String orchestratorName = "thenApplyActivity"; + final String activityName = "Echo"; + final String suffix = "-test"; + final String input = Instant.now().toString(); + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + String activityInput = ctx.getInput(String.class); + String output = ctx.callActivity(activityName, activityInput, String.class).thenApply(s -> s + suffix).await(); + ctx.complete(output); + }) + .addActivity(activityName, ctx -> { + return String.format("Hello, %s!", ctx.getInput(String.class)); + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, input); + OrchestrationMetadata instance = client.waitForInstanceCompletion( + instanceId, + defaultTimeout, + true); + + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, 
instance.getRuntimeStatus()); + String output = instance.readOutputAs(String.class); + String expected = String.format("Hello, %s!%s", input, suffix); + assertEquals(expected, output); + } + } + + @Test + void externalEventThenAccept() throws InterruptedException, TimeoutException { + final String orchestratorName = "continueAsNewWithExternalEvents"; + final String eventName = "MyEvent"; + final int expectedEventCount = 10; + DurableTaskGrpcWorker worker = this.createWorkerBuilder().addOrchestrator(orchestratorName, ctx -> { + int receivedEventCount = ctx.getInput(int.class); + + if (receivedEventCount < expectedEventCount) { + ctx.waitForExternalEvent(eventName, int.class) + .thenAccept(s -> { + ctx.continueAsNew(receivedEventCount + 1); + return; + }) + .await(); + } else { + ctx.complete(receivedEventCount); + } + }).buildAndStart(); + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, 0); + + for (int i = 0; i < expectedEventCount; i++) { + client.raiseEvent(instanceId, eventName, i); + } + + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + assertEquals(expectedEventCount, instance.readOutputAs(int.class)); + } + } + + @Test + void activityAllOf() throws IOException, TimeoutException { + final String orchestratorName = "ActivityAllOf"; + final String activityName = "ToString"; + final String retryActivityName = "RetryToString"; + final int activityMiddle = 5; + final int activityCount = 10; + final AtomicBoolean throwException = new AtomicBoolean(true); + final RetryPolicy retryPolicy = new RetryPolicy(2, Duration.ofSeconds(5)); + final TaskOptions taskOptions = TaskOptions.withRetryPolicy(retryPolicy); + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + List> parallelTasks = IntStream.range(0, activityMiddle * 2) + .mapToObj(i -> { + if (i < activityMiddle) { + return ctx.callActivity(activityName, i, String.class); + } else { + return ctx.callActivity(retryActivityName, i, taskOptions, String.class); + } + }) + .collect(Collectors.toList()); + + // Wait for all tasks to complete, then sort and reverse the results + List results = ctx.allOf(parallelTasks).await(); + Collections.sort(results); + Collections.reverse(results); + ctx.complete(results); + }) + .addActivity(activityName, ctx -> ctx.getInput(Object.class).toString()) + .addActivity(retryActivityName, ctx -> { + if (throwException.get()) { + throwException.compareAndSet(true, false); + throw new RuntimeException("test retry"); + } + return ctx.getInput(Object.class).toString(); + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, 0); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + + List output = instance.readOutputAs(List.class); + assertNotNull(output); + assertEquals(activityCount, output.size()); + assertEquals(String.class, output.get(0).getClass()); + + // Expected: ["9", "8", "7", "6", "5", "4", "3", "2", "1", "0"] + for (int i = 0; i < activityCount; i++) { + 
String expected = String.valueOf(activityCount - i - 1); + assertEquals(expected, output.get(i).toString()); + } + } + } + + @Test + void activityAllOfException() throws IOException, TimeoutException { + final String orchestratorName = "ActivityAllOf"; + final String activityName = "ToString"; + final String retryActivityName = "RetryToStringException"; + final String result = "test fail"; + final int activityMiddle = 5; + final RetryPolicy retryPolicy = new RetryPolicy(2, Duration.ofSeconds(5)); + final TaskOptions taskOptions = TaskOptions.withRetryPolicy(retryPolicy); + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + List> parallelTasks = IntStream.range(0, activityMiddle * 2) + .mapToObj(i -> { + if (i < activityMiddle) { + return ctx.callActivity(activityName, i, String.class); + } else { + return ctx.callActivity(retryActivityName, i, taskOptions, String.class); + } + }) + .collect(Collectors.toList()); + + // Wait for all tasks to complete, then sort and reverse the results + try { + List results = null; + results = ctx.allOf(parallelTasks).await(); + Collections.sort(results); + Collections.reverse(results); + ctx.complete(results); + } catch (CompositeTaskFailedException e) { + // only catch this type of exception to ensure the expected type of exception is thrown out. + for (Exception exception : e.getExceptions()) { + if (exception instanceof TaskFailedException) { + TaskFailedException taskFailedException = (TaskFailedException) exception; + System.out.println("Task: " + taskFailedException.getTaskName() + + " Failed for cause: " + taskFailedException.getErrorDetails().getErrorMessage()); + } + } + } + ctx.complete(result); + }) + .addActivity(activityName, ctx -> ctx.getInput(Object.class).toString()) + .addActivity(retryActivityName, ctx -> { + // only throw exception + throw new RuntimeException("test retry"); + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, 0); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + + String output = instance.readOutputAs(String.class); + assertNotNull(output); + assertEquals(String.class, output.getClass()); + assertEquals(result, output); + } + } + + @Test + void activityAnyOf() throws IOException, TimeoutException { + final String orchestratorName = "ActivityAnyOf"; + final String activityName = "ToString"; + final String retryActivityName = "RetryToString"; + final int activityMiddle = 5; + final int activityCount = 10; + final AtomicBoolean throwException = new AtomicBoolean(true); + final RetryPolicy retryPolicy = new RetryPolicy(2, Duration.ofSeconds(5)); + final TaskOptions taskOptions = TaskOptions.withRetryPolicy(retryPolicy); + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + List> parallelTasks = IntStream.range(0, activityMiddle * 2) + .mapToObj(i -> { + if (i < activityMiddle) { + return ctx.callActivity(activityName, i, String.class); + } else { + return ctx.callActivity(retryActivityName, i, taskOptions, String.class); + } + }) + .collect(Collectors.toList()); + + String results = (String) ctx.anyOf(parallelTasks).await().await(); + ctx.complete(results); + }) + .addActivity(activityName, ctx -> 
ctx.getInput(Object.class).toString()) + .addActivity(retryActivityName, ctx -> { + if (throwException.get()) { + throwException.compareAndSet(true, false); + throw new RuntimeException("test retry"); + } + return ctx.getInput(Object.class).toString(); + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, 0); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + + String output = instance.readOutputAs(String.class); + assertNotNull(output); + assertTrue(Integer.parseInt(output) >= 0 && Integer.parseInt(output) < activityCount); + } + } + + @Test + public void newUUIDTest() { + String orchestratorName = "test-new-uuid"; + String echoActivityName = "Echo"; + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + // Test 1: Ensure two consecutively created UUIDs are unique + UUID currentUUID0 = ctx.newUuid(); + UUID currentUUID1 = ctx.newUuid(); + if (currentUUID0.equals(currentUUID1)) { + ctx.complete(false); + } + + // Test 2: Ensure that the same UUID values are created on each replay + UUID originalUUID1 = ctx.callActivity(echoActivityName, currentUUID1, UUID.class).await(); + if (!currentUUID1.equals(originalUUID1)) { + ctx.complete(false); + } + + // Test 3: Ensure that the same UUID values are created on each replay even after an await + UUID currentUUID2 = ctx.newUuid(); + UUID originalUUID2 = ctx.callActivity(echoActivityName, currentUUID2, UUID.class).await(); + if (!currentUUID2.equals(originalUUID2)) { + ctx.complete(false); + } + + // Test 4: Finish confirming that every generated UUID is unique + if (currentUUID1.equals(currentUUID2)) ctx.complete(false); + else ctx.complete(true); + }) + .addActivity(echoActivityName, ctx -> { + System.out.println("##### echoActivityName: " + ctx.getInput(UUID.class)); + return ctx.getInput(UUID.class); + }) + .buildAndStart(); + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + assertTrue(instance.readOutputAs(boolean.class)); + } catch (TimeoutException e) { + throw new RuntimeException(e); + } + } + + + @Test + public void taskExecutionIdTest() { + var orchestratorName = "test-task-execution-id"; + var retryActivityName = "RetryN"; + final RetryPolicy retryPolicy = new RetryPolicy(4, Duration.ofSeconds(3)); + final TaskOptions taskOptions = TaskOptions.withRetryPolicy(retryPolicy); + + var execMap = new HashMap(); + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + ctx.callActivity(retryActivityName, null, taskOptions).await(); + ctx.callActivity(retryActivityName, null, taskOptions).await(); + ctx.complete(true); + }) + .addActivity(retryActivityName, ctx -> { + System.out.println("##### RetryN[executionId]: " + ctx.getTaskExecutionId()); + var c = execMap.get(ctx.getTaskExecutionId()); + if (c == null) { + c = 0; + } else { + c++; + } + + execMap.put(ctx.getTaskExecutionId(), c); +
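+ // The counter below makes each logical activity call fail twice and succeed on the third attempt, well within
+ // the 4-attempt retry policy; this relies on retries of the same call sharing one task execution id, which is
+ // also why the two activity calls scheduled above leave exactly two entries in execMap.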
if (c < 2) { + throw new RuntimeException("test retry"); + } + return null; + }) + .buildAndStart(); + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + assertEquals(2, execMap.size()); + assertTrue(instance.readOutputAs(boolean.class)); + } catch (TimeoutException e) { + throw new RuntimeException(e); + } + + } + +} + + diff --git a/durabletask-client/src/test/java/io/dapr/durabletask/DurableTaskGrpcClientTlsTest.java b/durabletask-client/src/test/java/io/dapr/durabletask/DurableTaskGrpcClientTlsTest.java new file mode 100644 index 0000000000..b60b26be74 --- /dev/null +++ b/durabletask-client/src/test/java/io/dapr/durabletask/DurableTaskGrpcClientTlsTest.java @@ -0,0 +1,342 @@ +///* +// * Copyright 2025 The Dapr Authors +// * Licensed under the Apache License, Version 2.0 (the "License"); +// * you may not use this file except in compliance with the License. +// * You may obtain a copy of the License at +// * http://www.apache.org/licenses/LICENSE-2.0 +// * Unless required by applicable law or agreed to in writing, software +// * distributed under the License is distributed on an "AS IS" BASIS, +// * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// * See the License for the specific language governing permissions and +//limitations under the License. +//*/ +//package io.dapr.durabletask; +// +//import org.junit.jupiter.api.AfterEach; +//import org.junit.jupiter.api.Test; +//import org.junit.jupiter.api.io.TempDir; +//import org.junit.jupiter.api.condition.EnabledOnOs; +//import org.junit.jupiter.api.condition.OS; +//import org.junit.jupiter.api.Assumptions; +// +//import java.io.File; +//import java.nio.file.Files; +//import java.nio.file.Path; +//import java.security.KeyPair; +//import java.security.KeyPairGenerator; +//import java.security.cert.X509Certificate; +//import java.util.Base64; +//import java.util.Date; +//import java.math.BigInteger; +// +//import org.bouncycastle.asn1.x500.X500Name; +//import org.bouncycastle.asn1.x509.SubjectPublicKeyInfo; +//import org.bouncycastle.cert.X509v3CertificateBuilder; +//import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter; +//import org.bouncycastle.operator.ContentSigner; +//import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder; +// +//import static org.junit.jupiter.api.Assertions.*; +// +//public class DurableTaskGrpcClientTlsTest { +// private static final int DEFAULT_PORT = 4001; +// private static final String DEFAULT_SIDECAR_IP = "127.0.0.1"; +// +// @TempDir +// Path tempDir; +// +// // Track the client for cleanup +// private DurableTaskGrpcClient client; +// +// @AfterEach +// void tearDown() throws Exception { +// if (client != null) { +// client.close(); +// client = null; +// } +// } +// +// // Helper method to generate a key pair for testing +// private static KeyPair generateKeyPair() throws Exception { +// KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("RSA"); +// keyPairGenerator.initialize(2048); +// return keyPairGenerator.generateKeyPair(); +// } +// +// // Helper method to generate a self-signed certificate +// private static X509Certificate generateCertificate(KeyPair keyPair) throws Exception { +// X500Name issuer = 
new X500Name("CN=Test Certificate"); +// X500Name subject = new X500Name("CN=Test Certificate"); +// Date notBefore = new Date(System.currentTimeMillis() - 24 * 60 * 60 * 1000); +// Date notAfter = new Date(System.currentTimeMillis() + 365 * 24 * 60 * 60 * 1000L); +// SubjectPublicKeyInfo publicKeyInfo = SubjectPublicKeyInfo.getInstance(keyPair.getPublic().getEncoded()); +// X509v3CertificateBuilder certBuilder = new X509v3CertificateBuilder( +// issuer, +// BigInteger.valueOf(System.currentTimeMillis()), +// notBefore, +// notAfter, +// subject, +// publicKeyInfo +// ); +// ContentSigner signer = new JcaContentSignerBuilder("SHA256withRSA").build(keyPair.getPrivate()); +// return new JcaX509CertificateConverter().getCertificate(certBuilder.build(signer)); +// } +// +// private static void writeCertificateToFile(X509Certificate cert, File file) throws Exception { +// String certPem = "-----BEGIN CERTIFICATE-----\n" + +// Base64.getEncoder().encodeToString(cert.getEncoded()) + +// "\n-----END CERTIFICATE-----"; +// Files.write(file.toPath(), certPem.getBytes()); +// } +// +// private static void writePrivateKeyToFile(KeyPair keyPair, File file) throws Exception { +// String keyPem = "-----BEGIN PRIVATE KEY-----\n" + +// Base64.getEncoder().encodeToString(keyPair.getPrivate().getEncoded()) + +// "\n-----END PRIVATE KEY-----"; +// Files.write(file.toPath(), keyPem.getBytes()); +// } +// +// @Test +// public void testBuildGrpcManagedChannelWithTls() throws Exception { +// // Generate test certificate and key +// KeyPair keyPair = generateKeyPair(); +// X509Certificate cert = generateCertificate(keyPair); +// +// File certFile = File.createTempFile("test-cert", ".pem"); +// File keyFile = File.createTempFile("test-key", ".pem"); +// try { +// writeCertificateToFile(cert, certFile); +// writePrivateKeyToFile(keyPair, keyFile); +// +// client = (DurableTaskGrpcClient) new DurableTaskGrpcClientBuilder() +// .tlsCertPath(certFile.getAbsolutePath()) +// .tlsKeyPath(keyFile.getAbsolutePath()) +// .build(); +// +// assertNotNull(client); +// // Note: We can't easily test the actual TLS configuration without a real server +// } finally { +// certFile.delete(); +// keyFile.delete(); +// } +// } +// +// @Test +// public void testBuildGrpcManagedChannelWithTlsAndEndpoint() throws Exception { +// // Generate test certificate and key +// KeyPair keyPair = generateKeyPair(); +// X509Certificate cert = generateCertificate(keyPair); +// +// File certFile = File.createTempFile("test-cert", ".pem"); +// File keyFile = File.createTempFile("test-key", ".pem"); +// try { +// writeCertificateToFile(cert, certFile); +// writePrivateKeyToFile(keyPair, keyFile); +// +// client = (DurableTaskGrpcClient) new DurableTaskGrpcClientBuilder() +// .tlsCertPath(certFile.getAbsolutePath()) +// .tlsKeyPath(keyFile.getAbsolutePath()) +// .port(443) +// .build(); +// +// assertNotNull(client); +// } finally { +// certFile.delete(); +// keyFile.delete(); +// } +// } +// +// @Test +// public void testBuildGrpcManagedChannelWithInvalidTlsCert() { +// assertThrows(RuntimeException.class, () -> { +// new DurableTaskGrpcClientBuilder() +// .tlsCertPath("/nonexistent/cert.pem") +// .tlsKeyPath("/nonexistent/key.pem") +// .build(); +// }); +// } +// +// @Test +// @EnabledOnOs({OS.LINUX, OS.MAC}) +// public void testBuildGrpcManagedChannelWithTlsAndUnixSocket() throws Exception { +// // Skip this test since Unix socket support is not implemented yet +// Assumptions.assumeTrue(false, "Unix socket support not implemented yet"); +// } +// +// 
@Test +// public void testBuildGrpcManagedChannelWithTlsAndDnsAuthority() throws Exception { +// // Generate test certificate and key +// KeyPair keyPair = generateKeyPair(); +// X509Certificate cert = generateCertificate(keyPair); +// +// File certFile = File.createTempFile("test-cert", ".pem"); +// File keyFile = File.createTempFile("test-key", ".pem"); +// try { +// writeCertificateToFile(cert, certFile); +// writePrivateKeyToFile(keyPair, keyFile); +// +// client = (DurableTaskGrpcClient) new DurableTaskGrpcClientBuilder() +// .tlsCertPath(certFile.getAbsolutePath()) +// .tlsKeyPath(keyFile.getAbsolutePath()) +// .port(443) +// .build(); +// +// assertNotNull(client); +// } finally { +// certFile.delete(); +// keyFile.delete(); +// } +// } +// +// @Test +// public void testBuildGrpcManagedChannelWithTlsAndCaCert() throws Exception { +// // Generate test CA certificate +// KeyPair caKeyPair = generateKeyPair(); +// X509Certificate caCert = generateCertificate(caKeyPair); +// +// File caCertFile = File.createTempFile("test-ca-cert", ".pem"); +// try { +// writeCertificateToFile(caCert, caCertFile); +// +// client = (DurableTaskGrpcClient) new DurableTaskGrpcClientBuilder() +// .tlsCaPath(caCertFile.getAbsolutePath()) +// .build(); +// +// assertNotNull(client); +// } finally { +// caCertFile.delete(); +// } +// } +// +// @Test +// public void testBuildGrpcManagedChannelWithTlsAndCaCertAndEndpoint() throws Exception { +// // Generate test CA certificate +// KeyPair caKeyPair = generateKeyPair(); +// X509Certificate caCert = generateCertificate(caKeyPair); +// +// File caCertFile = File.createTempFile("test-ca-cert", ".pem"); +// try { +// writeCertificateToFile(caCert, caCertFile); +// +// client = (DurableTaskGrpcClient) new DurableTaskGrpcClientBuilder() +// .tlsCaPath(caCertFile.getAbsolutePath()) +// .port(443) +// .build(); +// +// assertNotNull(client); +// } finally { +// caCertFile.delete(); +// } +// } +// +// @Test +// public void testBuildGrpcManagedChannelWithInvalidCaCert() { +// assertThrows(RuntimeException.class, () -> { +// new DurableTaskGrpcClientBuilder() +// .tlsCaPath("/nonexistent/ca.pem") +// .build(); +// }); +// } +// +// @Test +// public void testBuildGrpcManagedChannelWithMtlsAndCaCert() throws Exception { +// // Generate test certificates +// KeyPair caKeyPair = generateKeyPair(); +// X509Certificate caCert = generateCertificate(caKeyPair); +// KeyPair clientKeyPair = generateKeyPair(); +// X509Certificate clientCert = generateCertificate(clientKeyPair); +// +// File caCertFile = File.createTempFile("test-ca-cert", ".pem"); +// File clientCertFile = File.createTempFile("test-client-cert", ".pem"); +// File clientKeyFile = File.createTempFile("test-client-key", ".pem"); +// try { +// writeCertificateToFile(caCert, caCertFile); +// writeCertificateToFile(clientCert, clientCertFile); +// writePrivateKeyToFile(clientKeyPair, clientKeyFile); +// +// client = (DurableTaskGrpcClient) new DurableTaskGrpcClientBuilder() +// .tlsCaPath(caCertFile.getAbsolutePath()) +// .tlsCertPath(clientCertFile.getAbsolutePath()) +// .tlsKeyPath(clientKeyFile.getAbsolutePath()) +// .build(); +// +// assertNotNull(client); +// } finally { +// caCertFile.delete(); +// clientCertFile.delete(); +// clientKeyFile.delete(); +// } +// } +// +// @Test +// public void testBuildGrpcManagedChannelWithInsecureTls() throws Exception { +// client = (DurableTaskGrpcClient) new DurableTaskGrpcClientBuilder() +// .insecure(true) +// .port(443) +// .build(); +// +// assertNotNull(client); +// } +// +// 
@Test +// public void testBuildGrpcManagedChannelWithInsecureTlsAndMtls() throws Exception { +// // Generate test certificates +// KeyPair caKeyPair = generateKeyPair(); +// X509Certificate caCert = generateCertificate(caKeyPair); +// KeyPair clientKeyPair = generateKeyPair(); +// X509Certificate clientCert = generateCertificate(clientKeyPair); +// +// File caCertFile = File.createTempFile("test-ca-cert", ".pem"); +// File clientCertFile = File.createTempFile("test-client-cert", ".pem"); +// File clientKeyFile = File.createTempFile("test-client-key", ".pem"); +// try { +// writeCertificateToFile(caCert, caCertFile); +// writeCertificateToFile(clientCert, clientCertFile); +// writePrivateKeyToFile(clientKeyPair, clientKeyFile); +// +// client = (DurableTaskGrpcClient) new DurableTaskGrpcClientBuilder() +// .insecure(true) +// .tlsCaPath(caCertFile.getAbsolutePath()) +// .tlsCertPath(clientCertFile.getAbsolutePath()) +// .tlsKeyPath(clientKeyFile.getAbsolutePath()) +// .port(443) +// .build(); +// +// assertNotNull(client); +// } finally { +// caCertFile.delete(); +// clientCertFile.delete(); +// clientKeyFile.delete(); +// } +// } +// +// @Test +// public void testBuildGrpcManagedChannelWithInsecureTlsAndCustomEndpoint() throws Exception { +// client = (DurableTaskGrpcClient) new DurableTaskGrpcClientBuilder() +// .insecure(true) +// .port(443) +// .build(); +// +// assertNotNull(client); +// } +// +// @Test +// public void testBuildGrpcManagedChannelWithPlaintext() throws Exception { +// // No TLS config provided, should use plaintext +// client = (DurableTaskGrpcClient) new DurableTaskGrpcClientBuilder() +// .port(443) +// .build(); +// +// assertNotNull(client); +// } +// +// @Test +// public void testBuildGrpcManagedChannelWithPlaintextAndCustomEndpoint() throws Exception { +// // No TLS config provided, should use plaintext +// client = (DurableTaskGrpcClient) new DurableTaskGrpcClientBuilder() +// .port(50001) // Custom port +// .build(); +// +// assertNotNull(client); +// } +//} \ No newline at end of file diff --git a/durabletask-client/src/test/java/io/dapr/durabletask/ErrorHandlingIT.java b/durabletask-client/src/test/java/io/dapr/durabletask/ErrorHandlingIT.java new file mode 100644 index 0000000000..f1c868f0a4 --- /dev/null +++ b/durabletask-client/src/test/java/io/dapr/durabletask/ErrorHandlingIT.java @@ -0,0 +1,306 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package io.dapr.durabletask; + +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +import java.time.Duration; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.junit.jupiter.api.Assertions.*; + +/** + * These integration tests are designed to exercise the core, high-level error-handling features of the Durable Task + * programming model. + *

+ * These tests currently require a sidecar process to be running on the local machine (the sidecar is what accepts the + * client operations and sends invocation instructions to the DurableTaskWorker). + */ +@Tag("integration") +public class ErrorHandlingIT extends IntegrationTestBase { + @Test + void orchestratorException() throws TimeoutException { + final String orchestratorName = "OrchestratorWithException"; + final String errorMessage = "Kah-BOOOOOM!!!"; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + throw new RuntimeException(errorMessage); + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, 0); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.FAILED, instance.getRuntimeStatus()); + + FailureDetails details = instance.getFailureDetails(); + assertNotNull(details); + assertEquals("java.lang.RuntimeException", details.getErrorType()); + assertTrue(details.getErrorMessage().contains(errorMessage)); + assertNotNull(details.getStackTrace()); + } + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + void activityException(boolean handleException) throws TimeoutException { + final String orchestratorName = "OrchestratorWithActivityException"; + final String activityName = "Throw"; + final String errorMessage = "Kah-BOOOOOM!!!"; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + try { + ctx.callActivity(activityName).await(); + } catch (TaskFailedException ex) { + if (handleException) { + ctx.complete("handled"); + } else { + throw ex; + } + } + }) + .addActivity(activityName, ctx -> { + throw new RuntimeException(errorMessage); + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, ""); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + + if (handleException) { + String result = instance.readOutputAs(String.class); + assertNotNull(result); + assertEquals("handled", result); + } else { + assertEquals(OrchestrationRuntimeStatus.FAILED, instance.getRuntimeStatus()); + + FailureDetails details = instance.getFailureDetails(); + assertNotNull(details); + + String expectedMessage = String.format( + "Task '%s' (#0) failed with an unhandled exception: %s", + activityName, + errorMessage); + assertEquals(expectedMessage, details.getErrorMessage()); + assertEquals("io.dapr.durabletask.TaskFailedException", details.getErrorType()); + assertNotNull(details.getStackTrace()); + // CONSIDER: Additional validation of getErrorDetails? 
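+ // Note: the activity's RuntimeException reaches the orchestrator as a TaskFailedException, so the failure
+ // details report the wrapper type while the error message above embeds the original exception text.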
+ } + } + } + + @ParameterizedTest + @ValueSource(ints = {1, 2, 10}) + public void retryActivityFailures(int maxNumberOfAttempts) throws TimeoutException { + // There is one task for each activity call and one task between each retry + int expectedTaskCount = (maxNumberOfAttempts * 2) - 1; + this.retryOnFailuresCoreTest(maxNumberOfAttempts, expectedTaskCount, ctx -> { + RetryPolicy retryPolicy = getCommonRetryPolicy(maxNumberOfAttempts); + ctx.callActivity( + "BustedActivity", + null, + TaskOptions.withRetryPolicy(retryPolicy)).await(); + }); + } + + @ParameterizedTest + @ValueSource(ints = {1, 2, 10}) + public void retryActivityFailuresWithCustomLogic(int maxNumberOfAttempts) throws TimeoutException { + // This gets incremented every time the retry handler is invoked + AtomicInteger retryHandlerCalls = new AtomicInteger(); + + // Run the test and get back the details of the last failure + this.retryOnFailuresCoreTest(maxNumberOfAttempts, maxNumberOfAttempts, ctx -> { + RetryHandler retryHandler = getCommonRetryHandler(retryHandlerCalls, maxNumberOfAttempts); + TaskOptions options = TaskOptions.withRetryHandler(retryHandler); + ctx.callActivity("BustedActivity", null, options).await(); + }); + + // Assert that the retry handle got invoked the expected number of times + assertEquals(maxNumberOfAttempts, retryHandlerCalls.get()); + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + void subOrchestrationException(boolean handleException) throws TimeoutException { + final String orchestratorName = "OrchestrationWithBustedSubOrchestrator"; + final String subOrchestratorName = "BustedSubOrchestrator"; + final String errorMessage = "Kah-BOOOOOM!!!"; + + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, ctx -> { + try { + String result = ctx.callSubOrchestrator(subOrchestratorName, "", String.class).await(); + ctx.complete(result); + } catch (TaskFailedException ex) { + if (handleException) { + ctx.complete("handled"); + } else { + throw ex; + } + } + }) + .addOrchestrator(subOrchestratorName, ctx -> { + throw new RuntimeException(errorMessage); + }) + .buildAndStart(); + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, 1); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + if (handleException) { + assertEquals(OrchestrationRuntimeStatus.COMPLETED, instance.getRuntimeStatus()); + String result = instance.readOutputAs(String.class); + assertNotNull(result); + assertEquals("handled", result); + } else { + assertEquals(OrchestrationRuntimeStatus.FAILED, instance.getRuntimeStatus()); + FailureDetails details = instance.getFailureDetails(); + assertNotNull(details); + String expectedMessage = String.format( + "Task '%s' (#0) failed with an unhandled exception: %s", + subOrchestratorName, + errorMessage); + assertEquals(expectedMessage, details.getErrorMessage()); + assertEquals("io.dapr.durabletask.TaskFailedException", details.getErrorType()); + assertNotNull(details.getStackTrace()); + // CONSIDER: Additional validation of getStackTrace? 
+ } + } + } + + @ParameterizedTest + @ValueSource(ints = {1, 2, 10}) + public void retrySubOrchestratorFailures(int maxNumberOfAttempts) throws TimeoutException { + // There is one task for each sub-orchestrator call and one task between each retry + int expectedTaskCount = (maxNumberOfAttempts * 2) - 1; + this.retryOnFailuresCoreTest(maxNumberOfAttempts, expectedTaskCount, ctx -> { + RetryPolicy retryPolicy = getCommonRetryPolicy(maxNumberOfAttempts); + ctx.callSubOrchestrator( + "BustedSubOrchestrator", + null, + null, + TaskOptions.withRetryPolicy(retryPolicy)).await(); + }); + } + + @ParameterizedTest + @ValueSource(ints = {1, 2, 10}) + public void retrySubOrchestrationFailuresWithCustomLogic(int maxNumberOfAttempts) throws TimeoutException { + // This gets incremented every time the retry handler is invoked + AtomicInteger retryHandlerCalls = new AtomicInteger(); + + // Run the test and get back the details of the last failure + this.retryOnFailuresCoreTest(maxNumberOfAttempts, maxNumberOfAttempts, ctx -> { + RetryHandler retryHandler = getCommonRetryHandler(retryHandlerCalls, maxNumberOfAttempts); + TaskOptions options = TaskOptions.withRetryHandler(retryHandler); + ctx.callSubOrchestrator("BustedSubOrchestrator", null, null, options).await(); + }); + + // Assert that the retry handle got invoked the expected number of times + assertEquals(maxNumberOfAttempts, retryHandlerCalls.get()); + } + + private static RetryPolicy getCommonRetryPolicy(int maxNumberOfAttempts) { + // Include a small delay between each retry to exercise the implicit timer path + return new RetryPolicy(maxNumberOfAttempts, Duration.ofMillis(1)); + } + + private static RetryHandler getCommonRetryHandler(AtomicInteger handlerInvocationCounter, int maxNumberOfAttempts) { + return ctx -> { + // Retry handlers get executed on the orchestrator thread and go through replay + if (!ctx.getOrchestrationContext().getIsReplaying()) { + handlerInvocationCounter.getAndIncrement(); + } + + // The isCausedBy() method is designed to handle exception inheritance + if (!ctx.getLastFailure().isCausedBy(Exception.class)) { + return false; + } + + // This is the actual exception type we care about + if (!ctx.getLastFailure().isCausedBy(RuntimeException.class)) { + return false; + } + + // Quit after N attempts + return ctx.getLastAttemptNumber() < maxNumberOfAttempts; + }; + } + + /** + * Shared logic for execution an orchestration with an activity that constantly fails. + * + * @param maxNumberOfAttempts The expected maximum number of activity execution attempts + * @param expectedTaskCount The expected number of tasks to be scheduled by the main orchestration. + * @param mainOrchestration The main orchestration implementation, which is expected to call either the + * "BustedActivity" activity or the "BustedSubOrchestrator" sub-orchestration. + * @return Returns the details of the last activity or sub-orchestration failure. + */ + private FailureDetails retryOnFailuresCoreTest( + int maxNumberOfAttempts, + int expectedTaskCount, + TaskOrchestration mainOrchestration) throws TimeoutException { + final String orchestratorName = "MainOrchestrator"; + + AtomicInteger actualAttemptCount = new AtomicInteger(); + + // The caller of this test provides the top-level orchestration implementation. This method provides both a + // failing sub-orchestration and a failing activity implementation for it to use. The expectation is that the + // main orchestration tries to invoke just one of them and is configured with retry configuration. 
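+ // For example, retryActivityFailures above passes a lambda that awaits ctx.callActivity("BustedActivity", ...)
+ // with TaskOptions.withRetryPolicy(...), so only the activity path runs and isActivityPath flips to true below.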
+ AtomicBoolean isActivityPath = new AtomicBoolean(false); + DurableTaskGrpcWorker worker = this.createWorkerBuilder() + .addOrchestrator(orchestratorName, mainOrchestration) + .addOrchestrator("BustedSubOrchestrator", ctx -> { + actualAttemptCount.getAndIncrement(); + throw new RuntimeException("Error #" + actualAttemptCount.get()); + }) + .addActivity("BustedActivity", ctx -> { + actualAttemptCount.getAndIncrement(); + isActivityPath.set(true); + throw new RuntimeException("Error #" + actualAttemptCount.get()); + }) + .buildAndStart(); + + DurableTaskClient client = new DurableTaskGrpcClientBuilder().build(); + try (worker; client) { + String instanceId = client.scheduleNewOrchestrationInstance(orchestratorName, ""); + OrchestrationMetadata instance = client.waitForInstanceCompletion(instanceId, defaultTimeout, true); + assertNotNull(instance); + assertEquals(OrchestrationRuntimeStatus.FAILED, instance.getRuntimeStatus()); + + // Make sure the exception details are still what we expect + FailureDetails details = instance.getFailureDetails(); + assertNotNull(details); + + // Confirm the number of attempts + assertEquals(maxNumberOfAttempts, actualAttemptCount.get()); + + return details; + } + } +} \ No newline at end of file diff --git a/durabletask-client/src/test/java/io/dapr/durabletask/IntegrationTestBase.java b/durabletask-client/src/test/java/io/dapr/durabletask/IntegrationTestBase.java new file mode 100644 index 0000000000..bbfcde0469 --- /dev/null +++ b/durabletask-client/src/test/java/io/dapr/durabletask/IntegrationTestBase.java @@ -0,0 +1,91 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package io.dapr.durabletask; + +import org.junit.jupiter.api.AfterEach; + +import java.time.Duration; + +public class IntegrationTestBase { + protected static final Duration defaultTimeout = Duration.ofSeconds(10); + + // All tests that create a server should save it to this variable for proper shutdown + private DurableTaskGrpcWorker server; + + @AfterEach + public void shutdown() { + if (this.server != null) { + this.server.stop(); + } + } + + + protected TestDurableTaskWorkerBuilder createWorkerBuilder() { + return new TestDurableTaskWorkerBuilder(); + } + + public class TestDurableTaskWorkerBuilder { + final DurableTaskGrpcWorkerBuilder innerBuilder; + + private TestDurableTaskWorkerBuilder() { + this.innerBuilder = new DurableTaskGrpcWorkerBuilder(); + } + + public DurableTaskGrpcWorker buildAndStart() { + DurableTaskGrpcWorker server = this.innerBuilder.build(); + IntegrationTestBase.this.server = server; + server.start(); + return server; + } + + public TestDurableTaskWorkerBuilder setMaximumTimerInterval(Duration maximumTimerInterval) { + this.innerBuilder.maximumTimerInterval(maximumTimerInterval); + return this; + } + + public TestDurableTaskWorkerBuilder addOrchestrator( + String name, + TaskOrchestration implementation) { + this.innerBuilder.addOrchestration(new TaskOrchestrationFactory() { + @Override + public String getName() { + return name; + } + + @Override + public TaskOrchestration create() { + return implementation; + } + }); + return this; + } + + public TestDurableTaskWorkerBuilder addActivity( + String name, + TaskActivity implementation) { + this.innerBuilder.addActivity(new TaskActivityFactory() { + @Override + public String getName() { + return name; + } + + @Override + public TaskActivity create() { + return implementation; + } + }); + return this; + } + } +} diff --git a/durabletask-client/src/test/java/io/dapr/durabletask/TaskOptionsTest.java b/durabletask-client/src/test/java/io/dapr/durabletask/TaskOptionsTest.java new file mode 100644 index 0000000000..43fad5f526 --- /dev/null +++ b/durabletask-client/src/test/java/io/dapr/durabletask/TaskOptionsTest.java @@ -0,0 +1,142 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.durabletask; + +import org.junit.jupiter.api.Test; + +import java.time.Duration; + +import static org.junit.jupiter.api.Assertions.*; + +/** + * Unit tests for TaskOptions with cross-app workflow support. 
+ */ +public class TaskOptionsTest { + + @Test + void taskOptionsWithAppID() { + TaskOptions options = TaskOptions.withAppID("app1"); + + assertTrue(options.hasAppID()); + assertEquals("app1", options.getAppID()); + assertFalse(options.hasRetryPolicy()); + assertFalse(options.hasRetryHandler()); + } + + @Test + void taskOptionsWithRetryPolicyAndAppID() { + RetryPolicy retryPolicy = new RetryPolicy(3, Duration.ofSeconds(1)); + TaskOptions options = TaskOptions.builder() + .retryPolicy(retryPolicy) + .appID("app2") + .build(); + + assertTrue(options.hasAppID()); + assertEquals("app2", options.getAppID()); + assertTrue(options.hasRetryPolicy()); + assertEquals(retryPolicy, options.getRetryPolicy()); + assertFalse(options.hasRetryHandler()); + } + + @Test + void taskOptionsWithRetryHandlerAndAppID() { + RetryHandler retryHandler = new RetryHandler() { + @Override + public boolean handle(RetryContext context) { + return context.getLastAttemptNumber() < 2; + } + }; + TaskOptions options = TaskOptions.builder() + .retryHandler(retryHandler) + .appID("app3") + .build(); + + assertTrue(options.hasAppID()); + assertEquals("app3", options.getAppID()); + assertFalse(options.hasRetryPolicy()); + assertTrue(options.hasRetryHandler()); + assertEquals(retryHandler, options.getRetryHandler()); + } + + @Test + void taskOptionsWithoutAppID() { + TaskOptions options = TaskOptions.create(); + + assertFalse(options.hasAppID()); + assertNull(options.getAppID()); + } + + @Test + void taskOptionsWithEmptyAppID() { + TaskOptions options = TaskOptions.withAppID(""); + + assertFalse(options.hasAppID()); + assertEquals("", options.getAppID()); + } + + @Test + void taskOptionsWithNullAppID() { + TaskOptions options = TaskOptions.builder().appID(null).build(); + + assertFalse(options.hasAppID()); + assertNull(options.getAppID()); + } + + @Test + void taskOptionsWithRetryPolicy() { + RetryPolicy retryPolicy = new RetryPolicy(5, Duration.ofMinutes(1)); + TaskOptions options = TaskOptions.withRetryPolicy(retryPolicy); + + assertTrue(options.hasRetryPolicy()); + assertEquals(retryPolicy, options.getRetryPolicy()); + assertFalse(options.hasRetryHandler()); + assertFalse(options.hasAppID()); + } + + @Test + void taskOptionsWithRetryHandler() { + RetryHandler retryHandler = new RetryHandler() { + @Override + public boolean handle(RetryContext context) { + return context.getLastAttemptNumber() < 3; + } + }; + TaskOptions options = TaskOptions.withRetryHandler(retryHandler); + + assertTrue(options.hasRetryHandler()); + assertEquals(retryHandler, options.getRetryHandler()); + assertFalse(options.hasRetryPolicy()); + assertFalse(options.hasAppID()); + } + + @Test + void taskOptionsWithBuilderChaining() { + RetryPolicy retryPolicy = new RetryPolicy(3, Duration.ofSeconds(1)); + RetryHandler retryHandler = context -> true; + + TaskOptions options = TaskOptions.builder() + .retryPolicy(retryPolicy) + .retryHandler(retryHandler) + .appID("test-app") + .build(); + + assertNotNull(options); + assertTrue(options.hasRetryPolicy()); + assertEquals(retryPolicy, options.getRetryPolicy()); + assertTrue(options.hasRetryHandler()); + assertEquals(retryHandler, options.getRetryHandler()); + assertTrue(options.hasAppID()); + assertEquals("test-app", options.getAppID()); + } +} \ No newline at end of file diff --git a/pom.xml b/pom.xml index b145738aa9..37c6ecea7d 100644 --- a/pom.xml +++ b/pom.xml @@ -17,6 +17,7 @@ 1.69.0 3.25.5 https://raw.githubusercontent.com/dapr/dapr/v1.16.0-rc.5/dapr/proto + 
https://raw.githubusercontent.com/dapr/durabletask-protobuf/main/protos/orchestrator_service.proto 1.17.0-SNAPSHOT 1.7.1 3.8.1 @@ -41,12 +42,7 @@ 11 11 true - - 2.16.1 + 2.16.2 true true ${maven.multiModuleProjectDirectory}/spotbugs-exclude.xml @@ -80,7 +76,6 @@ 5.7.0 1.7.0 3.5.12 - 1.5.10 2.2.2 2.0.9 3.11.2 @@ -359,6 +354,11 @@ junit-jupiter-engine ${junit-bom.version} + + org.junit.jupiter + junit-jupiter-engine + ${junit-bom.version} + org.junit.jupiter junit-jupiter-params @@ -369,11 +369,6 @@ reactor-core ${reactor.version} - - io.dapr - durabletask-client - ${durabletask-client.version} - com.redis testcontainers-redis @@ -496,6 +491,10 @@ check + + io/dapr/durabletask/**/* + io/dapr/springboot/examples/**/* + BUNDLE @@ -698,6 +697,7 @@ spring-boot-examples testcontainers-dapr + durabletask-client @@ -706,6 +706,7 @@ sdk-tests spring-boot-examples + durabletask-client diff --git a/sdk-workflows/pom.xml b/sdk-workflows/pom.xml index 1773d7fa04..7fd95807f1 100644 --- a/sdk-workflows/pom.xml +++ b/sdk-workflows/pom.xml @@ -45,27 +45,7 @@ io.dapr durabletask-client - - - - com.fasterxml.jackson.core - jackson-core - - - com.fasterxml.jackson.core - jackson-databind - - - com.fasterxml.jackson.core - jackson-annotations - - - com.fasterxml.jackson.datatype - jackson-datatype-jsr310 + ${project.parent.version} diff --git a/sdk-workflows/src/main/java/io/dapr/workflows/runtime/DefaultWorkflowContext.java b/sdk-workflows/src/main/java/io/dapr/workflows/runtime/DefaultWorkflowContext.java index 067850c938..4ccf73e9b9 100644 --- a/sdk-workflows/src/main/java/io/dapr/workflows/runtime/DefaultWorkflowContext.java +++ b/sdk-workflows/src/main/java/io/dapr/workflows/runtime/DefaultWorkflowContext.java @@ -246,7 +246,7 @@ public void continueAsNew(Object input, boolean preserveUnprocessedEvents) { */ @Override public UUID newUuid() { - return this.innerContext.newUUID(); + return this.innerContext.newUuid(); } private TaskOptions toTaskOptions(WorkflowTaskOptions options) { diff --git a/sdk-workflows/src/test/java/io/dapr/workflows/DefaultWorkflowContextTest.java b/sdk-workflows/src/test/java/io/dapr/workflows/DefaultWorkflowContextTest.java index b573e26119..b6ca38ecbc 100644 --- a/sdk-workflows/src/test/java/io/dapr/workflows/DefaultWorkflowContextTest.java +++ b/sdk-workflows/src/test/java/io/dapr/workflows/DefaultWorkflowContextTest.java @@ -422,7 +422,7 @@ public void setCustomStatusWorkflow() { @Test public void newUuidTest() { context.newUuid(); - verify(mockInnerContext, times(1)).newUUID(); + verify(mockInnerContext, times(1)).newUuid(); } @Test From 8e2a3ddb8d2ef06abae771a20225f3d944fd212c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 16 Dec 2025 14:34:53 -0800 Subject: [PATCH 14/17] Bump codecov/codecov-action from 5.5.1 to 5.5.2 (#1607) Bumps [codecov/codecov-action](https://github.com/codecov/codecov-action) from 5.5.1 to 5.5.2. - [Release notes](https://github.com/codecov/codecov-action/releases) - [Changelog](https://github.com/codecov/codecov-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/codecov/codecov-action/compare/v5.5.1...v5.5.2) --- updated-dependencies: - dependency-name: codecov/codecov-action dependency-version: 5.5.2 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Signed-off-by: salaboy --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 1252c515fa..b59173db88 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -32,7 +32,7 @@ jobs: - name: Run tests run: ./mvnw clean install -B -q -DskipITs=true - name: Codecov - uses: codecov/codecov-action@v5.5.1 + uses: codecov/codecov-action@v5.5.2 - name: Upload test report for sdk uses: actions/upload-artifact@v5 with: From 48eed5f05f61a1477b9d0f5ae9c4603a29eecf7f Mon Sep 17 00:00:00 2001 From: artur-ciocanu Date: Sat, 3 Jan 2026 04:15:48 +0200 Subject: [PATCH 15/17] Create Dapr WaitStrategy to improve ITs ergonomics (#1609) * Create Dapr WaitStrategy to improve ITs ergonomics Signed-off-by: Artur Ciocanu * Improve unit tests naming and coverage Signed-off-by: Artur Ciocanu * Fix a potential NPE and remove extra "for..." methods for pubsub and topic. Signed-off-by: Artur Ciocanu * Fix NPE properly for actor match Signed-off-by: Artur Ciocanu * Fix NPE another potential NPEs. Signed-off-by: Artur Ciocanu * Rename to use wait strategy to use abstract prefix. Signed-off-by: Artur Ciocanu * Add more tests to cover null checks Signed-off-by: Artur Ciocanu * Fix pubsub outbox IT. Signed-off-by: Artur Ciocanu * Ignore pubsub outbox for now. Signed-off-by: Artur Ciocanu * Disable pubsub outbox for now. Signed-off-by: Artur Ciocanu --------- Signed-off-by: Artur Ciocanu Signed-off-by: salaboy --- .../pubsub/outbox/DaprPubSubOutboxIT.java | 20 +- .../outbox/ProductWebhookController.java | 11 +- testcontainers-dapr/pom.xml | 4 + .../strategy/AbstractDaprWaitStrategy.java | 143 +++++++++++++ .../wait/strategy/ActorWaitStrategy.java | 68 ++++++ .../wait/strategy/DaprWait.java | 99 +++++++++ .../strategy/SubscriptionWaitStrategy.java | 68 ++++++ .../wait/strategy/metadata/Actor.java | 41 ++++ .../wait/strategy/metadata/Component.java | 61 ++++++ .../wait/strategy/metadata/Metadata.java | 82 ++++++++ .../wait/strategy/metadata/Subscription.java | 107 ++++++++++ .../wait/strategy/ActorWaitStrategyTest.java | 142 +++++++++++++ .../wait/strategy/DaprWaitTest.java | 110 ++++++++++ .../SubscriptionWaitStrategyTest.java | 154 ++++++++++++++ .../wait/strategy/metadata/MetadataTest.java | 197 ++++++++++++++++++ 15 files changed, 1298 insertions(+), 9 deletions(-) create mode 100644 testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/AbstractDaprWaitStrategy.java create mode 100644 testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/ActorWaitStrategy.java create mode 100644 testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/DaprWait.java create mode 100644 testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/SubscriptionWaitStrategy.java create mode 100644 testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/metadata/Actor.java create mode 100644 testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/metadata/Component.java create mode 100644 testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/metadata/Metadata.java create mode 100644 testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/metadata/Subscription.java create mode 100644 testcontainers-dapr/src/test/java/io/dapr/testcontainers/wait/strategy/ActorWaitStrategyTest.java create mode 
100644 testcontainers-dapr/src/test/java/io/dapr/testcontainers/wait/strategy/DaprWaitTest.java create mode 100644 testcontainers-dapr/src/test/java/io/dapr/testcontainers/wait/strategy/SubscriptionWaitStrategyTest.java create mode 100644 testcontainers-dapr/src/test/java/io/dapr/testcontainers/wait/strategy/metadata/MetadataTest.java diff --git a/sdk-tests/src/test/java/io/dapr/it/testcontainers/pubsub/outbox/DaprPubSubOutboxIT.java b/sdk-tests/src/test/java/io/dapr/it/testcontainers/pubsub/outbox/DaprPubSubOutboxIT.java index 423ae05e55..2aef82f14c 100644 --- a/sdk-tests/src/test/java/io/dapr/it/testcontainers/pubsub/outbox/DaprPubSubOutboxIT.java +++ b/sdk-tests/src/test/java/io/dapr/it/testcontainers/pubsub/outbox/DaprPubSubOutboxIT.java @@ -23,11 +23,14 @@ import io.dapr.testcontainers.DaprLogLevel; import org.assertj.core.api.Assertions; import org.awaitility.Awaitility; +import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.test.context.SpringBootTest; import org.springframework.test.context.DynamicPropertyRegistry; import org.springframework.test.context.DynamicPropertySource; @@ -44,6 +47,7 @@ import static io.dapr.it.testcontainers.ContainerConstants.DAPR_RUNTIME_IMAGE_TAG; +@Disabled("Unclear why this test is failing intermittently in CI") @SpringBootTest( webEnvironment = SpringBootTest.WebEnvironment.DEFINED_PORT, classes = { @@ -81,6 +85,9 @@ public class DaprPubSubOutboxIT { .withAppChannelAddress("host.testcontainers.internal") .withAppPort(PORT); + @Autowired + private ProductWebhookController productWebhookController; + /** * Expose the Dapr ports to the host. 
* @@ -93,17 +100,18 @@ static void daprProperties(DynamicPropertyRegistry registry) { registry.add("server.port", () -> PORT); } - - @BeforeEach - public void setUp() { + @BeforeAll + public static void beforeAll(){ org.testcontainers.Testcontainers.exposeHostPorts(PORT); } + @BeforeEach + public void beforeEach() { + Wait.forLogMessage(APP_FOUND_MESSAGE_PATTERN, 1).waitUntilReady(DAPR_CONTAINER); + } @Test public void shouldPublishUsingOutbox() throws Exception { - Wait.forLogMessage(APP_FOUND_MESSAGE_PATTERN, 1).waitUntilReady(DAPR_CONTAINER); - try (DaprClient client = DaprClientFactory.createDaprClientBuilder(DAPR_CONTAINER).build()) { ExecuteStateTransactionRequest transactionRequest = new ExecuteStateTransactionRequest(STATE_STORE_NAME); @@ -123,7 +131,7 @@ public void shouldPublishUsingOutbox() throws Exception { Awaitility.await().atMost(Duration.ofSeconds(10)) .ignoreExceptions() - .untilAsserted(() -> Assertions.assertThat(ProductWebhookController.EVENT_LIST).isNotEmpty()); + .untilAsserted(() -> Assertions.assertThat(productWebhookController.getEventList()).isNotEmpty()); } } diff --git a/sdk-tests/src/test/java/io/dapr/it/testcontainers/pubsub/outbox/ProductWebhookController.java b/sdk-tests/src/test/java/io/dapr/it/testcontainers/pubsub/outbox/ProductWebhookController.java index 283dabf887..f35f335fe3 100644 --- a/sdk-tests/src/test/java/io/dapr/it/testcontainers/pubsub/outbox/ProductWebhookController.java +++ b/sdk-tests/src/test/java/io/dapr/it/testcontainers/pubsub/outbox/ProductWebhookController.java @@ -26,12 +26,17 @@ @RequestMapping("/webhooks/products") public class ProductWebhookController { - public static final List> EVENT_LIST = new CopyOnWriteArrayList<>(); + public final List> events = new CopyOnWriteArrayList<>(); @PostMapping("/created") @Topic(name = "product.created", pubsubName = "pubsub") - public void handleEvent(@RequestBody CloudEvent cloudEvent) { + public void handleEvent(@RequestBody CloudEvent cloudEvent) { System.out.println("Received product.created event: " + cloudEvent.getData()); - EVENT_LIST.add(cloudEvent); + + events.add(cloudEvent); + } + + public List> getEventList() { + return events; } } diff --git a/testcontainers-dapr/pom.xml b/testcontainers-dapr/pom.xml index 786ec56a96..04d60ec32f 100644 --- a/testcontainers-dapr/pom.xml +++ b/testcontainers-dapr/pom.xml @@ -33,6 +33,10 @@ org.testcontainers testcontainers + + com.fasterxml.jackson.core + jackson-databind + diff --git a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/AbstractDaprWaitStrategy.java b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/AbstractDaprWaitStrategy.java new file mode 100644 index 0000000000..06d057149e --- /dev/null +++ b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/AbstractDaprWaitStrategy.java @@ -0,0 +1,143 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package io.dapr.testcontainers.wait.strategy; + +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import io.dapr.testcontainers.wait.strategy.metadata.Metadata; +import org.testcontainers.containers.ContainerLaunchException; +import org.testcontainers.containers.wait.strategy.AbstractWaitStrategy; +import org.testcontainers.shaded.org.awaitility.Awaitility; + +import java.io.IOException; +import java.net.HttpURLConnection; +import java.net.URL; +import java.time.Duration; +import java.util.concurrent.TimeUnit; +import java.util.function.Predicate; + +/** + * Base wait strategy for Dapr containers that polls the metadata endpoint. + * Subclasses implement specific conditions to wait for. + */ +public abstract class AbstractDaprWaitStrategy extends AbstractWaitStrategy { + + private static final int DAPR_HTTP_PORT = 3500; + private static final String METADATA_ENDPOINT = "/v1.0/metadata"; + private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper() + .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + + private Duration pollInterval = Duration.ofMillis(500); + + /** + * Sets the poll interval for checking the metadata endpoint. + * + * @param pollInterval the interval between polling attempts + * @return this strategy for chaining + */ + public AbstractDaprWaitStrategy withPollInterval(Duration pollInterval) { + this.pollInterval = pollInterval; + return this; + } + + @Override + protected void waitUntilReady() { + String host = waitStrategyTarget.getHost(); + Integer port = waitStrategyTarget.getMappedPort(DAPR_HTTP_PORT); + String metadataUrl = String.format("http://%s:%d%s", host, port, METADATA_ENDPOINT); + + try { + Awaitility.await() + .atMost(startupTimeout.getSeconds(), TimeUnit.SECONDS) + .pollInterval(pollInterval.toMillis(), TimeUnit.MILLISECONDS) + .ignoreExceptions() + .until(() -> checkCondition(metadataUrl)); + } catch (Exception e) { + throw new ContainerLaunchException( + String.format("Timed out waiting for Dapr condition: %s", getConditionDescription()), e); + } + } + + /** + * Checks if the wait condition is satisfied. + * + * @param metadataUrl the URL to the metadata endpoint + * @return true if the condition is met + * @throws IOException if there's an error fetching metadata + */ + protected boolean checkCondition(String metadataUrl) throws IOException { + Metadata metadata = fetchMetadata(metadataUrl); + return isConditionMet(metadata); + } + + /** + * Fetches metadata from the Dapr sidecar. + * + * @param metadataUrl the URL to fetch metadata from + * @return the parsed metadata + * @throws IOException if there's an error fetching or parsing + */ + protected Metadata fetchMetadata(String metadataUrl) throws IOException { + HttpURLConnection connection = (HttpURLConnection) new URL(metadataUrl).openConnection(); + connection.setRequestMethod("GET"); + connection.setConnectTimeout(1000); + connection.setReadTimeout(1000); + + try { + int responseCode = connection.getResponseCode(); + if (responseCode != 200) { + throw new IOException("Metadata endpoint returned status: " + responseCode); + } + return OBJECT_MAPPER.readValue(connection.getInputStream(), Metadata.class); + } finally { + connection.disconnect(); + } + } + + /** + * Checks if the specific wait condition is met based on the metadata. 
+ * + * @param metadata the current Dapr metadata + * @return true if the condition is satisfied + */ + protected abstract boolean isConditionMet(Metadata metadata); + + /** + * Returns a description of what this strategy is waiting for. + * + * @return a human-readable description of the condition + */ + protected abstract String getConditionDescription(); + + /** + * Creates a predicate-based wait strategy for custom conditions. + * + * @param predicate the predicate to test against metadata + * @param description a description of what the predicate checks + * @return a new wait strategy + */ + public static AbstractDaprWaitStrategy forCondition(Predicate predicate, String description) { + return new AbstractDaprWaitStrategy() { + @Override + protected boolean isConditionMet(Metadata metadata) { + return predicate.test(metadata); + } + + @Override + protected String getConditionDescription() { + return description; + } + }; + } +} diff --git a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/ActorWaitStrategy.java b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/ActorWaitStrategy.java new file mode 100644 index 0000000000..188e3a281a --- /dev/null +++ b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/ActorWaitStrategy.java @@ -0,0 +1,68 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.testcontainers.wait.strategy; + +import io.dapr.testcontainers.wait.strategy.metadata.Actor; +import io.dapr.testcontainers.wait.strategy.metadata.Metadata; + +/** + * Wait strategy that waits for actors to be registered with Dapr. + */ +public class ActorWaitStrategy extends AbstractDaprWaitStrategy { + + private final String actorType; + + /** + * Creates a wait strategy that waits for any actor to be registered. + */ + public ActorWaitStrategy() { + this.actorType = null; + } + + /** + * Creates a wait strategy that waits for a specific actor type to be registered. 
+ * + * @param actorType the actor type to wait for + */ + public ActorWaitStrategy(String actorType) { + this.actorType = actorType; + } + + @Override + protected boolean isConditionMet(Metadata metadata) { + if (metadata == null) { + return false; + } + if (actorType == null) { + return !metadata.getActors().isEmpty(); + } + return metadata.getActors().stream() + .anyMatch(this::matchesActorType); + } + + private boolean matchesActorType(Actor actor) { + if (actor == null || actorType == null) { + return false; + } + return actorType.equals(actor.getType()); + } + + @Override + protected String getConditionDescription() { + if (actorType != null) { + return String.format("actor type '%s'", actorType); + } + return "any registered actors"; + } +} diff --git a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/DaprWait.java b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/DaprWait.java new file mode 100644 index 0000000000..e11f704170 --- /dev/null +++ b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/DaprWait.java @@ -0,0 +1,99 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.testcontainers.wait.strategy; + +import io.dapr.testcontainers.wait.strategy.metadata.Metadata; + +import java.util.function.Predicate; + +/** + * Factory class for creating Dapr-specific wait strategies. + * + *

<p>This class provides static factory methods
+ * that poll the Dapr metadata endpoint to determine when specific conditions are met.
+ * This is more reliable than log-based waiting strategies.</p>
+ *
+ * <p>Example usage:</p>
+ * <pre>{@code
+ * // Wait for a subscription to be registered
+ * DaprWait.forSubscription("pubsub", "my-topic")
+ *     .withStartupTimeout(Duration.ofSeconds(30))
+ *     .waitUntilReady(daprContainer);
+ *
+ * // Wait for any actors to be registered
+ * DaprWait.forActors()
+ *     .waitUntilReady(daprContainer);
+ *
+ * // Wait for a specific actor type
+ * DaprWait.forActorType("MyActor")
+ *     .waitUntilReady(daprContainer);
+ * }</pre>
+ * + * @see Dapr Metadata API + */ +public final class DaprWait { + + private DaprWait() { + // Utility class, no instantiation + } + + /** + * Creates a wait strategy that waits for a subscription to be registered. + * + * @param pubsubName the name of the pub/sub component (can be null to match any) + * @param topic the topic name to wait for (can be null to match any) + * @return a new subscription wait strategy + */ + public static SubscriptionWaitStrategy forSubscription(String pubsubName, String topic) { + return new SubscriptionWaitStrategy(pubsubName, topic); + } + + /** + * Creates a wait strategy that waits for any actors to be registered. + * + * @return a new actor wait strategy + */ + public static ActorWaitStrategy forActors() { + return new ActorWaitStrategy(); + } + + /** + * Creates a wait strategy that waits for a specific actor type to be registered. + * + * @param actorType the actor type to wait for + * @return a new actor wait strategy + */ + public static ActorWaitStrategy forActorType(String actorType) { + return new ActorWaitStrategy(actorType); + } + + /** + * Creates a wait strategy with a custom condition based on Dapr metadata. + * + *

<p>Example:</p>
+   * <pre>{@code
+   * DaprWait.forCondition(
+   *     metadata -> metadata.getComponents().size() >= 2,
+   *     "at least 2 components to be loaded"
+   * );
+   * }</pre>
+ * + * @param predicate the condition to check against the metadata + * @param description a human-readable description of the condition + * @return a new custom wait strategy + */ + public static AbstractDaprWaitStrategy forCondition(Predicate predicate, String description) { + return AbstractDaprWaitStrategy.forCondition(predicate, description); + } +} diff --git a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/SubscriptionWaitStrategy.java b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/SubscriptionWaitStrategy.java new file mode 100644 index 0000000000..4fff91a63a --- /dev/null +++ b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/SubscriptionWaitStrategy.java @@ -0,0 +1,68 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.testcontainers.wait.strategy; + +import io.dapr.testcontainers.wait.strategy.metadata.Metadata; +import io.dapr.testcontainers.wait.strategy.metadata.Subscription; + +/** + * Wait strategy that waits for a specific subscription to be registered with Dapr. + */ +public class SubscriptionWaitStrategy extends AbstractDaprWaitStrategy { + + private final String pubsubName; + private final String topic; + + /** + * Creates a wait strategy for a specific subscription. 
+ * + * @param pubsubName the name of the pub/sub component + * @param topic the topic name to wait for + */ + public SubscriptionWaitStrategy(String pubsubName, String topic) { + this.pubsubName = pubsubName; + this.topic = topic; + } + + @Override + protected boolean isConditionMet(Metadata metadata) { + if (metadata == null) { + return false; + } + return metadata.getSubscriptions().stream() + .anyMatch(this::matchesSubscription); + } + + private boolean matchesSubscription(Subscription subscription) { + if (subscription == null) { + return false; + } + boolean pubsubMatches = pubsubName == null || pubsubName.equals(subscription.getPubsubname()); + boolean topicMatches = topic == null || topic.equals(subscription.getTopic()); + return pubsubMatches && topicMatches; + } + + @Override + protected String getConditionDescription() { + if (pubsubName != null && topic != null) { + return String.format("subscription for pubsub '%s' and topic '%s'", pubsubName, topic); + } else if (pubsubName != null) { + return String.format("subscription for pubsub '%s'", pubsubName); + } else if (topic != null) { + return String.format("subscription for topic '%s'", topic); + } else { + return "any subscription"; + } + } +} diff --git a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/metadata/Actor.java b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/metadata/Actor.java new file mode 100644 index 0000000000..8a859151c5 --- /dev/null +++ b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/metadata/Actor.java @@ -0,0 +1,41 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.testcontainers.wait.strategy.metadata; + +/** + * Represents an actor entry from the Dapr metadata API response. + */ +public class Actor { + private String type; + private int count; + + public Actor() { + } + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } + + public int getCount() { + return count; + } + + public void setCount(int count) { + this.count = count; + } +} diff --git a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/metadata/Component.java b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/metadata/Component.java new file mode 100644 index 0000000000..08915b18b6 --- /dev/null +++ b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/metadata/Component.java @@ -0,0 +1,61 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.testcontainers.wait.strategy.metadata; + +import java.util.List; + +/** + * Represents a component entry from the Dapr metadata API response. + */ +public class Component { + private String name; + private String type; + private String version; + private List capabilities; + + public Component() { + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } + + public String getVersion() { + return version; + } + + public void setVersion(String version) { + this.version = version; + } + + public List getCapabilities() { + return capabilities; + } + + public void setCapabilities(List capabilities) { + this.capabilities = capabilities; + } +} diff --git a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/metadata/Metadata.java b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/metadata/Metadata.java new file mode 100644 index 0000000000..4ad8080d8a --- /dev/null +++ b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/metadata/Metadata.java @@ -0,0 +1,82 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.testcontainers.wait.strategy.metadata; + +import java.util.Collections; +import java.util.List; + +/** + * Represents the response from the Dapr metadata API (/v1.0/metadata). + * + * @see Dapr Metadata API + */ +public class Metadata { + private String id; + private String runtimeVersion; + private List enabledFeatures; + private List actors; + private List components; + private List subscriptions; + + public Metadata() { + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public String getRuntimeVersion() { + return runtimeVersion; + } + + public void setRuntimeVersion(String runtimeVersion) { + this.runtimeVersion = runtimeVersion; + } + + public List getEnabledFeatures() { + return enabledFeatures; + } + + public void setEnabledFeatures(List enabledFeatures) { + this.enabledFeatures = enabledFeatures; + } + + public List getActors() { + return actors != null ? actors : Collections.emptyList(); + } + + public void setActors(List actors) { + this.actors = actors; + } + + public List getComponents() { + return components != null ? components : Collections.emptyList(); + } + + public void setComponents(List components) { + this.components = components; + } + + public List getSubscriptions() { + return subscriptions != null ? 
subscriptions : Collections.emptyList(); + } + + public void setSubscriptions(List subscriptions) { + this.subscriptions = subscriptions; + } +} diff --git a/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/metadata/Subscription.java b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/metadata/Subscription.java new file mode 100644 index 0000000000..8d775b600f --- /dev/null +++ b/testcontainers-dapr/src/main/java/io/dapr/testcontainers/wait/strategy/metadata/Subscription.java @@ -0,0 +1,107 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.testcontainers.wait.strategy.metadata; + +import java.util.List; +import java.util.Map; + +/** + * Represents a subscription entry from the Dapr metadata API response. + */ +public class Subscription { + private String pubsubname; + private String topic; + private String deadLetterTopic; + private Map metadata; + private List rules; + private String type; + + public Subscription() { + } + + public String getPubsubname() { + return pubsubname; + } + + public void setPubsubname(String pubsubname) { + this.pubsubname = pubsubname; + } + + public String getTopic() { + return topic; + } + + public void setTopic(String topic) { + this.topic = topic; + } + + public String getDeadLetterTopic() { + return deadLetterTopic; + } + + public void setDeadLetterTopic(String deadLetterTopic) { + this.deadLetterTopic = deadLetterTopic; + } + + public Map getMetadata() { + return metadata; + } + + public void setMetadata(Map metadata) { + this.metadata = metadata; + } + + public List getRules() { + return rules; + } + + public void setRules(List rules) { + this.rules = rules; + } + + public String getType() { + return type; + } + + public void setType(String type) { + this.type = type; + } + + /** + * Represents a routing rule for a subscription. + */ + public static class Rule { + private String match; + private String path; + + public Rule() { + } + + public String getMatch() { + return match; + } + + public void setMatch(String match) { + this.match = match; + } + + public String getPath() { + return path; + } + + public void setPath(String path) { + this.path = path; + } + } +} diff --git a/testcontainers-dapr/src/test/java/io/dapr/testcontainers/wait/strategy/ActorWaitStrategyTest.java b/testcontainers-dapr/src/test/java/io/dapr/testcontainers/wait/strategy/ActorWaitStrategyTest.java new file mode 100644 index 0000000000..d8ae653f77 --- /dev/null +++ b/testcontainers-dapr/src/test/java/io/dapr/testcontainers/wait/strategy/ActorWaitStrategyTest.java @@ -0,0 +1,142 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.testcontainers.wait.strategy; + +import io.dapr.testcontainers.wait.strategy.metadata.Actor; +import io.dapr.testcontainers.wait.strategy.metadata.Metadata; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; + +import java.util.Arrays; +import java.util.Collections; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +class ActorWaitStrategyTest { + + @Test + @DisplayName("Should match any actor when no specific type is specified") + void shouldMatchAnyActorWhenNoTypeSpecified() { + ActorWaitStrategy strategy = new ActorWaitStrategy(); + Metadata metadata = createMetadataWithActor("SomeActor"); + + assertTrue(strategy.isConditionMet(metadata)); + } + + @Test + @DisplayName("Should not match when no actors exist and no type is specified") + void shouldNotMatchWhenNoActorsAndNoTypeSpecified() { + ActorWaitStrategy strategy = new ActorWaitStrategy(); + Metadata metadata = new Metadata(); + + metadata.setActors(Collections.emptyList()); + + assertFalse(strategy.isConditionMet(metadata)); + } + + @Test + @DisplayName("Should match when specific actor type exists") + void shouldMatchSpecificActorType() { + ActorWaitStrategy strategy = new ActorWaitStrategy("MyActor"); + Metadata metadata = createMetadataWithActor("MyActor"); + + assertTrue(strategy.isConditionMet(metadata)); + } + + @Test + @DisplayName("Should not match when actor type differs from expected") + void shouldNotMatchWhenActorTypeDiffers() { + ActorWaitStrategy strategy = new ActorWaitStrategy("MyActor"); + Metadata metadata = createMetadataWithActor("OtherActor"); + + assertFalse(strategy.isConditionMet(metadata)); + } + + @Test + @DisplayName("Should not match when no actors exist but specific type is expected") + void shouldNotMatchWhenNoActorsAndTypeSpecified() { + ActorWaitStrategy strategy = new ActorWaitStrategy("MyActor"); + Metadata metadata = new Metadata(); + + metadata.setActors(Collections.emptyList()); + + assertFalse(strategy.isConditionMet(metadata)); + } + + @Test + @DisplayName("Should find matching actor among multiple registered actors") + void shouldFindMatchAmongMultipleActors() { + ActorWaitStrategy strategy = new ActorWaitStrategy("TargetActor"); + + Actor actor1 = createActor("FirstActor"); + Actor actor2 = createActor("TargetActor"); + Actor actor3 = createActor("ThirdActor"); + + Metadata metadata = new Metadata(); + metadata.setActors(Arrays.asList(actor1, actor2, actor3)); + + assertTrue(strategy.isConditionMet(metadata)); + } + + @Test + @DisplayName("Should provide correct human-readable condition description") + void shouldProvideCorrectDescription() { + ActorWaitStrategy anyActors = new ActorWaitStrategy(); + assertEquals("any registered actors", anyActors.getConditionDescription()); + + ActorWaitStrategy specificActor = new ActorWaitStrategy("MyActor"); + assertEquals("actor type 'MyActor'", specificActor.getConditionDescription()); + } + + @Test + @DisplayName("Should handle null actor in list without 
throwing NPE") + void shouldHandleNullActorInList() { + ActorWaitStrategy strategy = new ActorWaitStrategy("TargetActor"); + Metadata metadata = new Metadata(); + metadata.setActors(Arrays.asList(null, createActor("TargetActor"))); + + assertTrue(strategy.isConditionMet(metadata)); + } + + @Test + @DisplayName("Should return false when metadata is null") + void shouldReturnFalseWhenMetadataIsNull() { + ActorWaitStrategy strategy = new ActorWaitStrategy(); + + assertFalse(strategy.isConditionMet(null)); + } + + @Test + @DisplayName("Should return false when metadata is null and actor type is specified") + void shouldReturnFalseWhenMetadataIsNullAndActorTypeSpecified() { + ActorWaitStrategy strategy = new ActorWaitStrategy("MyActor"); + + assertFalse(strategy.isConditionMet(null)); + } + + private Metadata createMetadataWithActor(String actorType) { + Metadata metadata = new Metadata(); + metadata.setActors(Collections.singletonList(createActor(actorType))); + return metadata; + } + + private Actor createActor(String type) { + Actor actor = new Actor(); + actor.setType(type); + actor.setCount(1); + return actor; + } +} diff --git a/testcontainers-dapr/src/test/java/io/dapr/testcontainers/wait/strategy/DaprWaitTest.java b/testcontainers-dapr/src/test/java/io/dapr/testcontainers/wait/strategy/DaprWaitTest.java new file mode 100644 index 0000000000..556f76cf7c --- /dev/null +++ b/testcontainers-dapr/src/test/java/io/dapr/testcontainers/wait/strategy/DaprWaitTest.java @@ -0,0 +1,110 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package io.dapr.testcontainers.wait.strategy; + +import io.dapr.testcontainers.wait.strategy.metadata.Component; +import io.dapr.testcontainers.wait.strategy.metadata.Metadata; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; + +import java.time.Duration; +import java.util.Arrays; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +class DaprWaitTest { + + @Test + @DisplayName("forSubscription should create SubscriptionWaitStrategy") + void forSubscriptionShouldCreateSubscriptionWaitStrategy() { + AbstractDaprWaitStrategy strategy = DaprWait.forSubscription("pubsub", "orders"); + + assertInstanceOf(SubscriptionWaitStrategy.class, strategy); + } + + @Test + @DisplayName("forSubscription with null topic should match any topic") + void forSubscriptionWithNullTopicShouldMatchAnyTopic() { + SubscriptionWaitStrategy strategy = DaprWait.forSubscription("pubsub", null); + + assertNotNull(strategy); + assertEquals("subscription for pubsub 'pubsub'", strategy.getConditionDescription()); + } + + @Test + @DisplayName("forSubscription with null pubsub should match any pubsub") + void forSubscriptionWithNullPubsubShouldMatchAnyPubsub() { + SubscriptionWaitStrategy strategy = DaprWait.forSubscription(null, "orders"); + + assertNotNull(strategy); + assertEquals("subscription for topic 'orders'", strategy.getConditionDescription()); + } + + @Test + @DisplayName("forActors should create ActorWaitStrategy for any actor") + void forActorsShouldCreateActorWaitStrategyForAnyActor() { + ActorWaitStrategy strategy = DaprWait.forActors(); + + assertNotNull(strategy); + assertEquals("any registered actors", strategy.getConditionDescription()); + } + + @Test + @DisplayName("forActorType should create ActorWaitStrategy for specific type") + void forActorTypeShouldCreateActorWaitStrategyForSpecificType() { + ActorWaitStrategy strategy = DaprWait.forActorType("MyActor"); + + assertNotNull(strategy); + assertEquals("actor type 'MyActor'", strategy.getConditionDescription()); + } + + @Test + @DisplayName("forCondition should create custom wait strategy with predicate") + void forConditionShouldCreateCustomWaitStrategy() { + AbstractDaprWaitStrategy strategy = DaprWait.forCondition( + metadata -> metadata.getComponents().size() >= 2, + "at least 2 components" + ); + + assertNotNull(strategy); + assertEquals("at least 2 components", strategy.getConditionDescription()); + + Metadata metadataWith2Components = new Metadata(); + Component comp1 = new Component(); + comp1.setName("comp1"); + Component comp2 = new Component(); + comp2.setName("comp2"); + metadataWith2Components.setComponents(Arrays.asList(comp1, comp2)); + + Metadata metadataWith1Component = new Metadata(); + metadataWith1Component.setComponents(Arrays.asList(comp1)); + + assertTrue(strategy.isConditionMet(metadataWith2Components)); + assertFalse(strategy.isConditionMet(metadataWith1Component)); + } + + @Test + @DisplayName("Strategy should support fluent configuration with poll interval and timeout") + void strategyShouldSupportFluentConfiguration() { + AbstractDaprWaitStrategy strategy = DaprWait.forSubscription("pubsub", "orders") + .withPollInterval(Duration.ofMillis(250)); + strategy.withStartupTimeout(Duration.ofSeconds(60)); + + assertNotNull(strategy); + } 
+} diff --git a/testcontainers-dapr/src/test/java/io/dapr/testcontainers/wait/strategy/SubscriptionWaitStrategyTest.java b/testcontainers-dapr/src/test/java/io/dapr/testcontainers/wait/strategy/SubscriptionWaitStrategyTest.java new file mode 100644 index 0000000000..014c883c15 --- /dev/null +++ b/testcontainers-dapr/src/test/java/io/dapr/testcontainers/wait/strategy/SubscriptionWaitStrategyTest.java @@ -0,0 +1,154 @@ +/* + * Copyright 2025 The Dapr Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and +limitations under the License. +*/ + +package io.dapr.testcontainers.wait.strategy; + +import io.dapr.testcontainers.wait.strategy.metadata.Metadata; +import io.dapr.testcontainers.wait.strategy.metadata.Subscription; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; + +import java.util.Arrays; +import java.util.Collections; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +class SubscriptionWaitStrategyTest { + + @Test + @DisplayName("Should match when pubsub and topic exactly match") + void shouldMatchExactSubscription() { + SubscriptionWaitStrategy strategy = new SubscriptionWaitStrategy("pubsub", "orders"); + Metadata metadata = createMetadataWithSubscription("pubsub", "orders"); + + assertTrue(strategy.isConditionMet(metadata)); + } + + @Test + @DisplayName("Should not match when pubsub name differs") + void shouldNotMatchWhenPubsubDiffers() { + SubscriptionWaitStrategy strategy = new SubscriptionWaitStrategy("pubsub", "orders"); + Metadata metadata = createMetadataWithSubscription("other-pubsub", "orders"); + + assertFalse(strategy.isConditionMet(metadata)); + } + + @Test + @DisplayName("Should not match when topic name differs") + void shouldNotMatchWhenTopicDiffers() { + SubscriptionWaitStrategy strategy = new SubscriptionWaitStrategy("pubsub", "orders"); + Metadata metadata = createMetadataWithSubscription("pubsub", "other-topic"); + + assertFalse(strategy.isConditionMet(metadata)); + } + + @Test + @DisplayName("Should not match when no subscriptions exist") + void shouldNotMatchWhenNoSubscriptions() { + SubscriptionWaitStrategy strategy = new SubscriptionWaitStrategy("pubsub", "orders"); + Metadata metadata = new Metadata(); + metadata.setSubscriptions(Collections.emptyList()); + + assertFalse(strategy.isConditionMet(metadata)); + } + + @Test + @DisplayName("Should match any topic when topic filter is null") + void shouldMatchAnyTopicWhenTopicIsNull() { + SubscriptionWaitStrategy strategy = new SubscriptionWaitStrategy("pubsub", null); + Metadata metadata = createMetadataWithSubscription("pubsub", "any-topic"); + + assertTrue(strategy.isConditionMet(metadata)); + } + + @Test + @DisplayName("Should match any pubsub when pubsub filter is null") + void shouldMatchAnyPubsubWhenPubsubIsNull() { + SubscriptionWaitStrategy strategy = new SubscriptionWaitStrategy(null, "orders"); + Metadata metadata = createMetadataWithSubscription("any-pubsub", "orders"); + + 
assertTrue(strategy.isConditionMet(metadata));
+  }
+
+  @Test
+  @DisplayName("Should match any subscription when both filters are null")
+  void shouldMatchAnySubscriptionWhenBothAreNull() {
+    SubscriptionWaitStrategy strategy = new SubscriptionWaitStrategy(null, null);
+    Metadata metadata = createMetadataWithSubscription("any-pubsub", "any-topic");
+
+    assertTrue(strategy.isConditionMet(metadata));
+  }
+
+  @Test
+  @DisplayName("Should find matching subscription among multiple subscriptions")
+  void shouldFindMatchAmongMultipleSubscriptions() {
+    SubscriptionWaitStrategy strategy = new SubscriptionWaitStrategy("pubsub", "orders");
+    Subscription sub1 = createSubscription("other-pubsub", "other-topic");
+    Subscription sub2 = createSubscription("pubsub", "orders");
+    Subscription sub3 = createSubscription("another-pubsub", "another-topic");
+
+    Metadata metadata = new Metadata();
+    metadata.setSubscriptions(Arrays.asList(sub1, sub2, sub3));
+
+    assertTrue(strategy.isConditionMet(metadata));
+  }
+
+  @Test
+  @DisplayName("Should provide correct human-readable condition description")
+  void shouldProvideCorrectDescription() {
+    SubscriptionWaitStrategy strategy = new SubscriptionWaitStrategy("pubsub", "orders");
+    assertEquals("subscription for pubsub 'pubsub' and topic 'orders'", strategy.getConditionDescription());
+
+    SubscriptionWaitStrategy pubsubOnly = new SubscriptionWaitStrategy("pubsub", null);
+    assertEquals("subscription for pubsub 'pubsub'", pubsubOnly.getConditionDescription());
+
+    SubscriptionWaitStrategy topicOnly = new SubscriptionWaitStrategy(null, "orders");
+    assertEquals("subscription for topic 'orders'", topicOnly.getConditionDescription());
+
+    SubscriptionWaitStrategy any = new SubscriptionWaitStrategy(null, null);
+    assertEquals("any subscription", any.getConditionDescription());
+  }
+
+  @Test
+  @DisplayName("Should return false when metadata is null")
+  void shouldReturnFalseWhenMetadataIsNull() {
+    SubscriptionWaitStrategy strategy = new SubscriptionWaitStrategy("pubsub", "orders");
+
+    assertFalse(strategy.isConditionMet(null));
+  }
+
+  @Test
+  @DisplayName("Should handle null subscription in list without throwing NPE")
+  void shouldHandleNullSubscriptionInList() {
+    SubscriptionWaitStrategy strategy = new SubscriptionWaitStrategy("pubsub", "orders");
+    Metadata metadata = new Metadata();
+    metadata.setSubscriptions(Arrays.asList(null, createSubscription("pubsub", "orders")));
+
+    assertTrue(strategy.isConditionMet(metadata));
+  }
+
+  private Metadata createMetadataWithSubscription(String pubsubName, String topic) {
+    Metadata metadata = new Metadata();
+    metadata.setSubscriptions(Collections.singletonList(createSubscription(pubsubName, topic)));
+    return metadata;
+  }
+
+  private Subscription createSubscription(String pubsubName, String topic) {
+    Subscription subscription = new Subscription();
+    subscription.setPubsubname(pubsubName);
+    subscription.setTopic(topic);
+    return subscription;
+  }
+}
diff --git a/testcontainers-dapr/src/test/java/io/dapr/testcontainers/wait/strategy/metadata/MetadataTest.java b/testcontainers-dapr/src/test/java/io/dapr/testcontainers/wait/strategy/metadata/MetadataTest.java
new file mode 100644
index 0000000000..c7f7c579c7
--- /dev/null
+++ b/testcontainers-dapr/src/test/java/io/dapr/testcontainers/wait/strategy/metadata/MetadataTest.java
@@ -0,0 +1,197 @@
+/*
+ * Copyright 2025 The Dapr Authors
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package io.dapr.testcontainers.wait.strategy.metadata;
+
+import com.fasterxml.jackson.databind.DeserializationFeature;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import org.junit.jupiter.api.DisplayName;
+import org.junit.jupiter.api.Test;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+class MetadataTest {
+
+  private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper()
+      .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
+
+  @Test
+  @DisplayName("Metadata should return empty list when actors is null")
+  void metadataShouldReturnEmptyListWhenActorsIsNull() {
+    Metadata metadata = new Metadata();
+
+    assertNotNull(metadata.getActors());
+    assertTrue(metadata.getActors().isEmpty());
+  }
+
+  @Test
+  @DisplayName("Metadata should return empty list when components is null")
+  void metadataShouldReturnEmptyListWhenComponentsIsNull() {
+    Metadata metadata = new Metadata();
+
+    assertNotNull(metadata.getComponents());
+    assertTrue(metadata.getComponents().isEmpty());
+  }
+
+  @Test
+  @DisplayName("Metadata should return empty list when subscriptions is null")
+  void metadataShouldReturnEmptyListWhenSubscriptionsIsNull() {
+    Metadata metadata = new Metadata();
+
+    assertNotNull(metadata.getSubscriptions());
+    assertTrue(metadata.getSubscriptions().isEmpty());
+  }
+
+  @Test
+  @DisplayName("Metadata should store and retrieve all fields correctly")
+  void metadataShouldStoreAndRetrieveAllFields() {
+    Metadata metadata = new Metadata();
+    metadata.setId("test-app");
+    metadata.setRuntimeVersion("1.14.0");
+    metadata.setEnabledFeatures(Arrays.asList("feature1", "feature2"));
+
+    Actor actor = new Actor();
+    actor.setType("MyActor");
+    metadata.setActors(Collections.singletonList(actor));
+
+    Component component = new Component();
+    component.setName("statestore");
+    metadata.setComponents(Collections.singletonList(component));
+
+    Subscription subscription = new Subscription();
+    subscription.setTopic("orders");
+    metadata.setSubscriptions(Collections.singletonList(subscription));
+
+    assertEquals("test-app", metadata.getId());
+    assertEquals("1.14.0", metadata.getRuntimeVersion());
+    assertEquals(2, metadata.getEnabledFeatures().size());
+    assertEquals(1, metadata.getActors().size());
+    assertEquals(1, metadata.getComponents().size());
+    assertEquals(1, metadata.getSubscriptions().size());
+  }
+
+  @Test
+  @DisplayName("Actor should store and retrieve all fields correctly")
+  void actorShouldStoreAndRetrieveAllFields() {
+    Actor actor = new Actor();
+    actor.setType("OrderActor");
+    actor.setCount(5);
+
+    assertEquals("OrderActor", actor.getType());
+    assertEquals(5, actor.getCount());
+  }
+
+  @Test
+  @DisplayName("Component should store and retrieve all fields correctly")
+  void componentShouldStoreAndRetrieveAllFields() {
+    Component component = new Component();
+    component.setName("statestore");
+    component.setType("state.redis");
+    component.setVersion("v1");
+    component.setCapabilities(Arrays.asList("ETAG", "TRANSACTIONAL"));
+
+    assertEquals("statestore", component.getName());
+    assertEquals("state.redis", component.getType());
+    assertEquals("v1", component.getVersion());
+    assertEquals(2, component.getCapabilities().size());
+    assertTrue(component.getCapabilities().contains("ETAG"));
+  }
+
+  @Test
+  @DisplayName("Subscription should store and retrieve all fields including rules")
+  void subscriptionShouldStoreAndRetrieveAllFields() {
+    Subscription subscription = new Subscription();
+    subscription.setPubsubname("pubsub");
+    subscription.setTopic("orders");
+    subscription.setDeadLetterTopic("orders-dlq");
+    subscription.setType("declarative");
+
+    Map<String, String> meta = new HashMap<>();
+    meta.put("key", "value");
+    subscription.setMetadata(meta);
+
+    Subscription.Rule rule = new Subscription.Rule();
+    rule.setMatch("event.type == 'order'");
+    rule.setPath("/orders");
+    subscription.setRules(Collections.singletonList(rule));
+
+    assertEquals("pubsub", subscription.getPubsubname());
+    assertEquals("orders", subscription.getTopic());
+    assertEquals("orders-dlq", subscription.getDeadLetterTopic());
+    assertEquals("declarative", subscription.getType());
+    assertEquals("value", subscription.getMetadata().get("key"));
+    assertEquals(1, subscription.getRules().size());
+    assertEquals("event.type == 'order'", subscription.getRules().get(0).getMatch());
+    assertEquals("/orders", subscription.getRules().get(0).getPath());
+  }
+
+  @Test
+  @DisplayName("Should deserialize complete Dapr metadata JSON response")
+  void shouldDeserializeMetadataFromJson() throws Exception {
+    String json = "{"
+        + "\"id\": \"my-app\","
+        + "\"runtimeVersion\": \"1.14.0\","
+        + "\"enabledFeatures\": [\"ServiceInvocationStreaming\"],"
+        + "\"actors\": [{\"type\": \"OrderActor\", \"count\": 3}],"
+        + "\"components\": [{\"name\": \"statestore\", \"type\": \"state.redis\", \"version\": \"v1\", \"capabilities\": [\"ETAG\"]}],"
+        + "\"subscriptions\": [{"
+        + "  \"pubsubname\": \"pubsub\","
+        + "  \"topic\": \"orders\","
+        + "  \"deadLetterTopic\": \"orders-dlq\","
+        + "  \"type\": \"programmatic\","
+        + "  \"rules\": [{\"match\": \"\", \"path\": \"/orders\"}]"
+        + "}]"
+        + "}";
+
+    Metadata metadata = OBJECT_MAPPER.readValue(json, Metadata.class);
+
+    assertEquals("my-app", metadata.getId());
+    assertEquals("1.14.0", metadata.getRuntimeVersion());
+    assertEquals(1, metadata.getEnabledFeatures().size());
+
+    assertEquals(1, metadata.getActors().size());
+    assertEquals("OrderActor", metadata.getActors().get(0).getType());
+    assertEquals(3, metadata.getActors().get(0).getCount());
+
+    assertEquals(1, metadata.getComponents().size());
+    assertEquals("statestore", metadata.getComponents().get(0).getName());
+    assertEquals("state.redis", metadata.getComponents().get(0).getType());
+
+    assertEquals(1, metadata.getSubscriptions().size());
+    assertEquals("pubsub", metadata.getSubscriptions().get(0).getPubsubname());
+    assertEquals("orders", metadata.getSubscriptions().get(0).getTopic());
+    assertEquals(1, metadata.getSubscriptions().get(0).getRules().size());
+  }
+
+  @Test
+  @DisplayName("Should ignore unknown fields when deserializing JSON")
+  void shouldDeserializeMetadataWithUnknownFields() throws Exception {
+    String json = "{"
+        + "\"id\": \"my-app\","
+        + "\"unknownField\": \"should be ignored\","
+        + "\"anotherUnknown\": {\"nested\": true}"
+        + "}";
+
+    Metadata metadata = OBJECT_MAPPER.readValue(json, Metadata.class);
+
+    assertEquals("my-app", metadata.getId());
+    assertTrue(metadata.getActors().isEmpty());
+  }
+}
From 3ba2bc9950ea1db99c44c046b0f7488711cf6fdf Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 2 Jan 2026 19:35:20 -0800
Subject: [PATCH 16/17] Bump actions/upload-artifact from 4 to 6 (#1606)

Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4 to 6.
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](https://github.com/actions/upload-artifact/compare/v4...v6)

---
updated-dependencies:
- dependency-name: actions/upload-artifact
  dependency-version: '6'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Cassie Coyle
Co-authored-by: Dapr Bot <56698301+dapr-bot@users.noreply.github.com>
Signed-off-by: salaboy
---
 .github/workflows/build.yml | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index b59173db88..5613ce1685 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -34,12 +34,12 @@ jobs:
       - name: Codecov
         uses: codecov/codecov-action@v5.5.2
       - name: Upload test report for sdk
-        uses: actions/upload-artifact@v5
+        uses: actions/upload-artifact@v6
         with:
           name: test-dapr-java-sdk-jdk${{ env.JDK_VER }}
           path: sdk/target/jacoco-report/
       - name: Upload test report for sdk-actors
-        uses: actions/upload-artifact@v5
+        uses: actions/upload-artifact@v6
        with:
           name: report-dapr-java-sdk-actors-jdk${{ env.JDK_VER }}
           path: sdk-actors/target/jacoco-report/
@@ -83,7 +83,7 @@ jobs:
         run: docker kill durabletask-sidecar

       - name: Upload Durable Task Sidecar Logs
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
         with:
           name: Durable Task Sidecar Logs
           path: durabletask-sidecar.log
@@ -200,13 +200,13 @@ jobs:
         run: PRODUCT_SPRING_BOOT_VERSION=${{ matrix.spring-boot-version }} ./mvnw -B -pl !durabletask-client -Pintegration-tests dependency:copy-dependencies verify
       - name: Upload failsafe test report for sdk-tests on failure
         if: ${{ failure() && steps.integration_tests.conclusion == 'failure' }}
-        uses: actions/upload-artifact@v5
+        uses: actions/upload-artifact@v6
         with:
           name: failsafe-report-sdk-tests-jdk${{ matrix.java }}-sb${{ matrix.spring-boot-version }}
           path: sdk-tests/target/failsafe-reports
       - name: Upload surefire test report for sdk-tests on failure
         if: ${{ failure() && steps.integration_tests.conclusion == 'failure' }}
-        uses: actions/upload-artifact@v5
+        uses: actions/upload-artifact@v6
         with:
           name: surefire-report-sdk-tests-jdk${{ matrix.java }}-sb${{ matrix.spring-boot-version }}
           path: sdk-tests/target/surefire-reports
From fcc8da5d91f21abefe8f8765211bba842324a092 Mon Sep 17 00:00:00 2001
From: salaboy
Date: Mon, 5 Jan 2026 12:18:51 +0100
Subject: [PATCH 17/17] updating test for checking nulls

Signed-off-by: salaboy
---
 .../ConfigurationYamlConverterTest.java | 31 +++++++++++++++++++
 1 file changed, 31 insertions(+)

diff --git a/testcontainers-dapr/src/test/java/io/dapr/testcontainers/converter/ConfigurationYamlConverterTest.java b/testcontainers-dapr/src/test/java/io/dapr/testcontainers/converter/ConfigurationYamlConverterTest.java
index 8f15d465de..dec7e98152 100644
--- a/testcontainers-dapr/src/test/java/io/dapr/testcontainers/converter/ConfigurationYamlConverterTest.java
+++ b/testcontainers-dapr/src/test/java/io/dapr/testcontainers/converter/ConfigurationYamlConverterTest.java
@@ -26,6 +26,7 @@
 import static io.dapr.testcontainers.DaprContainerConstants.DAPR_RUNTIME_IMAGE_TAG;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;

 import java.util.ArrayList;
 import java.util.List;
@@ -69,6 +70,9 @@ public void testConfigurationToYaml() {
     Configuration configuration = dapr.getConfiguration();
     assertNotNull(configuration);

+    assertEquals(httpPipeline, configuration.getHttpPipeline());
+    assertEquals(appHttpPipeline, configuration.getAppHttpPipeline());
+
     String configurationYaml = converter.convert(configuration);

     String expectedConfigurationYaml = "apiVersion: dapr.io/v1alpha1\n"
@@ -94,4 +98,31 @@ public void testConfigurationToYaml() {

     assertEquals(expectedConfigurationYaml, configurationYaml);
   }
+
+  @Test
+  public void testConfigurationToYamlNoPipelines() {
+
+    DaprContainer dapr = new DaprContainer(DAPR_RUNTIME_IMAGE_TAG)
+        .withAppName("dapr-app")
+        .withAppPort(8081)
+        .withConfiguration(new Configuration("my-config", null, null, null))
+        .withAppChannelAddress("host.testcontainers.internal");
+
+    Configuration configuration = dapr.getConfiguration();
+    assertNotNull(configuration);
+
+    assertNull(configuration.getHttpPipeline());
+    assertNull(configuration.getAppHttpPipeline());
+
+    String configurationYaml = converter.convert(configuration);
+
+    String expectedConfigurationYaml =
+        "apiVersion: dapr.io/v1alpha1\n"
+        + "kind: Configuration\n"
+        + "metadata:\n"
+        + "  name: my-config\n"
+        + "spec: {}\n";
+
+    assertEquals(expectedConfigurationYaml, configurationYaml);
+  }
 }
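For reference, a minimal sketch of how the metadata model and the subscription wait condition exercised by the tests above fit together. It only uses the API surface shown in these patches (the lenient Jackson mapper from MetadataTest, and the SubscriptionWaitStrategy constructor, isConditionMet and getConditionDescription calls); the package of SubscriptionWaitStrategy, the accessibility of its condition-check methods, and the way DaprContainer polls the sidecar's metadata endpoint are assumptions here, not something this patch series confirms.

```java
// Illustrative sketch only -- not part of the patch series above.
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;

import io.dapr.testcontainers.wait.strategy.metadata.Metadata;
// Package of the wait strategy is assumed; adjust to wherever it lives in the SDK.
import io.dapr.testcontainers.wait.strategy.SubscriptionWaitStrategy;

public class SubscriptionWaitSketch {

  public static void main(String[] args) throws Exception {
    // Same lenient mapper configuration as in MetadataTest, so unknown fields
    // returned by newer runtimes do not break deserialization.
    ObjectMapper mapper = new ObjectMapper()
        .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);

    // A trimmed-down payload in the shape the tests above deserialize.
    String json = "{"
        + "\"id\": \"my-app\","
        + "\"subscriptions\": [{\"pubsubname\": \"pubsub\", \"topic\": \"orders\"}]"
        + "}";

    Metadata metadata = mapper.readValue(json, Metadata.class);

    // Condition: a subscription for pubsub 'pubsub' and topic 'orders' is registered.
    SubscriptionWaitStrategy strategy = new SubscriptionWaitStrategy("pubsub", "orders");

    System.out.println(strategy.getConditionDescription()); // subscription for pubsub 'pubsub' and topic 'orders'
    System.out.println(strategy.isConditionMet(metadata));  // true for the payload above
  }
}
```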