diff --git a/.github/workflows/build-samples.yml b/.github/workflows/build-samples.yml index a68f0cc..3dcfa76 100644 --- a/.github/workflows/build-samples.yml +++ b/.github/workflows/build-samples.yml @@ -150,6 +150,10 @@ jobs: - name: Build DTS - opentelemetry-tracing run: cd samples/durable-task-sdks/java/opentelemetry-tracing && chmod +x gradlew && ./gradlew build + # Scenario samples (Gradle) + - name: Build Scenario - WorkItemFilteringSplitActivitiesJava + run: cd samples/scenarios/WorkItemFilteringSplitActivitiesJava && ./gradlew build + # Durable Functions samples (Maven) - name: Build DF - HelloCities run: cd samples/durable-functions/java/HelloCities && mvn compile --batch-mode diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/.gitignore b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/.gitignore new file mode 100644 index 0000000..a60a9a5 --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/.gitignore @@ -0,0 +1,12 @@ +.gradle/ +build/ +!gradle/wrapper/gradle-wrapper.jar +.idea/ +*.iml +*.ipr +*.iws +out/ +.settings/ +.classpath +.project +bin/ diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/README.md b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/README.md new file mode 100644 index 0000000..2bc0fb2 --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/README.md @@ -0,0 +1,288 @@ +# Work Item Filtering — Split Activities Sample (Java) + +This sample demonstrates **Work Item Filtering**, a feature that allows workers to declare which orchestrations, activities, and entities they can process. The Durable Task Scheduler (DTS) backend routes work items only to workers whose filters match, preventing workers from receiving work they cannot handle. + +Before work item filtering, all orchestrations, activities, and entities were handed to any connected worker regardless of what it actually hosted. 
This caused errors (or silent hangs) when a worker received a work item it didn't implement — especially problematic in multi-service deployments, rolling upgrades, and microservice topologies. With filtering, each worker registers its task set; DTS creates per-filter queues and routes work items to matching workers. If no filter is specified, behavior falls back to the "generic queue" (all workers receive everything). + +## Architecture + +``` +┌─────────────────────────────────────────────────────────────┐ +│ Durable Task Scheduler (DTS) │ +│ │ +│ Orchestration queue ──► routed to Orchestrator Worker only │ +│ ValidateOrder queue ──► routed to Validator Worker only │ +│ ShipOrder queue ──► routed to Shipper Worker only │ +└────────────┬──────────────────┬──────────────────┬──────────┘ + │ │ │ + ┌───────▼───────┐ ┌──────▼───────┐ ┌──────▼───────┐ + │ Orchestrator │ │ Validator │ │ Shipper │ + │ Worker │ │ Worker │ │ Worker │ + │ │ │ │ │ │ + │ Registers: │ │ Registers: │ │ Registers: │ + │ • OrderProc- │ │ • Validate- │ │ • ShipOrder │ + │ essing- │ │ Order │ │ │ + │ Orchestration│ │ │ │ │ + └───────────────┘ └───────────────┘ └───────────────┘ + + ┌───────────────┐ + │ Client │ + │ (Driver) │ + │ │ + │ Schedules new │ + │ orchestrations │ + │ and prints │ + │ results │ + └───────────────┘ +``` + +**Orchestrator Worker** runs orchestrations only — it has no activities registered. +**Validator Worker** runs `ValidateOrder` only — it has no orchestrations or other activities. +**Shipper Worker** runs `ShipOrder` only — same isolation. +**Client** schedules orchestrations and polls for completion. + +## The Orchestration + +`OrderProcessingOrchestration` performs two sequential activity calls: + +1. `ValidateOrder(orderId)` → routed to Validator Worker +2. `ShipOrder(orderId)` → routed to Shipper Worker + +Returns a combined result string. 
+ +## Prerequisites + +- [Java 21](https://adoptium.net/) (or later) +- [Docker](https://docs.docker.com/get-docker/) (for the DTS emulator) + +## Running Locally + +### 1. Start the DTS Emulator + +```bash +docker pull mcr.microsoft.com/dts/dts-emulator:latest +docker run -d --name dts-emulator -p 8080:8080 -p 8082:8082 mcr.microsoft.com/dts/dts-emulator:latest +``` + +The emulator dashboard is available at `http://localhost:8082`. + +### 2. Build all projects + +```bash +cd samples/scenarios/WorkItemFilteringSplitActivitiesJava +./gradlew build +``` + +### 3. Start the three workers (each in a separate terminal) + +**Terminal 1 — Orchestrator Worker:** +```bash +./gradlew :orchestrator-worker:run +``` + +**Terminal 2 — Validator Worker (ValidateOrder activity):** +```bash +./gradlew :validator-worker:run +``` + +**Terminal 3 — Shipper Worker (ShipOrder activity):** +```bash +./gradlew :shipper-worker:run +``` + +### 4. Run the Client (in a fourth terminal) + +```bash +./gradlew :client:run +``` + +## Expected Output + +The client runs in a **continuous loop**, scheduling a batch of 3 orchestrations every 30 seconds for 10 minutes. This makes it easy to observe scaling behavior over time. + +### Client terminal + +``` +10:30:01 INFO i.d.samples.Client - === Work Item Filtering Demo — Client === +10:30:01 INFO i.d.samples.Client - Will schedule 3 orchestrations every 30s for 10 minutes. + +10:30:01 INFO i.d.samples.Client - --- Batch #1 at 2025-01-01T10:30:01Z --- +10:30:01 INFO i.d.samples.Client - Scheduling orchestration with orderId='ORD-B001-001'... +10:30:01 INFO i.d.samples.Client - -> Scheduled with InstanceId=abc123 +10:30:01 INFO i.d.samples.Client - Scheduling orchestration with orderId='ORD-B001-002'... +10:30:01 INFO i.d.samples.Client - -> Scheduled with InstanceId=def456 +10:30:01 INFO i.d.samples.Client - Scheduling orchestration with orderId='ORD-B001-003'... 
+10:30:01 INFO i.d.samples.Client - -> Scheduled with InstanceId=ghi789 +10:30:02 INFO i.d.samples.Client - COMPLETED | InstanceId=abc123 | Output: Order 'ORD-B001-001' => Validation: [Order ORD-B001-001 is valid], Shipping: [Shipped with tracking TRACK-ORD-B001-001-4271] +... +10:30:02 INFO i.d.samples.Client - Batch #1 results: 3 completed, 0 failed +``` + +### Orchestrator Worker terminal (orchestrations only — no activities) + +``` +10:30:02 INFO i.d.samples.OrchestratorWorker - [Orchestrator] Orchestration | Name=OrderProcessingOrchestration | InstanceId=abc123 | Processing order 'ORD-B001-001' +10:30:02 INFO i.d.samples.OrchestratorWorker - [Orchestrator] Orchestration | InstanceId=abc123 | Dispatching ValidateOrder to Validator Worker... +10:30:02 INFO i.d.samples.OrchestratorWorker - [Orchestrator] Orchestration | InstanceId=abc123 | Dispatching ShipOrder to Shipper Worker... +10:30:02 INFO i.d.samples.OrchestratorWorker - [Orchestrator] Orchestration | InstanceId=abc123 | Completed: Order 'ORD-B001-001' => Validation: [...], Shipping: [...] +``` + +### Validator Worker terminal (ValidateOrder only — no ShipOrder, no orchestrations) + +``` +10:30:02 INFO i.d.samples.ValidatorWorker - [Validator] Activity | Name=ValidateOrder | Validating order 'ORD-B001-001'... +10:30:02 INFO i.d.samples.ValidatorWorker - [Validator] Activity | Name=ValidateOrder | Result: Order ORD-B001-001 is valid +``` + +### Shipper Worker terminal (ShipOrder only — no ValidateOrder, no orchestrations) + +``` +10:30:02 INFO i.d.samples.ShipperWorker - [Shipper] Activity | Name=ShipOrder | Shipping order 'ORD-B001-001'... +10:30:02 INFO i.d.samples.ShipperWorker - [Shipper] Activity | Name=ShipOrder | Result: Shipped with tracking TRACK-ORD-B001-001-4271 +``` + +**Key observation:** Each worker processes **only** its registered work item types. No cross-processing occurs. + +## What to Try Next: Strict Routing Experiment + +1. **Stop Shipper Worker** (Ctrl+C in Terminal 3). +2. 
Run the Client again to schedule new orchestrations. +3. Observe that: + - Orchestrator Worker picks up and starts orchestrations. + - Validator Worker completes `ValidateOrder` for each order. + - `ShipOrder` work items **remain pending** — they are not delivered to Validator Worker or Orchestrator Worker. + - The orchestrations stay in "Running" status, waiting for the `ShipOrder` activity to complete. +4. **Restart Shipper Worker** — the pending `ShipOrder` work items are immediately delivered and the orchestrations complete. + +This demonstrates that filtering is strict: work items are routed only to workers with matching filters. There is no fallback to other workers. + +## How It Works + +Each worker process builds a `DurableTaskGrpcWorker` with only its own orchestrations or activities registered, then calls `useWorkItemFilters()` to auto-generate filters from the registry: + +```java +DurableTaskGrpcWorker worker = DurableTaskSchedulerWorkerExtensions + .createWorkerBuilder(connectionString) + .addActivity(new TaskActivityFactory() { /* ... */ }) + .useWorkItemFilters() // auto-generate from registered tasks + .build(); +``` + +- Orchestrator Worker's filter: `orchestrations: [OrderProcessingOrchestration]` +- Validator Worker's filter: `activities: [ValidateOrder]` +- Shipper Worker's filter: `activities: [ShipOrder]` + +DTS creates per-filter queues and routes each work item to the matching queue. If a filter list is empty for a given type (e.g., Validator Worker has no orchestration filter), that worker simply never receives work items of that type. 
+ +To supply explicit filters instead of auto-generating them, use `useWorkItemFilters(WorkItemFilter)`: + +```java +WorkItemFilter filter = WorkItemFilter.newBuilder() + .addOrchestration("OrderProcessingOrchestration") + .addActivity("ValidateOrder") + .build(); + +builder.useWorkItemFilters(filter); +``` + +## Deploying to Azure + +This sample includes full infrastructure-as-code (Bicep) and an `azure.yaml` for one-command deployment via [Azure Developer CLI (`azd`)](https://learn.microsoft.com/azure/developer/azure-developer-cli/). + +### What Gets Deployed + +| Resource | Purpose | +|---|---| +| **Resource Group** | Contains all resources | +| **Durable Task Scheduler** (Consumption SKU) | Managed orchestration backend | +| **Task Hub** | Logical unit for orchestrations and work items | +| **Container Apps Environment** | Shared hosting environment with VNet integration | +| **Azure Container Registry** | Stores Docker images for each service | +| **User-Assigned Managed Identity** | Shared identity with DTS Worker/Client RBAC role | +| **4 Container Apps** | Client, Orchestrator Worker, Validator Worker, Shipper Worker | + +### Deploy with `azd` + +```bash +cd samples/scenarios/WorkItemFilteringSplitActivitiesJava +azd up +``` + +You'll be prompted for an environment name, subscription, and location. The deployment takes ~5 minutes. + +### KEDA Scaling with DTS + +Each worker Container App is configured with a **DTS-aware KEDA custom scale rule** (`azure-durabletask-scheduler`) that scales based on the **work item backlog** in the task hub. 
The key parameter is `workItemType`, which tells the scaler what kind of work to monitor: + +| Container App | Service Name | `workItemType` | Scales on | +|---|---|---|---| +| **Client** | `client` | `Orchestration` | Pending orchestration work items | +| **Orchestrator Worker** | `orchestrator-worker` | `Orchestration` | Pending orchestration work items | +| **Validator Worker** | `validator-worker` | `Activity` | Pending activity work items | +| **Shipper Worker** | `shipper-worker` | `Activity` | Pending activity work items | + +Workers scale from **0 to 10** replicas. When the client finishes its loop and no more work items arrive, workers scale back to zero. + +### Manual Deployment (without `azd`) + +Set the `ENDPOINT` and `TASKHUB` environment variables to point to your deployed scheduler: + +```bash +export ENDPOINT="https://your-scheduler.westus2.durabletask.io" +export TASKHUB="your-taskhub-name" +``` + +The workers and client will automatically use `DefaultAzureCredential` for authentication. Make sure the identity running each process has the **Durable Task Scheduler Worker** / **Durable Task Scheduler Client** role on the scheduler resource. 
+ +## Project Structure + +``` +WorkItemFilteringSplitActivitiesJava/ +├── build.gradle # Root Gradle build (shared dependencies) +├── settings.gradle # Multi-project definition +├── README.md +├── azure.yaml # azd service definitions +├── .gitignore +├── infra/ # Bicep infrastructure-as-code +│ ├── main.bicep # Top-level — resource group, DTS, container apps +│ ├── main.parameters.json +│ ├── abbreviations.json +│ ├── app/ +│ │ ├── app.bicep # Per-service container app (with KEDA scale rule) +│ │ ├── dts.bicep # DTS scheduler + task hub +│ │ └── user-assigned-identity.bicep +│ └── core/ +│ ├── host/ # Container Apps Environment, Registry, App template +│ ├── networking/ # VNet +│ └── security/ # ACR pull role, DTS role assignments +├── shared/ # Shared connection helper +│ ├── build.gradle +│ └── src/main/java/io/durabletask/samples/ +│ └── ConnectionHelper.java +├── client/ # Schedules orchestrations in a loop, prints results +│ ├── build.gradle +│ ├── Dockerfile +│ └── src/main/java/io/durabletask/samples/ +│ └── Client.java +├── orchestrator-worker/ # Orchestrator Worker — runs orchestrations only +│ ├── build.gradle +│ ├── Dockerfile +│ └── src/main/java/io/durabletask/samples/ +│ └── OrchestratorWorker.java +├── validator-worker/ # Validator Worker — runs ValidateOrder activity only +│ ├── build.gradle +│ ├── Dockerfile +│ └── src/main/java/io/durabletask/samples/ +│ └── ValidatorWorker.java +└── shipper-worker/ # Shipper Worker — runs ShipOrder activity only + ├── build.gradle + ├── Dockerfile + └── src/main/java/io/durabletask/samples/ + └── ShipperWorker.java +``` + +## Reference + +- [Work Item Filtering PR (durabletask-java #275)](https://github.com/microsoft/durabletask-java/pull/275) +- [Durable Task Scheduler documentation](https://learn.microsoft.com/azure/durable-task-scheduler/) +- [Durable Task Java SDK](https://github.com/microsoft/durabletask-java) diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/azure.yaml 
b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/azure.yaml new file mode 100644 index 0000000..4a05810 --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/azure.yaml @@ -0,0 +1,36 @@ +metadata: + template: work-item-filtering-split-activities-java + name: work-item-filtering-split-activities-java +services: + client: + project: . + language: java + host: containerapp + apiVersion: 2025-01-01 + docker: + path: ./client/Dockerfile + context: . + orchestrator-worker: + project: . + language: java + host: containerapp + apiVersion: 2025-01-01 + docker: + path: ./orchestrator-worker/Dockerfile + context: . + validator-worker: + project: . + language: java + host: containerapp + apiVersion: 2025-01-01 + docker: + path: ./validator-worker/Dockerfile + context: . + shipper-worker: + project: . + language: java + host: containerapp + apiVersion: 2025-01-01 + docker: + path: ./shipper-worker/Dockerfile + context: . diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/build.gradle b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/build.gradle new file mode 100644 index 0000000..229e759 --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/build.gradle @@ -0,0 +1,38 @@ +// Root build file for the Work Item Filtering Split Activities Java sample. +// Each subproject (client, orchestrator-worker, validator-worker, shipper-worker) +// has its own build.gradle with project-specific settings. 
+ +subprojects { + apply plugin: 'java' + apply plugin: 'application' + + group 'io.durabletask.samples' + version = '0.1.0' + + java { + sourceCompatibility = JavaVersion.VERSION_21 + targetCompatibility = JavaVersion.VERSION_21 + } + + repositories { + mavenLocal() + mavenCentral() + } + + def grpcVersion = '1.80.0' + + dependencies { + implementation 'com.microsoft:durabletask-client:1.9.0' + implementation 'com.microsoft:durabletask-azuremanaged:1.9.0' + implementation 'com.azure:azure-identity:1.18.2' + + // Logging + implementation 'ch.qos.logback:logback-classic:1.5.32' + implementation 'org.slf4j:slf4j-api:2.0.17' + + // gRPC + implementation "io.grpc:grpc-protobuf:${grpcVersion}" + implementation "io.grpc:grpc-stub:${grpcVersion}" + runtimeOnly "io.grpc:grpc-netty-shaded:${grpcVersion}" + } +} diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/client/Dockerfile b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/client/Dockerfile new file mode 100644 index 0000000..6175188 --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/client/Dockerfile @@ -0,0 +1,17 @@ +FROM eclipse-temurin:21-jdk AS build +WORKDIR /project + +COPY gradlew gradlew +COPY gradle/ gradle/ +COPY build.gradle build.gradle +COPY settings.gradle settings.gradle +COPY shared/ shared/ +COPY client/ client/ + +RUN chmod +x gradlew && ./gradlew :client:installDist --no-daemon + +FROM eclipse-temurin:21-jre +WORKDIR /app +COPY --from=build /project/client/build/install/client . 
+ +ENTRYPOINT ["bin/client"] diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/client/build.gradle b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/client/build.gradle new file mode 100644 index 0000000..2b438d8 --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/client/build.gradle @@ -0,0 +1,7 @@ +application { + mainClass = 'io.durabletask.samples.Client' +} + +dependencies { + implementation project(':shared') +} diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/client/src/main/java/io/durabletask/samples/Client.java b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/client/src/main/java/io/durabletask/samples/Client.java new file mode 100644 index 0000000..aa6e0e1 --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/client/src/main/java/io/durabletask/samples/Client.java @@ -0,0 +1,112 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +package io.durabletask.samples; + +import com.microsoft.durabletask.*; +import com.microsoft.durabletask.azuremanaged.DurableTaskSchedulerClientExtensions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.time.Duration; +import java.time.Instant; +import java.util.ArrayList; +import java.util.List; + +/** + * Client — schedules orchestrations in a continuous loop and prints results. + * + *

Schedules a batch of 3 orchestrations every 30 seconds for 10 minutes. + * This makes it easy to observe scaling behavior over time. + */ +public final class Client { + private static final Logger logger = LoggerFactory.getLogger(Client.class); + + public static void main(String[] args) throws Exception { + String connectionString = ConnectionHelper.getConnectionString(); + + logger.info("=== Work Item Filtering Demo — Client ==="); + logger.info("Connection: {}", connectionString); + + // Create the Durable Task client + DurableTaskClient client = DurableTaskSchedulerClientExtensions + .createClientBuilder(connectionString) + .build(); + + // Run in a loop: schedule a batch of orchestrations every 30 seconds for 10 minutes + int orchestrationsPerBatch = 3; + long intervalSeconds = 30; + long totalDurationMinutes = 10; + Instant deadline = Instant.now().plusSeconds(totalDurationMinutes * 60); + + int totalCompleted = 0; + int totalFailed = 0; + int batchNumber = 0; + + logger.info("Will schedule {} orchestrations every {}s for {} minutes.", + orchestrationsPerBatch, intervalSeconds, totalDurationMinutes); + logger.info("(Make sure the Orchestrator, Validator, and Shipper workers are all running)\n"); + + while (Instant.now().isBefore(deadline)) { + batchNumber++; + logger.info("--- Batch #{} at {} ---", batchNumber, Instant.now()); + + List instanceIds = new ArrayList<>(); + for (int i = 1; i <= orchestrationsPerBatch; i++) { + String orderId = String.format("ORD-B%03d-%03d", batchNumber, i); + logger.info("Scheduling orchestration with orderId='{}'...", orderId); + + String instanceId = client.scheduleNewOrchestrationInstance( + "OrderProcessingOrchestration", + new NewOrchestrationInstanceOptions().setInput(orderId)); + + instanceIds.add(instanceId); + logger.info(" -> Scheduled with InstanceId={}", instanceId); + } + + // Wait for all orchestrations in this batch to complete + int batchCompleted = 0; + int batchFailed = 0; + + for (String id : instanceIds) { + 
try { + OrchestrationMetadata result = client.waitForInstanceCompletion( + id, Duration.ofSeconds(120), true); + + if (result.getRuntimeStatus() == OrchestrationRuntimeStatus.COMPLETED) { + batchCompleted++; + logger.info("COMPLETED | InstanceId={} | Output: {}", + result.getInstanceId(), result.readOutputAs(String.class)); + } else { + batchFailed++; + logger.error("FAILED | InstanceId={} | Status={}", + result.getInstanceId(), result.getRuntimeStatus()); + } + } catch (Exception ex) { + batchFailed++; + logger.error("Error waiting for orchestration {}", id, ex); + } + } + + totalCompleted += batchCompleted; + totalFailed += batchFailed; + + logger.info("Batch #{} results: {} completed, {} failed", batchNumber, batchCompleted, batchFailed); + + // Wait for the next interval (unless we've passed the deadline) + if (Instant.now().isBefore(deadline)) { + long remainingSeconds = Duration.between(Instant.now(), deadline).getSeconds(); + long waitSeconds = Math.min(remainingSeconds, intervalSeconds); + logger.info("Next batch in {}s (deadline in {} min)\n", + waitSeconds, remainingSeconds / 60); + Thread.sleep(waitSeconds * 1000); + } + } + + logger.info("\n=== FINAL RESULTS: {} completed, {} failed across {} batches ===", + totalCompleted, totalFailed, batchNumber); + + // Keep the process alive so Container Apps doesn't mark it as failed + logger.info("Demo complete. 
Staying alive — press Ctrl+C to exit."); + Thread.currentThread().join(); + } +} diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/client/src/main/resources/logback.xml b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/client/src/main/resources/logback.xml new file mode 100644 index 0000000..7f2565c --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/client/src/main/resources/logback.xml @@ -0,0 +1,14 @@ + + + + + %d{HH:mm:ss} %-5level %logger{36} - %msg%n + + + + + + + + + diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/gradle/wrapper/gradle-wrapper.jar b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 0000000..d997cfc Binary files /dev/null and b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/gradle/wrapper/gradle-wrapper.jar differ diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/gradle/wrapper/gradle-wrapper.properties b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 0000000..3499ded --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,6 @@ +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-8.5-bin.zip +networkTimeout=10000 +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/gradlew b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/gradlew new file mode 100755 index 0000000..0262dcb --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/gradlew @@ -0,0 +1,248 @@ +#!/bin/sh + +# +# Copyright © 2015 the original authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 +# + +############################################################################## +# +# Gradle start up script for POSIX generated by Gradle. +# +# Important for running: +# +# (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is +# noncompliant, but you have some other compliant shell such as ksh or +# bash, then to run this script, type that shell name before the whole +# command line, like: +# +# ksh Gradle +# +# Busybox and similar reduced shells will NOT work, because this script +# requires all of these POSIX shell features: +# * functions; +# * expansions «$var», «${var}», «${var:-default}», «${var+SET}», +# «${var#prefix}», «${var%suffix}», and «$( cmd )»; +# * compound commands having a testable exit status, especially «case»; +# * various built-in commands including «command», «set», and «ulimit». +# +# Important for patching: +# +# (2) This script targets any POSIX shell, so it avoids extensions provided +# by Bash, Ksh, etc; in particular arrays are avoided. +# +# The "traditional" practice of packing multiple parameters into a +# space-separated string is a well documented source of bugs and security +# problems, so this is (mostly) avoided, by progressively accumulating +# options in "$@", and eventually passing that to Java. +# +# Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS, +# and GRADLE_OPTS) rely on word-splitting, this is performed explicitly; +# see the in-line comments for details. 
+# +# There are tweaks for specific operating systems such as AIX, CygWin, +# Darwin, MinGW, and NonStop. +# +# (3) This script is generated from the Groovy template +# https://github.com/gradle/gradle/blob/b631911858264c0b6e4d6603d677ff5218766cee/platforms/jvm/plugins-application/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt +# within the Gradle project. +# +# You can find Gradle at https://github.com/gradle/gradle/. +# +############################################################################## + +# Attempt to set APP_HOME + +# Resolve links: $0 may be a link +app_path=$0 + +# Need this for daisy-chained symlinks. +while + APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path + [ -h "$app_path" ] +do + ls=$( ls -ld "$app_path" ) + link=${ls#*' -> '} + case $link in #( + /*) app_path=$link ;; #( + *) app_path=$APP_HOME$link ;; + esac +done + +# This is normally unused +# shellcheck disable=SC2034 +APP_BASE_NAME=${0##*/} +# Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036) +APP_HOME=$( cd -P "${APP_HOME:-./}" > /dev/null && printf '%s\n' "$PWD" ) || exit + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD=maximum + +warn () { + echo "$*" +} >&2 + +die () { + echo + echo "$*" + echo + exit 1 +} >&2 + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "$( uname )" in #( + CYGWIN* ) cygwin=true ;; #( + Darwin* ) darwin=true ;; #( + MSYS* | MINGW* ) msys=true ;; #( + NONSTOP* ) nonstop=true ;; +esac + + + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD=$JAVA_HOME/jre/sh/java + else + JAVACMD=$JAVA_HOME/bin/java + fi + if [ ! 
-x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD=java + if ! command -v java >/dev/null 2>&1 + then + die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +fi + +# Increase the maximum file descriptors if we can. +if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then + case $MAX_FD in #( + max*) + # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC2039,SC3045 + MAX_FD=$( ulimit -H -n ) || + warn "Could not query maximum file descriptor limit" + esac + case $MAX_FD in #( + '' | soft) :;; #( + *) + # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC2039,SC3045 + ulimit -n "$MAX_FD" || + warn "Could not set maximum file descriptor limit to $MAX_FD" + esac +fi + +# Collect all arguments for the java command, stacking in reverse order: +# * args from the command line +# * the main class name +# * -classpath +# * -D...appname settings +# * --module-path (only if needed) +# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables. 
+ +# For Cygwin or MSYS, switch paths to Windows format before running java +if "$cygwin" || "$msys" ; then + APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) + + JAVACMD=$( cygpath --unix "$JAVACMD" ) + + # Now convert the arguments - kludge to limit ourselves to /bin/sh + for arg do + if + case $arg in #( + -*) false ;; # don't mess with options #( + /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath + [ -e "$t" ] ;; #( + *) false ;; + esac + then + arg=$( cygpath --path --ignore --mixed "$arg" ) + fi + # Roll the args list around exactly as many times as the number of + # args, so each arg winds up back in the position where it started, but + # possibly modified. + # + # NB: a `for` loop captures its iteration list before it begins, so + # changing the positional parameters here affects neither the number of + # iterations, nor the values presented in `arg`. + shift # remove old arg + set -- "$@" "$arg" # push replacement arg + done +fi + + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' + +# Collect all arguments for the java command: +# * DEFAULT_JVM_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments, +# and any embedded shellness will be escaped. +# * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be +# treated as '${Hostname}' itself on the command line. + +set -- \ + "-Dorg.gradle.appname=$APP_BASE_NAME" \ + -jar "$APP_HOME/gradle/wrapper/gradle-wrapper.jar" \ + "$@" + +# Stop when "xargs" is not available. +if ! command -v xargs >/dev/null 2>&1 +then + die "xargs is not available" +fi + +# Use "xargs" to parse quoted args. +# +# With -n1 it outputs one arg per line, with the quotes and backslashes removed. 
+# +# In Bash we could simply go: +# +# readarray ARGS < <( xargs -n1 <<<"$var" ) && +# set -- "${ARGS[@]}" "$@" +# +# but POSIX shell has neither arrays nor command substitution, so instead we +# post-process each arg (as a line of input to sed) to backslash-escape any +# character that might be a shell metacharacter, then use eval to reverse +# that process (while maintaining the separation between arguments), and wrap +# the whole thing up as a single "set" statement. +# +# This will of course break if any of these variables contains a newline or +# an unmatched quote. +# + +eval "set -- $( + printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" | + xargs -n1 | + sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' | + tr '\n' ' ' + )" '"$@"' + +exec "$JAVACMD" "$@" diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/gradlew.bat b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/gradlew.bat new file mode 100644 index 0000000..c4bdd3a --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/gradlew.bat @@ -0,0 +1,93 @@ +@rem +@rem Copyright 2015 the original author or authors. +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem https://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. 
+@rem +@rem SPDX-License-Identifier: Apache-2.0 +@rem + +@if "%DEBUG%"=="" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%"=="" set DIRNAME=. +@rem This is normally unused +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Resolve any "." and ".." in APP_HOME to make it shorter. +for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if %ERRORLEVEL% equ 0 goto execute + +echo. 1>&2 +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 1>&2 +echo. 1>&2 +echo Please set the JAVA_HOME variable in your environment to match the 1>&2 +echo location of your Java installation. 1>&2 + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto execute + +echo. 1>&2 +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 1>&2 +echo. 1>&2 +echo Please set the JAVA_HOME variable in your environment to match the 1>&2 +echo location of your Java installation. 
1>&2 + +goto fail + +:execute +@rem Setup the command line + + + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -jar "%APP_HOME%\gradle\wrapper\gradle-wrapper.jar" %* + +:end +@rem End local scope for the variables with windows NT shell +if %ERRORLEVEL% equ 0 goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! +set EXIT_CODE=%ERRORLEVEL% +if %EXIT_CODE% equ 0 set EXIT_CODE=1 +if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE% +exit /b %EXIT_CODE% + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/abbreviations.json b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/abbreviations.json new file mode 100644 index 0000000..1f9a112 --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/abbreviations.json @@ -0,0 +1,139 @@ +{ + "analysisServicesServers": "as", + "apiManagementService": "apim-", + "appConfigurationStores": "appcs-", + "appManagedEnvironments": "cae-", + "appContainerApps": "ca-", + "authorizationPolicyDefinitions": "policy-", + "automationAutomationAccounts": "aa-", + "blueprintBlueprints": "bp-", + "blueprintBlueprintsArtifacts": "bpa-", + "cacheRedis": "redis-", + "cdnProfiles": "cdnp-", + "cdnProfilesEndpoints": "cdne-", + "cognitiveServicesAccounts": "cog-", + "cognitiveServicesFormRecognizer": "cog-fr-", + "cognitiveServicesTextAnalytics": "cog-ta-", + "cognitiveServicesSpeech": "cog-sp-", + "computeAvailabilitySets": "avail-", + "computeCloudServices": "cld-", + "computeDiskEncryptionSets": "des", + "computeDisks": "disk", + "computeDisksOs": "osdisk", + "computeGalleries": "gal", + "computeSnapshots": "snap-", + "computeVirtualMachines": "vm", + "computeVirtualMachineScaleSets": "vmss-", + "containerInstanceContainerGroups": "ci", + "containerRegistryRegistries": "cr", + 
"containerServiceManagedClusters": "aks-", + "databricksWorkspaces": "dbw-", + "dataFactoryFactories": "adf-", + "dataLakeAnalyticsAccounts": "dla", + "dataLakeStoreAccounts": "dls", + "dataMigrationServices": "dms-", + "dBforMySQLServers": "mysql-", + "dBforPostgreSQLServers": "psql-", + "devicesIotHubs": "iot-", + "devicesProvisioningServices": "provs-", + "devicesProvisioningServicesCertificates": "pcert-", + "documentDBDatabaseAccounts": "cosmos-", + "eventGridDomains": "evgd-", + "eventGridDomainsTopics": "evgt-", + "eventGridEventSubscriptions": "evgs-", + "eventHubNamespaces": "evhns-", + "eventHubNamespacesEventHubs": "evh-", + "hdInsightClustersHadoop": "hadoop-", + "hdInsightClustersHbase": "hbase-", + "hdInsightClustersKafka": "kafka-", + "hdInsightClustersMl": "mls-", + "hdInsightClustersSpark": "spark-", + "hdInsightClustersStorm": "storm-", + "hybridComputeMachines": "arcs-", + "insightsActionGroups": "ag-", + "insightsComponents": "appi-", + "keyVaultVaults": "kv-", + "kubernetesConnectedClusters": "arck", + "kustoClusters": "dec", + "kustoClustersDatabases": "dedb", + "loadTesting": "lt-", + "logicIntegrationAccounts": "ia-", + "logicWorkflows": "logic-", + "machineLearningServicesWorkspaces": "mlw-", + "managedIdentityUserAssignedIdentities": "id-", + "managementManagementGroups": "mg-", + "migrateAssessmentProjects": "migr-", + "networkApplicationGateways": "agw-", + "networkApplicationSecurityGroups": "asg-", + "networkAzureFirewalls": "afw-", + "networkBastionHosts": "bas-", + "networkConnections": "con-", + "networkDnsZones": "dnsz-", + "networkExpressRouteCircuits": "erc-", + "networkFirewallPolicies": "afwp-", + "networkFirewallPoliciesWebApplication": "waf", + "networkFirewallPoliciesRuleGroups": "wafrg", + "networkFrontDoors": "fd-", + "networkFrontdoorWebApplicationFirewallPolicies": "fdfp-", + "networkLoadBalancersExternal": "lbe-", + "networkLoadBalancersInternal": "lbi-", + "networkLoadBalancersInboundNatRules": "rule-", + 
"networkLocalNetworkGateways": "lgw-", + "networkNatGateways": "ng-", + "networkNetworkInterfaces": "nic-", + "networkNetworkSecurityGroups": "nsg-", + "networkNetworkSecurityGroupsSecurityRules": "nsgsr-", + "networkNetworkWatchers": "nw-", + "networkPrivateDnsZones": "pdnsz-", + "networkPrivateLinkServices": "pl-", + "networkPublicIPAddresses": "pip-", + "networkPublicIPPrefixes": "ippre-", + "networkRouteFilters": "rf-", + "networkRouteTables": "rt-", + "networkRouteTablesRoutes": "udr-", + "networkTrafficManagerProfiles": "traf-", + "networkVirtualNetworkGateways": "vgw-", + "networkVirtualNetworks": "vnet-", + "networkVirtualNetworksSubnets": "snet-", + "networkVirtualNetworksVirtualNetworkPeerings": "peer-", + "networkVirtualWans": "vwan-", + "networkVpnGateways": "vpng-", + "networkVpnGatewaysVpnConnections": "vcn-", + "networkVpnGatewaysVpnSites": "vst-", + "notificationHubsNamespaces": "ntfns-", + "notificationHubsNamespacesNotificationHubs": "ntf-", + "operationalInsightsWorkspaces": "log-", + "portalDashboards": "dash-", + "powerBIDedicatedCapacities": "pbi-", + "purviewAccounts": "pview-", + "recoveryServicesVaults": "rsv-", + "resourcesResourceGroups": "rg-", + "searchSearchServices": "srch-", + "serviceBusNamespaces": "sb-", + "serviceBusNamespacesQueues": "sbq-", + "serviceBusNamespacesTopics": "sbt-", + "serviceEndPointPolicies": "se-", + "serviceFabricClusters": "sf-", + "signalRServiceSignalR": "sigr", + "sqlManagedInstances": "sqlmi-", + "sqlServers": "sql-", + "sqlServersDataWarehouse": "sqldw-", + "sqlServersDatabases": "sqldb-", + "sqlServersDatabasesStretch": "sqlstrdb-", + "storageStorageAccounts": "st", + "storageStorageAccountsVm": "stvm", + "storSimpleManagers": "ssimp", + "streamAnalyticsCluster": "asa-", + "synapseWorkspaces": "syn", + "synapseWorkspacesAnalyticsWorkspaces": "synw", + "synapseWorkspacesSqlPoolsDedicated": "syndp", + "synapseWorkspacesSqlPoolsSpark": "synsp", + "timeSeriesInsightsEnvironments": "tsi-", + 
"webServerFarms": "plan-", + "webSitesAppService": "app-", + "webSitesAppServiceEnvironment": "ase-", + "webSitesFunctions": "func-", + "webStaticSites": "stapp-", + "dts": "dts-", + "taskhub": "taskhub-" +} diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/app/app.bicep b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/app/app.bicep new file mode 100644 index 0000000..ec4953a --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/app/app.bicep @@ -0,0 +1,66 @@ +param appName string +param location string = resourceGroup().location +param tags object = {} + +param identityName string +param containerAppsEnvironmentName string +param containerRegistryName string +param serviceName string = 'aca' +param dtsEndpoint string +param taskHubName string + +@description('DTS work item type for KEDA scaling: Orchestration or Activity') +param workItemType string = 'Orchestration' + +type managedIdentity = { + resourceId: string + clientId: string +} + +@description('Unique identifier for user-assigned managed identity.') +param userAssignedManagedIdentity managedIdentity + +module containerAppsApp '../core/host/container-app.bicep' = { + name: 'container-apps-${serviceName}' + params: { + name: appName + containerAppsEnvironmentName: containerAppsEnvironmentName + containerRegistryName: containerRegistryName + location: location + tags: union(tags, { 'azd-service-name': serviceName }) + ingressEnabled: false + secrets: { + 'azure-managed-identity-client-id': userAssignedManagedIdentity.clientId + } + env: [ + { + name: 'AZURE_MANAGED_IDENTITY_CLIENT_ID' + secretRef: 'azure-managed-identity-client-id' + } + { + name: 'ENDPOINT' + value: dtsEndpoint + } + { + name: 'TASKHUB' + value: taskHubName + } + ] + identityName: identityName + containerMinReplicas: 0 + containerMaxReplicas: 10 + enableCustomScaleRule: true + scaleRuleName: 'dtsscaler-${serviceName}' + scaleRuleType: 'azure-durabletask-scheduler' + 
scaleRuleMetadata: { + endpoint: dtsEndpoint + maxConcurrentWorkItemsCount: '1' + taskhubName: taskHubName + workItemType: workItemType + } + scaleRuleIdentity: userAssignedManagedIdentity.resourceId + } +} + +output endpoint string = containerAppsApp.outputs.uri +output envName string = containerAppsApp.outputs.name diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/app/dts.bicep b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/app/dts.bicep new file mode 100644 index 0000000..6280e5a --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/app/dts.bicep @@ -0,0 +1,31 @@ +param ipAllowlist array +param location string +param tags object = {} +param name string +param taskhubname string +param skuName string +param skuCapacity int = 0 + +resource dts 'Microsoft.DurableTask/schedulers@2025-11-01' = { + location: location + tags: tags + name: name + properties: { + ipAllowlist: ipAllowlist + sku: skuName == 'Dedicated' ? { + name: skuName + capacity: skuCapacity + } : { + name: skuName + } + } +} + +resource taskhub 'Microsoft.DurableTask/schedulers/taskhubs@2025-11-01' = { + parent: dts + name: taskhubname +} + +output dts_NAME string = dts.name +output dts_URL string = dts.properties.endpoint +output TASKHUB_NAME string = taskhub.name diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/app/user-assigned-identity.bicep b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/app/user-assigned-identity.bicep new file mode 100644 index 0000000..0583ab8 --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/app/user-assigned-identity.bicep @@ -0,0 +1,17 @@ +metadata description = 'Creates a Microsoft Entra user-assigned identity.' 
+ +param name string +param location string = resourceGroup().location +param tags object = {} + +resource identity 'Microsoft.ManagedIdentity/userAssignedIdentities@2023-01-31' = { + name: name + location: location + tags: tags +} + +output name string = identity.name +output resourceId string = identity.id +output principalId string = identity.properties.principalId +output clientId string = identity.properties.clientId +output tenantId string = identity.properties.tenantId diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/core/host/container-app.bicep b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/core/host/container-app.bicep new file mode 100644 index 0000000..8b1df5c --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/core/host/container-app.bicep @@ -0,0 +1,195 @@ +metadata description = 'Creates a container app in an Azure Container App environment.' +param name string +param location string = resourceGroup().location +param tags object = {} + +@description('Allowed origins') +param allowedOrigins array = [] + +@description('Name of the environment for container apps') +param containerAppsEnvironmentName string + +@description('CPU cores allocated to a single container instance, e.g., 0.5') +param containerCpuCoreCount string = '0.5' + +@description('The maximum number of replicas to run. Must be at least 1.') +@minValue(1) +param containerMaxReplicas int = 10 + +@description('Memory allocated to a single container instance, e.g., 1Gi') +param containerMemory string = '1.0Gi' + +@description('The minimum number of replicas to run. 
Must be at least 0.') +param containerMinReplicas int = 0 + +@description('The name of the container') +param containerName string = 'main' + +@description('Enable custom scale rule') +param enableCustomScaleRule bool = false + +@description('Scale rule name') +param scaleRuleName string = 'scaler' + +@description('Scale rule type') +param scaleRuleType string = '' + +@description('Scale rule metadata') +param scaleRuleMetadata object = {} + +@description('Scale rule identity') +param scaleRuleIdentity string = '' + +@description('The name of the container registry') +param containerRegistryName string = '' + +@description('Hostname suffix for container registry. Set when deploying to sovereign clouds') +param containerRegistryHostSuffix string = 'azurecr.io' + +@description('The protocol used by Dapr to connect to the app, e.g., http or grpc') +@allowed([ 'http', 'grpc' ]) +param daprAppProtocol string = 'http' + +@description('The Dapr app ID') +param daprAppId string = containerName + +@description('Enable Dapr') +param daprEnabled bool = false + +@description('The environment variables for the container') +param env array = [] + +@description('Specifies if the resource ingress is exposed externally') +param external bool = true + +@description('The name of the user-assigned identity') +param identityName string = '' + +@description('The type of identity for the resource') +@allowed([ 'None', 'SystemAssigned', 'UserAssigned' ]) +param identityType string = 'None' + +@description('The name of the container image') +param imageName string = '' + +@description('Specifies if Ingress is enabled for the container app') +param ingressEnabled bool = true + +param revisionMode string = 'Single' + +@description('The secrets required for the container') +@secure() +param secrets object = {} + +@description('The service binds associated with the container') +param serviceBinds array = [] + +@description('The name of the container apps add-on to use. e.g. 
redis') +param serviceType string = '' + +@description('The target port for the container') +param targetPort int = 80 + +resource userIdentity 'Microsoft.ManagedIdentity/userAssignedIdentities@2023-01-31' existing = if (!empty(identityName)) { + name: identityName +} + +// Private registry support requires both an ACR name and a User Assigned managed identity +var usePrivateRegistry = !empty(identityName) && !empty(containerRegistryName) + +// Automatically set to `UserAssigned` when an `identityName` has been set +var normalizedIdentityType = !empty(identityName) ? 'UserAssigned' : identityType + +module containerRegistryAccess '../security/registry-access.bicep' = if (usePrivateRegistry) { + name: '${deployment().name}-registry-access' + params: { + containerRegistryName: containerRegistryName + principalId: usePrivateRegistry ? userIdentity!.properties.principalId : '' + } +} + +resource app 'Microsoft.App/containerApps@2025-01-01' = { + name: name + location: location + tags: tags + // It is critical that the identity is granted ACR pull access before the app is created + // otherwise the container app will throw a provision error + // This also forces us to use an user assigned managed identity since there would no way to + // provide the system assigned identity with the ACR pull access before the app is created + dependsOn: usePrivateRegistry ? [ containerRegistryAccess ] : [] + identity: { + type: normalizedIdentityType + userAssignedIdentities: !empty(identityName) && normalizedIdentityType == 'UserAssigned' ? { '${userIdentity.id}': {} } : null + } + properties: { + managedEnvironmentId: containerAppsEnvironment.id + configuration: { + activeRevisionsMode: revisionMode + ingress: ingressEnabled ? { + external: external + targetPort: targetPort + transport: 'auto' + corsPolicy: { + allowedOrigins: union([ 'https://portal.azure.com', 'https://ms.portal.azure.com' ], allowedOrigins) + } + } : null + dapr: daprEnabled ? 
{ + enabled: true + appId: daprAppId + appProtocol: daprAppProtocol + appPort: ingressEnabled ? targetPort : 0 + } : { enabled: false } + secrets: [for secret in items(secrets): { + name: secret.key + #disable-next-line use-secure-value-for-secure-inputs + value: secret.value + }] + service: !empty(serviceType) ? { type: serviceType } : null + registries: usePrivateRegistry ? [ + { + server: '${containerRegistryName}.${containerRegistryHostSuffix}' + identity: userIdentity.id + } + ] : [] + } + template: { + serviceBinds: !empty(serviceBinds) ? serviceBinds : null + containers: [ + { + image: !empty(imageName) ? imageName : 'mcr.microsoft.com/azuredocs/containerapps-helloworld:latest' + name: containerName + env: env + resources: { + cpu: json(containerCpuCoreCount) + memory: containerMemory + } + } + ] + scale: { + minReplicas: containerMinReplicas + maxReplicas: containerMaxReplicas + rules: enableCustomScaleRule ? [ + { + name: scaleRuleName + custom: { + type: scaleRuleType + metadata: scaleRuleMetadata + identity: scaleRuleIdentity + } + } + ] : [] + } + } + } +} + +resource containerAppsEnvironment 'Microsoft.App/managedEnvironments@2023-05-01' existing = { + name: containerAppsEnvironmentName +} + +output defaultDomain string = containerAppsEnvironment.properties.defaultDomain +output identityPrincipalId string = normalizedIdentityType == 'None' ? '' : (empty(identityName) ? app.identity.principalId : userIdentity!.properties.principalId) +output imageName string = imageName +output name string = app.name +output serviceBind object = !empty(serviceType) ? { serviceId: app.id, name: name } : {} +output uri string = ingressEnabled ? 
'https://${app.properties.configuration.ingress.fqdn}' : '' diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/core/host/container-apps-environment.bicep b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/core/host/container-apps-environment.bicep new file mode 100644 index 0000000..560d293 --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/core/host/container-apps-environment.bicep @@ -0,0 +1,27 @@ +metadata description = 'Creates an Azure Container Apps environment.' +param name string +param location string = resourceGroup().location +param tags object = {} + +@description('Subnet resource ID for the Container Apps environment') +param subnetResourceId string = '' + +@description('Whether to use an internal or external load balancer') +@allowed(['Internal', 'External']) +param loadBalancerType string = 'External' + +resource containerAppsEnvironment 'Microsoft.App/managedEnvironments@2023-05-01' = { + name: name + location: location + tags: tags + properties: { + vnetConfiguration: !empty(subnetResourceId) ? { + infrastructureSubnetId: subnetResourceId + internal: loadBalancerType == 'Internal' + } : null + } +} + +output defaultDomain string = containerAppsEnvironment.properties.defaultDomain +output id string = containerAppsEnvironment.id +output name string = containerAppsEnvironment.name diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/core/host/container-apps.bicep b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/core/host/container-apps.bicep new file mode 100644 index 0000000..4511423 --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/core/host/container-apps.bicep @@ -0,0 +1,44 @@ +metadata description = 'Creates an Azure Container Registry and an Azure Container Apps environment.' 
+param name string +param location string = resourceGroup().location +param tags object = {} + +param containerAppsEnvironmentName string +param containerRegistryName string +param containerRegistryAdminUserEnabled bool = false + +// Virtual network and subnet parameters +param subnetResourceId string = '' +param loadBalancerType string = 'External' + +module containerAppsEnvironment 'container-apps-environment.bicep' = { + name: '${name}-container-apps-environment' + params: { + name: containerAppsEnvironmentName + location: location + tags: tags + subnetResourceId: subnetResourceId + loadBalancerType: loadBalancerType + } +} + +module containerRegistry 'container-registry.bicep' = { + name: '${name}-container-registry' + params: { + name: containerRegistryName + location: location + adminUserEnabled: containerRegistryAdminUserEnabled + tags: tags + sku: { + name: 'Standard' + } + anonymousPullEnabled: false + } +} + +output defaultDomain string = containerAppsEnvironment.outputs.defaultDomain +output environmentName string = containerAppsEnvironment.outputs.name +output environmentId string = containerAppsEnvironment.outputs.id + +output registryLoginServer string = containerRegistry.outputs.loginServer +output registryName string = containerRegistry.outputs.name diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/core/host/container-registry.bicep b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/core/host/container-registry.bicep new file mode 100644 index 0000000..9ea04a2 --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/core/host/container-registry.bicep @@ -0,0 +1,134 @@ +metadata description = 'Creates an Azure Container Registry.' 
+param name string +param location string = resourceGroup().location +param tags object = {} + +@description('Indicates whether admin user is enabled') +param adminUserEnabled bool = false + +@description('Indicates whether anonymous pull is enabled') +param anonymousPullEnabled bool = false + +@description('Azure ad authentication as arm policy settings') +param azureADAuthenticationAsArmPolicy object = { + status: 'enabled' +} + +@description('Indicates whether data endpoint is enabled') +param dataEndpointEnabled bool = false + +@description('Encryption settings') +param encryption object = { + status: 'disabled' +} + +@description('Export policy settings') +param exportPolicy object = { + status: 'enabled' +} + +@description('Metadata search settings') +param metadataSearch string = 'Disabled' + +@description('Options for bypassing network rules') +param networkRuleBypassOptions string = 'AzureServices' + +@description('Public network access setting') +param publicNetworkAccess string = 'Enabled' + +@description('Quarantine policy settings') +param quarantinePolicy object = { + status: 'disabled' +} + +@description('Retention policy settings') +param retentionPolicy object = { + days: 7 + status: 'disabled' +} + +@description('Scope maps setting') +param scopeMaps array = [] + +@description('SKU settings') +param sku object = { + name: 'Basic' +} + +@description('Soft delete policy settings') +param softDeletePolicy object = { + retentionDays: 7 + status: 'disabled' +} + +@description('Trust policy settings') +param trustPolicy object = { + type: 'Notary' + status: 'disabled' +} + +@description('Zone redundancy setting') +param zoneRedundancy string = 'Disabled' + +@description('The log analytics workspace ID used for logging and monitoring') +param workspaceId string = '' + +// 2023-11-01-preview needed for metadataSearch +resource containerRegistry 'Microsoft.ContainerRegistry/registries@2023-11-01-preview' = { + name: name + location: location + tags: tags + 
sku: sku + properties: { + adminUserEnabled: adminUserEnabled + anonymousPullEnabled: anonymousPullEnabled + dataEndpointEnabled: dataEndpointEnabled + encryption: encryption + metadataSearch: metadataSearch + networkRuleBypassOptions: networkRuleBypassOptions + policies:{ + quarantinePolicy: quarantinePolicy + trustPolicy: trustPolicy + retentionPolicy: retentionPolicy + exportPolicy: exportPolicy + azureADAuthenticationAsArmPolicy: azureADAuthenticationAsArmPolicy + softDeletePolicy: softDeletePolicy + } + publicNetworkAccess: publicNetworkAccess + zoneRedundancy: zoneRedundancy + } + + resource scopeMap 'scopeMaps' = [for scopeMap in scopeMaps: { + name: scopeMap.name + properties: scopeMap.properties + }] +} + +resource diagnostics 'Microsoft.Insights/diagnosticSettings@2021-05-01-preview' = if (!empty(workspaceId)) { + name: 'registry-diagnostics' + scope: containerRegistry + properties: { + workspaceId: workspaceId + logs: [ + { + category: 'ContainerRegistryRepositoryEvents' + enabled: true + } + { + category: 'ContainerRegistryLoginEvents' + enabled: true + } + ] + metrics: [ + { + category: 'AllMetrics' + enabled: true + timeGrain: 'PT1M' + } + ] + } +} + +output id string = containerRegistry.id +output loginServer string = containerRegistry.properties.loginServer +output name string = containerRegistry.name diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/core/networking/vnet.bicep b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/core/networking/vnet.bicep new file mode 100644 index 0000000..57b5051 --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/core/networking/vnet.bicep @@ -0,0 +1,50 @@ +@description('The name of the Virtual Network') +param name string + +@description('The Azure region where the Virtual Network should exist') +param location string = resourceGroup().location + +@description('Optional tags for the resources') +param tags object = {} + +@description('The address 
prefixes of the Virtual Network') +param addressPrefixes array = ['10.0.0.0/16'] + +@description('The subnets to create in the Virtual Network') +param subnets array = [ + { + name: 'infrastructure-subnet' + properties: { + addressPrefix: '10.0.0.0/21' + delegations: [] + privateEndpointNetworkPolicies: 'Disabled' + privateLinkServiceNetworkPolicies: 'Enabled' + } + } + { + name: 'workload-subnet' + properties: { + addressPrefix: '10.0.8.0/21' + delegations: [] + privateEndpointNetworkPolicies: 'Disabled' + privateLinkServiceNetworkPolicies: 'Enabled' + } + } +] + +resource vnet 'Microsoft.Network/virtualNetworks@2022-07-01' = { + name: name + location: location + tags: tags + properties: { + addressSpace: { + addressPrefixes: addressPrefixes + } + subnets: subnets + } +} + +output id string = vnet.id +output name string = vnet.name +output infrastructureSubnetId string = resourceId('Microsoft.Network/virtualNetworks/subnets', name, 'infrastructure-subnet') +output workloadSubnetId string = resourceId('Microsoft.Network/virtualNetworks/subnets', name, 'workload-subnet') diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/core/security/registry-access.bicep b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/core/security/registry-access.bicep new file mode 100644 index 0000000..fc66837 --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/core/security/registry-access.bicep @@ -0,0 +1,19 @@ +metadata description = 'Assigns ACR Pull permissions to access an Azure Container Registry.' 
+param containerRegistryName string +param principalId string + +var acrPullRole = subscriptionResourceId('Microsoft.Authorization/roleDefinitions', '7f951dda-4ed3-4680-a7ca-43fe172d538d') + +resource aksAcrPull 'Microsoft.Authorization/roleAssignments@2022-04-01' = { + scope: containerRegistry // Use when specifying a scope that is different than the deployment scope + name: guid(subscription().id, resourceGroup().id, principalId, acrPullRole) + properties: { + roleDefinitionId: acrPullRole + principalType: 'ServicePrincipal' + principalId: principalId + } +} + +resource containerRegistry 'Microsoft.ContainerRegistry/registries@2023-01-01-preview' existing = { + name: containerRegistryName +} diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/core/security/role.bicep b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/core/security/role.bicep new file mode 100644 index 0000000..0b30cfd --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/core/security/role.bicep @@ -0,0 +1,21 @@ +metadata description = 'Creates a role assignment for a service principal.' 
+param principalId string
+
+@allowed([
+  'Device'
+  'ForeignGroup'
+  'Group'
+  'ServicePrincipal'
+  'User'
+])
+param principalType string = 'ServicePrincipal'
+param roleDefinitionId string
+
+resource role 'Microsoft.Authorization/roleAssignments@2022-04-01' = {
+  name: guid(subscription().id, resourceGroup().id, principalId, roleDefinitionId)
+  properties: {
+    principalId: principalId
+    principalType: principalType
+    roleDefinitionId: resourceId('Microsoft.Authorization/roleDefinitions', roleDefinitionId)
+  }
+}
diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/main.bicep b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/main.bicep
new file mode 100644
index 0000000..5bcab16
--- /dev/null
+++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/main.bicep
+targetScope = 'subscription'
+
+@minLength(1)
+@maxLength(64)
+@description('Name of the environment which is used to generate a short unique hash used in all resources.')
+param environmentName string
+
+@minLength(1)
+@description('Primary location for all resources')
+param location string
+
+@description('Id of the user or app to assign application roles')
+param principalId string = ''
+
+param containerAppsEnvName string = ''
+param containerAppsAppName string = ''
+param containerRegistryName string = ''
+param dtsLocation string = 'centralus'
+param dtsSkuName string = 'Consumption'
+param dtsCapacity int = 1
+param dtsName string = ''
+param taskHubName string = ''
+
+param clientServiceName string = 'client'
+param orchestratorWorkerServiceName string = 'orchestrator-worker'
+param validatorWorkerServiceName string = 'validator-worker'
+param shipperWorkerServiceName string = 'shipper-worker'
+
+param resourceGroupName string = ''
+
+var abbrs = loadJsonContent('./abbreviations.json')
+
+var tags = {
+  'azd-env-name': environmentName
+}
+
+#disable-next-line no-unused-vars
+var resourceToken = 
toLower(uniqueString(subscription().id, environmentName, location)) + +// Resource Group +resource rg 'Microsoft.Resources/resourceGroups@2021-04-01' = { + name: !empty(resourceGroupName) ? resourceGroupName : '${abbrs.resourcesResourceGroups}${environmentName}' + location: location + tags: tags +} + +// User-assigned managed identity for all container apps +module identity './app/user-assigned-identity.bicep' = { + name: 'identity' + scope: rg + params: { + name: 'dts-ca-identity' + } +} + +// Assign DTS Worker/Client role to the managed identity +module identityAssignDTS './core/security/role.bicep' = { + name: 'identityAssignDTS' + scope: rg + params: { + principalId: identity.outputs.principalId + roleDefinitionId: '0ad04412-c4d5-4796-b79c-f76d14c8d402' + principalType: 'ServicePrincipal' + } +} + +// Assign DTS role to the deploying user (for dashboard access) +module identityAssignDTSDash './core/security/role.bicep' = { + name: 'identityAssignDTSDash' + scope: rg + params: { + principalId: principalId + roleDefinitionId: '0ad04412-c4d5-4796-b79c-f76d14c8d402' + principalType: 'User' + } +} + +// Virtual network +module vnet './core/networking/vnet.bicep' = { + name: 'vnet' + scope: rg + params: { + name: '${abbrs.networkVirtualNetworks}${resourceToken}' + location: location + tags: tags + } +} + +// Container Apps Environment + Container Registry +module containerAppsEnv './core/host/container-apps.bicep' = { + name: 'container-apps' + scope: rg + params: { + name: 'app' + containerAppsEnvironmentName: !empty(containerAppsEnvName) ? containerAppsEnvName : '${abbrs.appManagedEnvironments}${resourceToken}' + containerRegistryName: !empty(containerRegistryName) ? 
containerRegistryName : '${abbrs.containerRegistryRegistries}${resourceToken}' + location: location + subnetResourceId: vnet.outputs.infrastructureSubnetId + loadBalancerType: 'External' + } +} + +// Durable Task Scheduler + Task Hub +module dts './app/dts.bicep' = { + scope: rg + name: 'dtsResource' + params: { + name: !empty(dtsName) ? dtsName : '${abbrs.dts}${resourceToken}' + taskhubname: !empty(taskHubName) ? taskHubName : '${abbrs.taskhub}${resourceToken}' + location: dtsLocation + tags: tags + ipAllowlist: ['0.0.0.0/0'] + skuName: dtsSkuName + skuCapacity: dtsCapacity + } +} + +// Client — schedules orchestrations and polls for results +module client 'app/app.bicep' = { + name: clientServiceName + scope: rg + params: { + appName: !empty(containerAppsAppName) ? '${containerAppsAppName}-client' : '${abbrs.appContainerApps}${resourceToken}-client' + containerAppsEnvironmentName: containerAppsEnv.outputs.environmentName + containerRegistryName: containerAppsEnv.outputs.registryName + userAssignedManagedIdentity: { + resourceId: identity.outputs.resourceId + clientId: identity.outputs.clientId + } + location: location + tags: tags + serviceName: 'client' + identityName: identity.outputs.name + dtsEndpoint: dts.outputs.dts_URL + taskHubName: dts.outputs.TASKHUB_NAME + } +} + +// Orchestrator Worker — handles orchestrations only +module orchestratorWorker 'app/app.bicep' = { + name: orchestratorWorkerServiceName + scope: rg + params: { + appName: !empty(containerAppsAppName) ? 
'${containerAppsAppName}-orchestrator' : '${abbrs.appContainerApps}${resourceToken}-orchestrator' + containerAppsEnvironmentName: containerAppsEnv.outputs.environmentName + containerRegistryName: containerAppsEnv.outputs.registryName + userAssignedManagedIdentity: { + resourceId: identity.outputs.resourceId + clientId: identity.outputs.clientId + } + location: location + tags: tags + serviceName: 'orchestrator-worker' + identityName: identity.outputs.name + dtsEndpoint: dts.outputs.dts_URL + taskHubName: dts.outputs.TASKHUB_NAME + workItemType: 'Orchestration' + } +} + +// Validator Worker — handles ValidateOrder activity only +module validatorWorker 'app/app.bicep' = { + name: validatorWorkerServiceName + scope: rg + params: { + appName: !empty(containerAppsAppName) ? '${containerAppsAppName}-validator' : '${abbrs.appContainerApps}${resourceToken}-validator' + containerAppsEnvironmentName: containerAppsEnv.outputs.environmentName + containerRegistryName: containerAppsEnv.outputs.registryName + userAssignedManagedIdentity: { + resourceId: identity.outputs.resourceId + clientId: identity.outputs.clientId + } + location: location + tags: tags + serviceName: 'validator-worker' + identityName: identity.outputs.name + dtsEndpoint: dts.outputs.dts_URL + taskHubName: dts.outputs.TASKHUB_NAME + workItemType: 'Activity' + } +} + +// Shipper Worker — handles ShipOrder activity only +module shipperWorker 'app/app.bicep' = { + name: shipperWorkerServiceName + scope: rg + params: { + appName: !empty(containerAppsAppName) ? 
'${containerAppsAppName}-shipper' : '${abbrs.appContainerApps}${resourceToken}-shipper' + containerAppsEnvironmentName: containerAppsEnv.outputs.environmentName + containerRegistryName: containerAppsEnv.outputs.registryName + userAssignedManagedIdentity: { + resourceId: identity.outputs.resourceId + clientId: identity.outputs.clientId + } + location: location + tags: tags + serviceName: 'shipper-worker' + identityName: identity.outputs.name + dtsEndpoint: dts.outputs.dts_URL + taskHubName: dts.outputs.TASKHUB_NAME + workItemType: 'Activity' + } +} + +output AZURE_LOCATION string = location +output AZURE_TENANT_ID string = tenant().tenantId +output AZURE_CONTAINER_REGISTRY_ENDPOINT string = containerAppsEnv.outputs.registryLoginServer +output AZURE_CONTAINER_REGISTRY_NAME string = containerAppsEnv.outputs.registryName +output AZURE_USER_ASSIGNED_IDENTITY_NAME string = identity.outputs.name diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/main.parameters.json b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/main.parameters.json new file mode 100644 index 0000000..c8d3453 --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/infra/main.parameters.json @@ -0,0 +1,15 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentParameters.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "environmentName": { + "value": "${AZURE_ENV_NAME}" + }, + "location": { + "value": "${AZURE_LOCATION}" + }, + "principalId": { + "value": "${AZURE_PRINCIPAL_ID}" + } + } +} diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/orchestrator-worker/Dockerfile b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/orchestrator-worker/Dockerfile new file mode 100644 index 0000000..2698439 --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/orchestrator-worker/Dockerfile @@ -0,0 +1,18 @@ +FROM eclipse-temurin:21-jdk AS build +WORKDIR /project + +# Copy Gradle 
wrapper and build files first (for layer caching) +COPY gradlew gradlew +COPY gradle/ gradle/ +COPY build.gradle build.gradle +COPY settings.gradle settings.gradle +COPY shared/ shared/ +COPY orchestrator-worker/ orchestrator-worker/ + +RUN chmod +x gradlew && ./gradlew :orchestrator-worker:installDist --no-daemon + +FROM eclipse-temurin:21-jre +WORKDIR /app +COPY --from=build /project/orchestrator-worker/build/install/orchestrator-worker . + +ENTRYPOINT ["bin/orchestrator-worker"] diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/orchestrator-worker/build.gradle b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/orchestrator-worker/build.gradle new file mode 100644 index 0000000..f6be34e --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/orchestrator-worker/build.gradle @@ -0,0 +1,7 @@ +application { + mainClass = 'io.durabletask.samples.OrchestratorWorker' +} + +dependencies { + implementation project(':shared') +} diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/orchestrator-worker/src/main/java/io/durabletask/samples/OrchestratorWorker.java b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/orchestrator-worker/src/main/java/io/durabletask/samples/OrchestratorWorker.java new file mode 100644 index 0000000..440ed37 --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/orchestrator-worker/src/main/java/io/durabletask/samples/OrchestratorWorker.java @@ -0,0 +1,77 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +package io.durabletask.samples; + +import com.microsoft.durabletask.*; +import com.microsoft.durabletask.azuremanaged.DurableTaskSchedulerWorkerExtensions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; + +/** + * Orchestrator Worker — runs only the {@code OrderProcessingOrchestration}. + * + *

This worker registers a single orchestration and enables auto-generated work item filters. + * DTS will route only orchestration work items for {@code OrderProcessingOrchestration} to this worker. + * It will never receive activity work items. + */ +public final class OrchestratorWorker { + private static final Logger logger = LoggerFactory.getLogger(OrchestratorWorker.class); + + public static void main(String[] args) throws IOException, InterruptedException { + String connectionString = ConnectionHelper.getConnectionString(); + + logger.info("[Orchestrator] Connection: {}", connectionString); + logger.info("[Orchestrator] This worker registers ONLY the orchestration. No activities."); + + // Build the worker with only the orchestration registered. + // useWorkItemFilters() auto-generates filters from the registry, so this worker + // will ONLY receive orchestration work items — never activity work items. + DurableTaskGrpcWorker worker = DurableTaskSchedulerWorkerExtensions + .createWorkerBuilder(connectionString) + .addOrchestration(new TaskOrchestrationFactory() { + @Override + public String getName() { + return "OrderProcessingOrchestration"; + } + + @Override + public TaskOrchestration create() { + return ctx -> { + String orderId = ctx.getInput(String.class); + + logger.info("[Orchestrator] Orchestration | Name=OrderProcessingOrchestration | InstanceId={} | Processing order '{}'", + ctx.getInstanceId(), orderId); + + // Step 1: Validate the order (routed to Validator Worker) + logger.info("[Orchestrator] Orchestration | InstanceId={} | Dispatching ValidateOrder to Validator Worker...", + ctx.getInstanceId()); + String validationResult = ctx.callActivity("ValidateOrder", orderId, String.class).await(); + + // Step 2: Ship the order (routed to Shipper Worker) + logger.info("[Orchestrator] Orchestration | InstanceId={} | Dispatching ShipOrder to Shipper Worker...", + ctx.getInstanceId()); + String shippingResult = ctx.callActivity("ShipOrder", orderId, 
String.class).await(); + + String combined = String.format("Order '%s' => Validation: [%s], Shipping: [%s]", + orderId, validationResult, shippingResult); + + logger.info("[Orchestrator] Orchestration | InstanceId={} | Completed: {}", + ctx.getInstanceId(), combined); + + ctx.complete(combined); + }; + } + }) + .useWorkItemFilters() // auto-generate from registered tasks + .build(); + + logger.info("[Orchestrator] Starting... waiting for orchestration work items only."); + worker.start(); + + // Keep the process alive + logger.info("[Orchestrator] Worker started. Press Ctrl+C to exit."); + Thread.currentThread().join(); + } +} diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/orchestrator-worker/src/main/resources/logback.xml b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/orchestrator-worker/src/main/resources/logback.xml new file mode 100644 index 0000000..7f2565c --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/orchestrator-worker/src/main/resources/logback.xml @@ -0,0 +1,14 @@ + + + + + %d{HH:mm:ss} %-5level %logger{36} - %msg%n + + + + + + + + + diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/settings.gradle b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/settings.gradle new file mode 100644 index 0000000..f3638ee --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/settings.gradle @@ -0,0 +1,7 @@ +rootProject.name = 'work-item-filtering-split-activities-java' + +include 'shared' +include 'client' +include 'orchestrator-worker' +include 'validator-worker' +include 'shipper-worker' diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/shared/build.gradle b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/shared/build.gradle new file mode 100644 index 0000000..3361a85 --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/shared/build.gradle @@ -0,0 +1 @@ +// Shared library — connection helper and common utilities diff 
--git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/shared/src/main/java/io/durabletask/samples/ConnectionHelper.java b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/shared/src/main/java/io/durabletask/samples/ConnectionHelper.java new file mode 100644 index 0000000..c15e550 --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/shared/src/main/java/io/durabletask/samples/ConnectionHelper.java @@ -0,0 +1,48 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +package io.durabletask.samples; + +/** + * Shared helper for building DTS connection strings from environment variables. + * + *

Supports three modes: a full connection string supplied via {@code DURABLE_TASK_CONNECTION_STRING}
+ * (used verbatim); the local emulator ({@code ENDPOINT} = http://localhost:8080) with
+ * {@code Authentication=None}; or an Azure endpoint authenticated with {@code ManagedIdentity}
+ * when {@code AZURE_MANAGED_IDENTITY_CLIENT_ID} is set, otherwise {@code DefaultAzure}.
+ *

+ */ +final class ConnectionHelper { + + private ConnectionHelper() { + } + + static String getConnectionString() { + String connectionString = System.getenv("DURABLE_TASK_CONNECTION_STRING"); + if (connectionString != null) { + return connectionString; + } + + String endpoint = System.getenv("ENDPOINT"); + if (endpoint == null) { + endpoint = "http://localhost:8080"; + } + + String taskHub = System.getenv("TASKHUB"); + if (taskHub == null) { + taskHub = "default"; + } + + String managedIdentityClientId = System.getenv("AZURE_MANAGED_IDENTITY_CLIENT_ID"); + boolean isLocalEmulator = endpoint.equals("http://localhost:8080"); + + if (isLocalEmulator) { + return String.format("Endpoint=%s;TaskHub=%s;Authentication=None", endpoint, taskHub); + } else if (managedIdentityClientId != null && !managedIdentityClientId.isEmpty()) { + return String.format("Endpoint=%s;TaskHub=%s;Authentication=ManagedIdentity;ClientID=%s", + endpoint, taskHub, managedIdentityClientId); + } else { + return String.format("Endpoint=%s;TaskHub=%s;Authentication=DefaultAzure", endpoint, taskHub); + } + } +} diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/shipper-worker/Dockerfile b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/shipper-worker/Dockerfile new file mode 100644 index 0000000..27e7c4c --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/shipper-worker/Dockerfile @@ -0,0 +1,17 @@ +FROM eclipse-temurin:21-jdk AS build +WORKDIR /project + +COPY gradlew gradlew +COPY gradle/ gradle/ +COPY build.gradle build.gradle +COPY settings.gradle settings.gradle +COPY shared/ shared/ +COPY shipper-worker/ shipper-worker/ + +RUN chmod +x gradlew && ./gradlew :shipper-worker:installDist --no-daemon + +FROM eclipse-temurin:21-jre +WORKDIR /app +COPY --from=build /project/shipper-worker/build/install/shipper-worker . 
+ +ENTRYPOINT ["bin/shipper-worker"] diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/shipper-worker/build.gradle b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/shipper-worker/build.gradle new file mode 100644 index 0000000..581da14 --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/shipper-worker/build.gradle @@ -0,0 +1,7 @@ +application { + mainClass = 'io.durabletask.samples.ShipperWorker' +} + +dependencies { + implementation project(':shared') +} diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/shipper-worker/src/main/java/io/durabletask/samples/ShipperWorker.java b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/shipper-worker/src/main/java/io/durabletask/samples/ShipperWorker.java new file mode 100644 index 0000000..617054a --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/shipper-worker/src/main/java/io/durabletask/samples/ShipperWorker.java @@ -0,0 +1,67 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +package io.durabletask.samples; + +import com.microsoft.durabletask.*; +import com.microsoft.durabletask.azuremanaged.DurableTaskSchedulerWorkerExtensions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.concurrent.ThreadLocalRandom; + +/** + * Shipper Worker — runs only the {@code ShipOrder} activity. + * + *

This worker registers a single activity and enables auto-generated work item filters. + * DTS will route only {@code ShipOrder} activity work items to this worker. + * It will never receive orchestration or other activity work items. + */ +public final class ShipperWorker { + private static final Logger logger = LoggerFactory.getLogger(ShipperWorker.class); + + public static void main(String[] args) throws IOException, InterruptedException { + String connectionString = ConnectionHelper.getConnectionString(); + + logger.info("[Shipper] Connection: {}", connectionString); + logger.info("[Shipper] This worker registers ONLY the ShipOrder activity."); + + // Build the worker with only the ShipOrder activity registered. + // useWorkItemFilters() auto-generates filters from the registry, so this worker + // will ONLY receive ShipOrder activity work items. + DurableTaskGrpcWorker worker = DurableTaskSchedulerWorkerExtensions + .createWorkerBuilder(connectionString) + .addActivity(new TaskActivityFactory() { + @Override + public String getName() { + return "ShipOrder"; + } + + @Override + public TaskActivity create() { + return ctx -> { + String orderId = ctx.getInput(String.class); + + logger.info("[Shipper] Activity | Name=ShipOrder | Shipping order '{}'...", orderId); + + // Simulate shipping + String trackingNumber = "TRACK-" + orderId + "-" + ThreadLocalRandom.current().nextInt(1000, 9999); + String result = "Shipped with tracking " + trackingNumber; + + logger.info("[Shipper] Activity | Name=ShipOrder | Result: {}", result); + + return result; + }; + } + }) + .useWorkItemFilters() // auto-generate from registered tasks + .build(); + + logger.info("[Shipper] Starting... waiting for ShipOrder activity work items only."); + worker.start(); + + // Keep the process alive + logger.info("[Shipper] Worker started. 
Press Ctrl+C to exit."); + Thread.currentThread().join(); + } +} diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/shipper-worker/src/main/resources/logback.xml b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/shipper-worker/src/main/resources/logback.xml new file mode 100644 index 0000000..7f2565c --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/shipper-worker/src/main/resources/logback.xml @@ -0,0 +1,14 @@ + + + + + %d{HH:mm:ss} %-5level %logger{36} - %msg%n + + + + + + + + + diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/validator-worker/Dockerfile b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/validator-worker/Dockerfile new file mode 100644 index 0000000..054d320 --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/validator-worker/Dockerfile @@ -0,0 +1,17 @@ +FROM eclipse-temurin:21-jdk AS build +WORKDIR /project + +COPY gradlew gradlew +COPY gradle/ gradle/ +COPY build.gradle build.gradle +COPY settings.gradle settings.gradle +COPY shared/ shared/ +COPY validator-worker/ validator-worker/ + +RUN chmod +x gradlew && ./gradlew :validator-worker:installDist --no-daemon + +FROM eclipse-temurin:21-jre +WORKDIR /app +COPY --from=build /project/validator-worker/build/install/validator-worker . 
+ +ENTRYPOINT ["bin/validator-worker"] diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/validator-worker/build.gradle b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/validator-worker/build.gradle new file mode 100644 index 0000000..47f4653 --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/validator-worker/build.gradle @@ -0,0 +1,7 @@ +application { + mainClass = 'io.durabletask.samples.ValidatorWorker' +} + +dependencies { + implementation project(':shared') +} diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/validator-worker/src/main/java/io/durabletask/samples/ValidatorWorker.java b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/validator-worker/src/main/java/io/durabletask/samples/ValidatorWorker.java new file mode 100644 index 0000000..607a407 --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/validator-worker/src/main/java/io/durabletask/samples/ValidatorWorker.java @@ -0,0 +1,65 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. +package io.durabletask.samples; + +import com.microsoft.durabletask.*; +import com.microsoft.durabletask.azuremanaged.DurableTaskSchedulerWorkerExtensions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; + +/** + * Validator Worker — runs only the {@code ValidateOrder} activity. + * + *

This worker registers a single activity and enables auto-generated work item filters. + * DTS will route only {@code ValidateOrder} activity work items to this worker. + * It will never receive orchestration or other activity work items. + */ +public final class ValidatorWorker { + private static final Logger logger = LoggerFactory.getLogger(ValidatorWorker.class); + + public static void main(String[] args) throws IOException, InterruptedException { + String connectionString = ConnectionHelper.getConnectionString(); + + logger.info("[Validator] Connection: {}", connectionString); + logger.info("[Validator] This worker registers ONLY the ValidateOrder activity."); + + // Build the worker with only the ValidateOrder activity registered. + // useWorkItemFilters() auto-generates filters from the registry, so this worker + // will ONLY receive ValidateOrder activity work items. + DurableTaskGrpcWorker worker = DurableTaskSchedulerWorkerExtensions + .createWorkerBuilder(connectionString) + .addActivity(new TaskActivityFactory() { + @Override + public String getName() { + return "ValidateOrder"; + } + + @Override + public TaskActivity create() { + return ctx -> { + String orderId = ctx.getInput(String.class); + + logger.info("[Validator] Activity | Name=ValidateOrder | Validating order '{}'...", orderId); + + // Simulate validation + String result = "Order " + orderId + " is valid"; + + logger.info("[Validator] Activity | Name=ValidateOrder | Result: {}", result); + + return result; + }; + } + }) + .useWorkItemFilters() // auto-generate from registered tasks + .build(); + + logger.info("[Validator] Starting... waiting for ValidateOrder activity work items only."); + worker.start(); + + // Keep the process alive + logger.info("[Validator] Worker started. 
Press Ctrl+C to exit."); + Thread.currentThread().join(); + } +} diff --git a/samples/scenarios/WorkItemFilteringSplitActivitiesJava/validator-worker/src/main/resources/logback.xml b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/validator-worker/src/main/resources/logback.xml new file mode 100644 index 0000000..7f2565c --- /dev/null +++ b/samples/scenarios/WorkItemFilteringSplitActivitiesJava/validator-worker/src/main/resources/logback.xml @@ -0,0 +1,14 @@ + + + + + %d{HH:mm:ss} %-5level %logger{36} - %msg%n + + + + + + + + +