
chore: Harden drone sample #1029

Merged · 3 commits · Oct 4, 2023
@@ -100,10 +100,10 @@ It is of course also possible to instead use a separate standalone database such
Config to use H2 looks like this:

Scala
: @@snip [persistence.conf](/samples/grpc/local-drone-control-scala/src/main/resources/persistence.conf) { }
: @@snip [persistence-h2.conf](/samples/grpc/local-drone-control-scala/src/main/resources/persistence-h2.conf) { }

Java
: @@snip [persistence.conf](/samples/grpc/local-drone-control-java/src/main/resources/persistence.conf) { }
: @@snip [persistence-h2.conf](/samples/grpc/local-drone-control-java/src/main/resources/persistence-h2.conf) { }

In addition to the configuration, the following additional dependencies are needed in the project build:
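(The concrete dependency block is collapsed in this diff view.) For H2 with Akka Persistence R2DBC this usually means adding the H2 driver plus its R2DBC adapter to the build; a minimal sbt sketch, with placeholder versions:

```scala
// Sketch only: the actual dependency list is collapsed in this diff.
// Versions below are placeholders; use the ones matching your build.
libraryDependencies ++= Seq(
  "com.h2database" % "h2" % "2.2.224",
  "io.r2dbc" % "r2dbc-h2" % "1.0.0.RELEASE"
)
```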

@@ -315,4 +315,4 @@ grpcurl -d '{"drone_id":"drone1"}' -plaintext localhost:8101 central.drones.Dron

## What's next?

* Accept restaurant delivery orders in the restaurant-drone-deliveries-service
* Accept restaurant delivery orders in the restaurant-drone-deliveries-service
@@ -13,6 +13,9 @@
* Main for starting the local-drone-control as a cluster rather than a single self-contained node.
* Requires a separate database, start with config from local{1,2,3}.conf files for running as
* cluster locally.
*
* <p>This should be started with {@code -Dconfig.resource=application-cluster.conf} or
* {@code -Dconfig.resource=local1.conf}.
*/
public class ClusteredMain {

@@ -5,6 +5,7 @@
import akka.Done;
import akka.actor.typed.ActorRef;
import akka.actor.typed.Behavior;
import akka.actor.typed.SupervisorStrategy;
import akka.actor.typed.javadsl.ActorContext;
import akka.actor.typed.javadsl.Behaviors;
import akka.cluster.sharding.typed.javadsl.EntityTypeKey;
@@ -15,6 +16,7 @@
import akka.persistence.typed.state.javadsl.Effect;
import akka.serialization.jackson.CborSerializable;
import com.fasterxml.jackson.annotation.JsonCreator;
import java.time.Duration;
import java.time.Instant;
import java.util.*;

@@ -151,15 +153,20 @@ public DeliveryInProgress(String deliveryId, String droneId, Instant pickupTime)
EntityTypeKey.create(Command.class, "RestaurantDeliveries");

public static Behavior<Command> create() {
return Behaviors.setup(
context ->
new DeliveriesQueue(context, PersistenceId.of(EntityKey.name(), "DeliveriesQueue")));
return Behaviors.<Command>supervise(
Behaviors.setup(
context ->
new DeliveriesQueue(
context, PersistenceId.of(EntityKey.name(), "DeliveriesQueue"))))
.onFailure(SupervisorStrategy.restart());
}

private final ActorContext<Command> context;

public DeliveriesQueue(ActorContext<Command> context, PersistenceId persistenceId) {
super(persistenceId);
super(
persistenceId,
SupervisorStrategy.restartWithBackoff(Duration.ofMillis(100), Duration.ofSeconds(5), 0.1));
this.context = context;
}

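For reference, the hardening applied to DeliveriesQueue above (a restart supervisor around the behavior plus backoff on persist failures) looks roughly like this in the Scala API; a sketch only, where the state and command handler are stand-ins rather than the sample's real ones:

```scala
import akka.actor.typed.{ Behavior, SupervisorStrategy }
import akka.actor.typed.scaladsl.Behaviors
import akka.persistence.typed.PersistenceId
import akka.persistence.typed.state.scaladsl.{ DurableStateBehavior, Effect }
import scala.concurrent.duration._

object DeliveriesQueueSketch {
  sealed trait Command
  final case class State(queue: Vector[String] = Vector.empty) // placeholder state

  def apply(): Behavior[Command] =
    // Restart on unexpected failures so the queue actor does not stay stopped
    Behaviors
      .supervise(
        DurableStateBehavior[Command, State](
          persistenceId = PersistenceId.ofUniqueId("DeliveriesQueue"),
          emptyState = State(),
          commandHandler = (_, _) => Effect.none // placeholder handler
        )
          // Back off between restarts when persisting the durable state fails
          .onPersistFailure(
            SupervisorStrategy.restartWithBackoff(100.millis, 5.seconds, randomFactor = 0.1)))
      .onFailure[Exception](SupervisorStrategy.restart)
}
```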
@@ -6,6 +6,7 @@
import akka.actor.typed.ActorRef;
import akka.actor.typed.ActorSystem;
import akka.actor.typed.Behavior;
import akka.actor.typed.SupervisorStrategy;
import akka.cluster.sharding.typed.javadsl.ClusterSharding;
import akka.cluster.sharding.typed.javadsl.Entity;
import akka.cluster.sharding.typed.javadsl.EntityTypeKey;
@@ -17,6 +18,7 @@
import akka.persistence.typed.javadsl.EventSourcedBehavior;
import akka.serialization.jackson.CborSerializable;
import com.fasterxml.jackson.annotation.JsonCreator;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
@@ -116,7 +118,9 @@ public static Behavior<Command> create(String entityId) {
}

private Drone(String entityId) {
super(PersistenceId.of(ENTITY_KEY.name(), entityId));
super(
PersistenceId.of(ENTITY_KEY.name(), entityId),
SupervisorStrategy.restartWithBackoff(Duration.ofMillis(100), Duration.ofSeconds(5), 0.1));
}

@Override
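The Drone entity above gets the same treatment for the event sourced case. A rough Scala equivalent, again with placeholder types and handlers rather than the sample's real ones:

```scala
import akka.actor.typed.{ Behavior, SupervisorStrategy }
import akka.persistence.typed.PersistenceId
import akka.persistence.typed.scaladsl.{ Effect, EventSourcedBehavior }
import scala.concurrent.duration._

object DroneSketch {
  sealed trait Command
  sealed trait Event
  final case class State(positionKnown: Boolean = false) // placeholder state

  def apply(entityId: String): Behavior[Command] =
    EventSourcedBehavior[Command, Event, State](
      persistenceId = PersistenceId.of("Drone", entityId),
      emptyState = State(),
      commandHandler = (_, _) => Effect.none, // placeholder
      eventHandler = (state, _) => state // placeholder
    )
      // Restart with backoff when journal writes fail instead of stopping the entity
      .onPersistFailure(
        SupervisorStrategy.restartWithBackoff(100.millis, 5.seconds, randomFactor = 0.1))
}
```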
@@ -1,14 +1,10 @@
# Production configuration for running the local-drone-control service in Kubernetes,
# as a multi-node cluster and with a separate PostgreSQL database.

include "cluster"
include "grpc"
include "postgres"
include "persistence-postgres"

local-drone-control {
# consider setting this to a specific interface for your environment
grpc.interface = "0.0.0.0"
grpc.interface = ${?GRPC_INTERFACE}

nr-of-event-producers = 4
# unique identifier for the instance of local control, must be known up front by the cloud service
@@ -18,9 +14,6 @@ local-drone-control {
ask-timeout = 3s
}

akka.management.cluster.bootstrap.contact-point-discovery {
service-name = "local-drone-control"
discovery-method = kubernetes-api
required-contact-point-nr = 1
required-contact-point-nr = ${?REQUIRED_CONTACT_POINT_NR}
akka {
loglevel = DEBUG
}
@@ -1,14 +1,9 @@
include "h2-default-projection-schema.conf"
include "grpc"
include "persistence"

# Default config, used for running a single-node cluster that cannot scale out to many nodes, using H2 for
# persistence, started through local.drones.Main

akka {
actor.provider = cluster
loglevel = DEBUG
}
include "h2-default-projection-schema.conf"
include "grpc"
include "persistence-h2"

local-drone-control {
# unique identifier for the instance of local control, must be known up front by the cloud service
@@ -17,3 +12,22 @@ local-drone-control {

ask-timeout = 3s
}

akka {
loglevel = DEBUG
actor.provider = cluster
}

akka.remote.artery {
# single node cluster
canonical.hostname = "127.0.0.1"
canonical.port = 0
canonical.port = ${?REMOTE_PORT}
}

akka.cluster.sharding {
passivation {
strategy = default-strategy
active-entity-limit = 1000
}
}
@@ -1,24 +1,34 @@
akka {
actor.provider = cluster
}

remote.artery {
canonical.port = 2552
canonical.port = ${?REMOTE_PORT}
}
akka.remote.artery {
canonical.port = 2552
canonical.port = ${?REMOTE_PORT}
}

cluster {
downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"
akka.cluster {
downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"

shutdown-after-unsuccessful-join-seed-nodes = 120s
shutdown-after-unsuccessful-join-seed-nodes = 120s

sharding {
least-shard-allocation-strategy.rebalance-absolute-limit = 20
passivation.strategy = default-strategy
sharding {
least-shard-allocation-strategy.rebalance-absolute-limit = 20
passivation {
strategy = default-strategy
active-entity-limit = 1000
}
}
}

akka.management {
http.port = 8558
http.port = ${?HTTP_MGMT_PORT}

management {
http.port = 8558
http.port = ${?HTTP_MGMT_PORT}
cluster.bootstrap.contact-point-discovery {
service-name = "local-drone-control"
discovery-method = kubernetes-api
required-contact-point-nr = 1
required-contact-point-nr = ${?REQUIRED_CONTACT_POINT_NR}
}
}
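The cluster.conf above wires up the Akka Management HTTP endpoint and Cluster Bootstrap contact-point discovery via the Kubernetes API. That config only takes effect if the main program starts Akka Management and Cluster Bootstrap, which the sample's ClusteredMain presumably does; a minimal sketch in the Scala API, with a placeholder guardian behavior:

```scala
import akka.actor.typed.ActorSystem
import akka.actor.typed.scaladsl.Behaviors
import akka.management.scaladsl.AkkaManagement
import akka.management.cluster.bootstrap.ClusterBootstrap

object ClusteredMainSketch {
  def main(args: Array[String]): Unit = {
    // Placeholder guardian; the real sample starts its own root behavior
    val system = ActorSystem[Nothing](Behaviors.empty, "local-drone-control")
    // Binds the management HTTP endpoint (http.port above)
    AkkaManagement(system).start()
    // Discovers contact points (kubernetes-api) and forms or joins the cluster
    ClusterBootstrap(system).start()
  }
}
```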
@@ -2,7 +2,7 @@
# through the local.drones.ClusteredMain and a separate PostgreSQL database
include "cluster"
include "grpc"
include "postgres"
include "persistence-postgres"

local-drone-control.grpc.interface = "127.0.0.1"
akka.remote.artery.canonical.hostname = "127.0.0.1"
@@ -1,14 +1,10 @@
# Production configuration for running the local-drone-control service in Kubernetes,
# as a multi-node cluster and with a separate PostgreSQL database.

include "cluster"
include "grpc"
include "postgres"
include "persistence-postgres"

local-drone-control {
# consider setting this to a specific interface for your environment
grpc.interface = "0.0.0.0"
grpc.interface = ${?GRPC_INTERFACE}

nr-of-event-producers = 4
# unique identifier for the instance of local control, must be known up front by the cloud service
@@ -18,9 +14,6 @@ local-drone-control {
ask-timeout = 3s
}

akka.management.cluster.bootstrap.contact-point-discovery {
service-name = "local-drone-control"
discovery-method = kubernetes-api
required-contact-point-nr = 1
required-contact-point-nr = ${?REQUIRED_CONTACT_POINT_NR}
akka {
loglevel = DEBUG
}
@@ -1,14 +1,9 @@
include "h2-default-projection-schema.conf"
include "grpc"
include "persistence"

# Default config, used for running a single-node cluster that cannot scale out to many nodes, using H2 for
# persistence, started through local.drones.Main

akka {
actor.provider = cluster
loglevel = DEBUG
}
include "h2-default-projection-schema.conf"
include "grpc"
include "persistence-h2"

local-drone-control {
# unique identifier for the instance of local control, must be known up front by the cloud service
@@ -17,3 +12,22 @@ local-drone-control {

ask-timeout = 3s
}

akka {
loglevel = DEBUG
actor.provider = cluster
}

akka.remote.artery {
# single node cluster
canonical.hostname = "127.0.0.1"
canonical.port = 0
canonical.port = ${?REMOTE_PORT}
}

akka.cluster.sharding {
passivation {
strategy = default-strategy
active-entity-limit = 1000
}
}
@@ -1,24 +1,34 @@
akka {
actor.provider = cluster
}

remote.artery {
canonical.port = 2552
canonical.port = ${?REMOTE_PORT}
}
akka.remote.artery {
canonical.port = 2552
canonical.port = ${?REMOTE_PORT}
}

cluster {
downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"
akka.cluster {
downing-provider-class = "akka.cluster.sbr.SplitBrainResolverProvider"

shutdown-after-unsuccessful-join-seed-nodes = 120s
shutdown-after-unsuccessful-join-seed-nodes = 120s

sharding {
least-shard-allocation-strategy.rebalance-absolute-limit = 20
passivation.strategy = default-strategy
sharding {
least-shard-allocation-strategy.rebalance-absolute-limit = 20
passivation {
strategy = default-strategy
active-entity-limit = 1000
}
}
}

akka.management {
http.port = 8558
http.port = ${?HTTP_MGMT_PORT}

management {
http.port = 8558
http.port = ${?HTTP_MGMT_PORT}
cluster.bootstrap.contact-point-discovery {
service-name = "local-drone-control"
discovery-method = kubernetes-api
required-contact-point-nr = 1
required-contact-point-nr = ${?REQUIRED_CONTACT_POINT_NR}
}
}
@@ -2,7 +2,7 @@
# through the local.drones.ClusteredMain and a separate PostgreSQL database
include "cluster"
include "grpc"
include "postgres"
include "persistence-postgres"

local-drone-control.grpc.interface = "127.0.0.1"
akka.remote.artery.canonical.hostname = "127.0.0.1"
@@ -20,12 +20,14 @@ akka {
host = ${?DB_HOST}
# note: different port for running in parallel with db for restaurant-drone-deliveries-service
port = 5433
port = ${?DB_PORT}
database = "postgres"
database = ${?DB_DATABASE}
user = "postgres"
user = ${?DB_USER}
password = "postgres"
password = ${?DB_PASSWORD}
}
}
}
}
}
@@ -9,6 +9,8 @@ import akka.management.scaladsl.AkkaManagement
/**
* Main for starting the local-drone-control as a cluster rather than a single self-contained node. Requires
* a separate database, start with config from local{1,2,3}.conf files for running as cluster locally.
*
* This should be started with `-Dconfig.resource=application-cluster.conf` or `-Dconfig.resource=local1.conf`.
*/
object ClusteredMain {
