From 1ac498c7832d6a10bc0dfbd66b62ce8fca070a0a Mon Sep 17 00:00:00 2001 From: James Roper Date: Mon, 6 Apr 2020 15:35:44 +1000 Subject: [PATCH] Eventing support --- build.sbt | 12 +- .../cloudstate/graaltools/Substitutions.java | 36 - .../javasupport/impl/AnySupport.scala | 4 +- .../javasupport/impl/ReflectionHelper.scala | 36 +- .../action/AnnotationBasedActionSupport.scala | 23 +- .../javasupport/tck/JavaSupportTck.java | 13 + .../eventlogeventing/EventLogSubscriber.java | 83 +++ .../EventSourcedEntityOne.java | 43 ++ .../EventSourcedEntityTwo.java | 36 + .../model/eventlogeventing/JsonMessage.java | 30 + protocols/example/shoppingcart/products.proto | 48 ++ .../example/shoppingcart/projection.proto | 43 ++ .../example/shoppingcart/shoppingcart.proto | 1 - .../shoppingcart/shoppingcart.proto | 1 - .../frontend/cloudstate/entity_key.proto | 2 +- protocols/frontend/cloudstate/eventing.proto | 46 +- .../cloudstate/legacy_entity_key.proto | 30 + protocols/protocol/cloudstate/entity.proto | 11 +- .../tck/model/eventlogeventing.proto | 178 +++++ .../native-image.properties | 1 + .../reflect-config.json.conf | 6 + .../src/main/resources/application.conf | 19 +- .../CassandraProjectionSupport.scala | 38 ++ .../akka-actor-typed/native-image.properties | 1 + .../akka-actor-typed/reflect-config.json.conf | 10 + .../akka-actor/reflect-config.json.conf | 5 + .../native-image.properties | 1 + .../reflect-config.json.conf | 10 + .../reflect-config.json.conf | 8 + proxy/core/src/main/resources/in-memory.conf | 17 +- proxy/core/src/main/resources/reference.conf | 11 + .../TestAtLeastOnceFlowProjection.scala | 92 +++ .../proxy/EntityDiscoveryManager.scala | 84 ++- .../scala/io/cloudstate/proxy/Serve.scala | 2 - .../proxy/UserFunctionTypeSupport.scala | 5 +- .../proxy/eventing/EventLogEventing.scala | 137 ++++ .../proxy/eventing/EventingManager.scala | 631 +++++++++++++----- .../proxy/eventing/EventingSupport.scala | 114 ++++ .../proxy/eventing/GooglePubsubEventing.scala | 453 ++++++++----- .../proxy/eventing/ProjectionSupport.scala | 45 ++ .../eventsourced/EventSourcedEntity.scala | 19 +- .../EventSourcedSupportFactory.scala | 4 +- .../proxy/eventsourced/InMemJournal.scala | 220 ++++++ .../EventSourcedRestartSpec.scala | 2 +- .../EventSourcedInstrumentationSpec.scala | 2 +- .../jdbc/src/main/resources/jdbc-common.conf | 9 +- .../proxy/jdbc/SlickProjectionSupport.scala | 54 ++ .../reflect-config.json.conf | 4 + .../src/main/protos/pingpong/pingpong.proto | 16 +- .../loadgenerator/GenerateLoad.scala | 65 +- samples/js-shopping-cart/index.js | 9 +- samples/js-shopping-cart/package.json | 6 +- samples/js-shopping-cart/products.js | 69 ++ samples/js-shopping-cart/projection.js | 54 ++ samples/js-shopping-cart/user-function.desc | Bin 10863 -> 12873 bytes .../io/cloudstate/tck/CloudStateTCK.scala | 88 ++- .../cloudstate/testkit/InterceptService.scala | 15 +- .../action/InterceptActionService.scala | 160 +++++ .../InterceptEventSourcedService.scala | 7 + 59 files changed, 2708 insertions(+), 461 deletions(-) create mode 100644 java-support/tck/src/main/java/io/cloudstate/javasupport/tck/model/eventlogeventing/EventLogSubscriber.java create mode 100644 java-support/tck/src/main/java/io/cloudstate/javasupport/tck/model/eventlogeventing/EventSourcedEntityOne.java create mode 100644 java-support/tck/src/main/java/io/cloudstate/javasupport/tck/model/eventlogeventing/EventSourcedEntityTwo.java create mode 100644 java-support/tck/src/main/java/io/cloudstate/javasupport/tck/model/eventlogeventing/JsonMessage.java create 
mode 100644 protocols/example/shoppingcart/products.proto create mode 100644 protocols/example/shoppingcart/projection.proto create mode 100644 protocols/frontend/cloudstate/legacy_entity_key.proto create mode 100644 protocols/tck/cloudstate/tck/model/eventlogeventing.proto create mode 100644 proxy/cassandra/src/graal/META-INF/native-image/io.cloudstate/cloudstate-proxy-cassandra/native-image.properties create mode 100644 proxy/cassandra/src/graal/META-INF/native-image/io.cloudstate/cloudstate-proxy-cassandra/reflect-config.json.conf create mode 100644 proxy/cassandra/src/main/scala/io/cloudstate/proxy/cassandra/CassandraProjectionSupport.scala create mode 100644 proxy/core/src/graal/META-INF/native-image/com.typesafe.akka/akka-actor-typed/native-image.properties create mode 100644 proxy/core/src/graal/META-INF/native-image/com.typesafe.akka/akka-actor-typed/reflect-config.json.conf create mode 100644 proxy/core/src/graal/META-INF/native-image/com.typesafe.akka/akka-cluster-typed/native-image.properties create mode 100644 proxy/core/src/graal/META-INF/native-image/com.typesafe.akka/akka-cluster-typed/reflect-config.json.conf create mode 100644 proxy/core/src/main/scala/akka/projection/cloudstate/TestAtLeastOnceFlowProjection.scala create mode 100644 proxy/core/src/main/scala/io/cloudstate/proxy/eventing/EventLogEventing.scala create mode 100644 proxy/core/src/main/scala/io/cloudstate/proxy/eventing/EventingSupport.scala create mode 100644 proxy/core/src/main/scala/io/cloudstate/proxy/eventing/ProjectionSupport.scala create mode 100644 proxy/core/src/main/scala/io/cloudstate/proxy/eventsourced/InMemJournal.scala create mode 100644 proxy/jdbc/src/main/scala/io/cloudstate/proxy/jdbc/SlickProjectionSupport.scala create mode 100644 samples/js-shopping-cart/products.js create mode 100644 samples/js-shopping-cart/projection.js create mode 100644 testkit/src/main/scala/io/cloudstate/testkit/action/InterceptActionService.scala diff --git a/build.sbt b/build.sbt index 9a739aeb6..553916953 100644 --- a/build.sbt +++ b/build.sbt @@ -47,6 +47,7 @@ val AkkaManagementVersion = "1.0.8" val AkkaPersistenceCassandraVersion = "0.102" val AkkaPersistenceJdbcVersion = "3.5.2" val AkkaPersistenceSpannerVersion = "1.0.0-RC4" +val AkkaProjectionsVersion = "1.0.0" val PrometheusClientVersion = "0.9.0" val ScalaTestVersion = "3.0.8" val ProtobufVersion = "3.11.4" // Note: sync with Protobuf version in Akka gRPC and ScalaPB @@ -77,6 +78,9 @@ def akkaDiscoveryDependency(name: String, excludeThese: ExclusionRule*) = def akkaPersistenceCassandraDependency(name: String, excludeThese: ExclusionRule*) = "com.typesafe.akka" %% name % AkkaPersistenceCassandraVersion excludeAll ((excludeTheseDependencies ++ excludeThese): _*) +def akkaProjectionsDependency(name: String, excludeThese: ExclusionRule*) = + "com.lightbend.akka" %% name % AkkaProjectionsVersion excludeAll ((excludeTheseDependencies ++ excludeThese): _*) + def common: Seq[Setting[_]] = automateHeaderSettings(Compile, Test) ++ Seq( headerMappings := headerMappings.value ++ Seq( de.heikoseeberger.sbtheader.FileType("proto") -> HeaderCommentStyle.cppStyleLineComment, @@ -388,6 +392,7 @@ lazy val `proxy-core` = (project in file("proxy/core")) akkaDependency("akka-stream"), akkaDependency("akka-slf4j"), akkaDependency("akka-discovery"), + akkaDependency("akka-cluster-typed"), akkaHttpDependency("akka-http"), akkaHttpDependency("akka-http-spray-json"), akkaHttpDependency("akka-http-core"), @@ -395,6 +400,9 @@ lazy val `proxy-core` = (project in file("proxy/core")) 
akkaDependency("akka-cluster-sharding", ExclusionRule("org.lmdbjava", "lmdbjava")), akkaManagementDependency("akka-management-cluster-bootstrap"), akkaDiscoveryDependency("akka-discovery-kubernetes-api"), + akkaProjectionsDependency("akka-projection-core"), + akkaProjectionsDependency("akka-projection-eventsourced"), + akkaProjectionsDependency("akka-projection-testkit"), // Needed for in memory support "com.google.protobuf" % "protobuf-java" % ProtobufVersion % "protobuf", "com.google.protobuf" % "protobuf-java-util" % ProtobufVersion, "org.scalatest" %% "scalatest" % ScalaTestVersion % Test, @@ -458,7 +466,8 @@ lazy val `proxy-cassandra` = (project in file("proxy/cassandra")) dependencyOverrides += "io.grpc" % "grpc-netty-shaded" % GrpcNettyShadedVersion, libraryDependencies ++= Seq( akkaPersistenceCassandraDependency("akka-persistence-cassandra", ExclusionRule("com.github.jnr")), - akkaPersistenceCassandraDependency("akka-persistence-cassandra-launcher") % Test + akkaPersistenceCassandraDependency("akka-persistence-cassandra-launcher") % Test, + "com.lightbend.akka" %% "akka-projection-cassandra" % AkkaProjectionsVersion ), fork in run := true, mainClass in Compile := Some("io.cloudstate.proxy.CloudStateProxyMain"), @@ -479,6 +488,7 @@ lazy val `proxy-jdbc` = (project in file("proxy/jdbc")) dependencyOverrides += "io.grpc" % "grpc-netty-shaded" % GrpcNettyShadedVersion, libraryDependencies ++= Seq( "com.github.dnvriend" %% "akka-persistence-jdbc" % AkkaPersistenceJdbcVersion, + "com.lightbend.akka" %% "akka-projection-slick" % AkkaProjectionsVersion, "org.scalatest" %% "scalatest" % ScalaTestVersion % Test ), fork in run := true, diff --git a/graal-tools/src/main/java/io/cloudstate/graaltools/Substitutions.java b/graal-tools/src/main/java/io/cloudstate/graaltools/Substitutions.java index 160211c8c..debcd0871 100644 --- a/graal-tools/src/main/java/io/cloudstate/graaltools/Substitutions.java +++ b/graal-tools/src/main/java/io/cloudstate/graaltools/Substitutions.java @@ -22,42 +22,6 @@ import com.oracle.svm.core.annotate.*; -@TargetClass( - className = "io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess", - onlyWith = Existence.class) -final class Target_io_netty_util_internal_shaded_org_jctools_util_UnsafeRefArrayAccess { - @Alias - @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayIndexShift, declClass = Object[].class) - public static int REF_ELEMENT_SHIFT; -} - -@TargetClass(className = "io.netty.util.internal.CleanerJava6", onlyWith = Existence.class) -final class Target_io_netty_util_internal_CleanerJava6 { - @Alias - @RecomputeFieldValue( - kind = RecomputeFieldValue.Kind.FieldOffset, - declClassName = "java.nio.DirectByteBuffer", - name = "cleaner") - private static long CLEANER_FIELD_OFFSET; -} - -@TargetClass(className = "io.netty.util.internal.PlatformDependent", onlyWith = Existence.class) -final class Target_io_netty_util_internal_PlatformDependent { - @Alias - @RecomputeFieldValue(kind = RecomputeFieldValue.Kind.ArrayBaseOffset, declClass = byte[].class) - private static long ARRAY_BASE_OFFSET; -} - -@TargetClass(className = "io.netty.util.internal.PlatformDependent0", onlyWith = Existence.class) -final class Target_io_netty_util_internal_PlatformDependent0 { - @Alias - @RecomputeFieldValue( - kind = RecomputeFieldValue.Kind.FieldOffset, - declClassName = "java.nio.Buffer", - name = "address") - private static long ADDRESS_FIELD_OFFSET; -} - @TargetClass(className = "org.agrona.concurrent.AbstractConcurrentArrayQueue") final class 
Target_org_agrona_concurrent_AbstractConcurrentArrayQueue { @Alias diff --git a/java-support/src/main/scala/io/cloudstate/javasupport/impl/AnySupport.scala b/java-support/src/main/scala/io/cloudstate/javasupport/impl/AnySupport.scala index 626d080f5..df6e6202d 100644 --- a/java-support/src/main/scala/io/cloudstate/javasupport/impl/AnySupport.scala +++ b/java-support/src/main/scala/io/cloudstate/javasupport/impl/AnySupport.scala @@ -122,7 +122,7 @@ object AnySupport { .asInstanceOf[Seq[(String, Primitive[Any])]] .toMap - private final val objectMapper = new ObjectMapper() + final val objectMapper = new ObjectMapper() private def primitiveToBytes[T](primitive: Primitive[T], value: T): ByteString = if (value != primitive.defaultValue) { @@ -178,6 +178,8 @@ object AnySupport { descriptor.getDependencies.asScala.toSeq ++ descriptor.getPublicDependencies.asScala) } } + + def extractBytes(bytes: ByteString): ByteString = bytesToPrimitive(BytesPrimitive, bytes) } class AnySupport(descriptors: Array[Descriptors.FileDescriptor], diff --git a/java-support/src/main/scala/io/cloudstate/javasupport/impl/ReflectionHelper.scala b/java-support/src/main/scala/io/cloudstate/javasupport/impl/ReflectionHelper.scala index e2e7851f7..d652ad1f3 100644 --- a/java-support/src/main/scala/io/cloudstate/javasupport/impl/ReflectionHelper.scala +++ b/java-support/src/main/scala/io/cloudstate/javasupport/impl/ReflectionHelper.scala @@ -20,11 +20,13 @@ import java.lang.annotation.Annotation import java.lang.reflect.{AccessibleObject, Executable, Member, Method, ParameterizedType, Type, WildcardType} import java.util.Optional +import akka.NotUsed import io.cloudstate.javasupport.{ CloudEvent, Context, EntityContext, EntityId, + Jsonable, Metadata, MetadataContext, ServiceCallFactory @@ -32,6 +34,7 @@ import io.cloudstate.javasupport.{ import com.google.protobuf.{Any => JavaPbAny} import scala.reflect.ClassTag +import scala.runtime.BoxedUnit /** * How we do reflection: @@ -208,13 +211,12 @@ private[impl] object ReflectionHelper { verifyAtMostOneMainArgument("CommandHandler", method, parameters) - parameters.foreach { - case MainArgumentParameterHandler(inClass) if !inClass.isAssignableFrom(serviceMethod.inputType.typeClass) => - throw new RuntimeException( - s"Incompatible command class $inClass for command $name, expected ${serviceMethod.inputType.typeClass}" - ) - case _ => - } + val mainArgumentDecoder: JavaPbAny => AnyRef = parameters + .collectFirst { + case MainArgumentParameterHandler(inClass) => + getMainArgumentDecoder(name, inClass, serviceMethod.inputType) + } + .getOrElse(_ => NotUsed) private def serialize(result: AnyRef) = JavaPbAny @@ -249,13 +251,31 @@ private[impl] object ReflectionHelper { } def invoke(obj: AnyRef, command: JavaPbAny, context: CommandContext): Optional[JavaPbAny] = { - val decodedCommand = serviceMethod.inputType.parseFrom(command.getValue).asInstanceOf[AnyRef] + val decodedCommand = mainArgumentDecoder(command) val ctx = InvocationContext(decodedCommand, context) val result = method.invoke(obj, parameters.map(_.apply(ctx)): _*) handleResult(result) } } + def getMainArgumentDecoder(name: String, actualType: Class[_], pbType: ResolvedType[_]): JavaPbAny => AnyRef = + if (actualType.isAssignableFrom(pbType.typeClass)) { pbAny => + pbType.parseFrom(pbAny.getValue).asInstanceOf[AnyRef] + } else if (pbType.typeClass.equals(classOf[JavaPbAny]) && actualType.getAnnotation(classOf[Jsonable]) != null) { + val reader = AnySupport.objectMapper.readerFor(actualType) + pbAny => { + if 
(pbAny.getTypeUrl.startsWith(AnySupport.CloudStateJson)) { + reader.readValue(AnySupport.extractBytes(pbAny.getValue).newInput()).asInstanceOf[AnyRef] + } else { + throw new RuntimeException( + s"Don't know how to deserialize protobuf Any type with type URL ${pbAny.getTypeUrl} " + ) + } + } + } else { + throw new RuntimeException(s"Incompatible input class $actualType for call $name, expected ${pbType.typeClass}") + } + def getRawType(t: Type): Class[_] = t match { case clazz: Class[_] => clazz case pt: ParameterizedType => getRawType(pt.getRawType) diff --git a/java-support/src/main/scala/io/cloudstate/javasupport/impl/action/AnnotationBasedActionSupport.scala b/java-support/src/main/scala/io/cloudstate/javasupport/impl/action/AnnotationBasedActionSupport.scala index 92781799f..4ac60f626 100644 --- a/java-support/src/main/scala/io/cloudstate/javasupport/impl/action/AnnotationBasedActionSupport.scala +++ b/java-support/src/main/scala/io/cloudstate/javasupport/impl/action/AnnotationBasedActionSupport.scala @@ -21,7 +21,7 @@ import java.util.concurrent.{CompletableFuture, CompletionStage} import akka.NotUsed import akka.stream.{javadsl, Materializer} -import akka.stream.javadsl.{AsPublisher, Source} +import akka.stream.javadsl.Source import akka.stream.scaladsl.{JavaFlowSupport, Sink} import com.google.protobuf.{Descriptors, Any => JavaPbAny} import io.cloudstate.javasupport.action._ @@ -33,7 +33,6 @@ import io.cloudstate.javasupport.impl.{ ResolvedServiceMethod, ResolvedType } -import io.cloudstate.javasupport.Metadata /** * Annotation based implementation of the [[ActionHandler]]. @@ -247,23 +246,19 @@ private object ActionReflection { ReflectionHelper.getRawType(parameterType) match { case envelope if envelope == classOf[MessageEnvelope[_]] => val messageType = ReflectionHelper.getFirstParameter(parameterType) - if (messageType != resolvedType.typeClass) { - throw new RuntimeException( - s"Incompatible message class $messageType for call $method, expected ${resolvedType.typeClass}" - ) - } else { envelope => + val decoder = ReflectionHelper.getMainArgumentDecoder(method, messageType, resolvedType) + + { envelope => MessageEnvelope.of( - resolvedType.parseFrom(envelope.payload.getValue).asInstanceOf[AnyRef], + decoder(envelope.payload), envelope.metadata ) } case payload => - if (payload != resolvedType.typeClass) { - throw new RuntimeException( - s"Incompatible message class $payload for call $method, expected ${resolvedType.typeClass}" - ) - } else { envelope => - resolvedType.parseFrom(envelope.payload.getValue).asInstanceOf[AnyRef] + val decoder = ReflectionHelper.getMainArgumentDecoder(method, payload, resolvedType) + + { envelope => + decoder(envelope.payload) } } } diff --git a/java-support/tck/src/main/java/io/cloudstate/javasupport/tck/JavaSupportTck.java b/java-support/tck/src/main/java/io/cloudstate/javasupport/tck/JavaSupportTck.java index deca99f75..bea2d1d33 100644 --- a/java-support/tck/src/main/java/io/cloudstate/javasupport/tck/JavaSupportTck.java +++ b/java-support/tck/src/main/java/io/cloudstate/javasupport/tck/JavaSupportTck.java @@ -18,6 +18,7 @@ import com.example.valueentity.shoppingcart.Shoppingcart; import io.cloudstate.javasupport.CloudState; +import io.cloudstate.javasupport.tck.model.eventlogeventing.EventLogSubscriber; import io.cloudstate.javasupport.tck.model.valuebased.ValueEntityTckModelEntity; import io.cloudstate.javasupport.tck.model.valuebased.ValueEntityTwoEntity; import io.cloudstate.javasupport.tck.model.action.ActionTckModelBehavior; @@ -29,6 +30,7 
@@ import io.cloudstate.samples.shoppingcart.ShoppingCartEntity; import io.cloudstate.tck.model.Action; import io.cloudstate.tck.model.Crdt; +import io.cloudstate.tck.model.Eventlogeventing; import io.cloudstate.tck.model.Eventsourced; import io.cloudstate.tck.model.valueentity.Valueentity; @@ -66,6 +68,17 @@ public static final void main(String[] args) throws Exception { .registerEventSourcedEntity( EventSourcedTwoEntity.class, Eventsourced.getDescriptor().findServiceByName("EventSourcedTwo")) + .registerAction( + new EventLogSubscriber(), + Eventlogeventing.getDescriptor().findServiceByName("EventLogSubscriberModel")) + .registerEventSourcedEntity( + io.cloudstate.javasupport.tck.model.eventlogeventing.EventSourcedEntityOne.class, + Eventlogeventing.getDescriptor().findServiceByName("EventSourcedEntityOne"), + Eventlogeventing.getDescriptor()) + .registerEventSourcedEntity( + io.cloudstate.javasupport.tck.model.eventlogeventing.EventSourcedEntityTwo.class, + Eventlogeventing.getDescriptor().findServiceByName("EventSourcedEntityTwo"), + Eventlogeventing.getDescriptor()) .registerEventSourcedEntity( io.cloudstate.samples.eventsourced.shoppingcart.ShoppingCartEntity.class, com.example.shoppingcart.Shoppingcart.getDescriptor().findServiceByName("ShoppingCart"), diff --git a/java-support/tck/src/main/java/io/cloudstate/javasupport/tck/model/eventlogeventing/EventLogSubscriber.java b/java-support/tck/src/main/java/io/cloudstate/javasupport/tck/model/eventlogeventing/EventLogSubscriber.java new file mode 100644 index 000000000..7cbd458e5 --- /dev/null +++ b/java-support/tck/src/main/java/io/cloudstate/javasupport/tck/model/eventlogeventing/EventLogSubscriber.java @@ -0,0 +1,83 @@ +/* + * Copyright 2019 Lightbend Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.cloudstate.javasupport.tck.model.eventlogeventing; + +import akka.NotUsed; +import akka.stream.javadsl.Source; +import io.cloudstate.javasupport.CloudEvent; +import io.cloudstate.javasupport.action.Action; +import io.cloudstate.javasupport.action.ActionContext; +import io.cloudstate.javasupport.action.ActionReply; +import io.cloudstate.javasupport.action.CallHandler; +import io.cloudstate.tck.model.EventLogSubscriberModel; +import io.cloudstate.tck.model.Eventlogeventing; + +@Action +public class EventLogSubscriber { + + @CallHandler + public ActionReply<Eventlogeventing.Response> processEventOne( + ActionContext context, CloudEvent cloudEvent, Eventlogeventing.EventOne eventOne) { + return convert(context, cloudEvent, eventOne.getStep()); + } + + @CallHandler + public Source<ActionReply<Eventlogeventing.Response>, NotUsed> processEventTwo( + ActionContext context, CloudEvent cloudEvent, Eventlogeventing.EventTwo eventTwo) { + return Source.from(eventTwo.getStepList()).map(step -> convert(context, cloudEvent, step)); + } + + @CallHandler + public Eventlogeventing.Response effect(Eventlogeventing.EffectRequest request) { + return Eventlogeventing.Response.newBuilder() + .setId(request.getId()) + .setMessage(request.getMessage()) + .build(); + } + + @CallHandler + public Eventlogeventing.Response processAnyEvent(JsonMessage jsonMessage, CloudEvent cloudEvent) { + return Eventlogeventing.Response.newBuilder() + .setId(cloudEvent.subject().orElse("")) + .setMessage(jsonMessage.message) + .build(); + } + + private ActionReply<Eventlogeventing.Response> convert( + ActionContext context, CloudEvent cloudEvent, Eventlogeventing.ProcessStep step) { + String id = cloudEvent.subject().orElse(""); + if (step.hasReply()) { + return ActionReply.message( + Eventlogeventing.Response.newBuilder() + .setId(id) + .setMessage(step.getReply().getMessage()) + .build()); + } else if (step.hasForward()) { + return ActionReply.forward( + context + .serviceCallFactory() + .lookup(EventLogSubscriberModel.name, "Effect", Eventlogeventing.EffectRequest.class) + .createCall( + Eventlogeventing.EffectRequest.newBuilder() + .setId(id) + .setMessage(step.getForward().getMessage()) + .build())); + } else { + throw new RuntimeException("No reply or forward"); + } + } +} diff --git a/java-support/tck/src/main/java/io/cloudstate/javasupport/tck/model/eventlogeventing/EventSourcedEntityOne.java b/java-support/tck/src/main/java/io/cloudstate/javasupport/tck/model/eventlogeventing/EventSourcedEntityOne.java new file mode 100644 index 000000000..b32245e85 --- /dev/null +++ b/java-support/tck/src/main/java/io/cloudstate/javasupport/tck/model/eventlogeventing/EventSourcedEntityOne.java @@ -0,0 +1,43 @@ +/* + * Copyright 2019 Lightbend Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package io.cloudstate.javasupport.tck.model.eventlogeventing; + +import com.google.protobuf.Empty; +import io.cloudstate.javasupport.eventsourced.CommandHandler; +import io.cloudstate.javasupport.eventsourced.CommandContext; +import io.cloudstate.javasupport.eventsourced.EventHandler; +import io.cloudstate.javasupport.eventsourced.EventSourcedEntity; +import io.cloudstate.tck.model.Eventlogeventing; + +@EventSourcedEntity(persistenceId = "eventlogeventing-one") +public class EventSourcedEntityOne { + @CommandHandler + public Empty emitEvent(Eventlogeventing.EmitEventRequest event, CommandContext ctx) { + if (event.hasEventOne()) { + ctx.emit(event.getEventOne()); + } else { + ctx.emit(event.getEventTwo()); + } + return Empty.getDefaultInstance(); + } + + @EventHandler + public void handle(Eventlogeventing.EventOne event) {} + + @EventHandler + public void handle(Eventlogeventing.EventTwo event) {} +} diff --git a/java-support/tck/src/main/java/io/cloudstate/javasupport/tck/model/eventlogeventing/EventSourcedEntityTwo.java b/java-support/tck/src/main/java/io/cloudstate/javasupport/tck/model/eventlogeventing/EventSourcedEntityTwo.java new file mode 100644 index 000000000..086902910 --- /dev/null +++ b/java-support/tck/src/main/java/io/cloudstate/javasupport/tck/model/eventlogeventing/EventSourcedEntityTwo.java @@ -0,0 +1,36 @@ +/* + * Copyright 2019 Lightbend Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.cloudstate.javasupport.tck.model.eventlogeventing; + +import com.google.protobuf.Empty; +import io.cloudstate.javasupport.eventsourced.CommandHandler; +import io.cloudstate.javasupport.eventsourced.CommandContext; +import io.cloudstate.javasupport.eventsourced.EventHandler; +import io.cloudstate.javasupport.eventsourced.EventSourcedEntity; +import io.cloudstate.tck.model.Eventlogeventing; + +@EventSourcedEntity(persistenceId = "eventlogeventing-two") +public class EventSourcedEntityTwo { + @CommandHandler + public Empty emitJsonEvent(Eventlogeventing.JsonEvent event, CommandContext ctx) { + ctx.emit(new JsonMessage(event.getMessage())); + return Empty.getDefaultInstance(); + } + + @EventHandler + public void handle(JsonMessage message) {} +} diff --git a/java-support/tck/src/main/java/io/cloudstate/javasupport/tck/model/eventlogeventing/JsonMessage.java b/java-support/tck/src/main/java/io/cloudstate/javasupport/tck/model/eventlogeventing/JsonMessage.java new file mode 100644 index 000000000..8867fc9d3 --- /dev/null +++ b/java-support/tck/src/main/java/io/cloudstate/javasupport/tck/model/eventlogeventing/JsonMessage.java @@ -0,0 +1,30 @@ +/* + * Copyright 2019 Lightbend Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.cloudstate.javasupport.tck.model.eventlogeventing; + +import io.cloudstate.javasupport.Jsonable; + +@Jsonable +public class JsonMessage { + public JsonMessage(String message) { + this.message = message; + } + + public JsonMessage() {} + + public String message; +} diff --git a/protocols/example/shoppingcart/products.proto b/protocols/example/shoppingcart/products.proto new file mode 100644 index 000000000..5f0e48c33 --- /dev/null +++ b/protocols/example/shoppingcart/products.proto @@ -0,0 +1,48 @@ +// Copyright 2019 Lightbend Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +import "google/protobuf/empty.proto"; +import "cloudstate/entity_key.proto"; + +package com.example.shoppingcart; + +option go_package = "tck/shoppingcart"; + +message UpdateCartQuantityRequest { + string product_id = 1 [(.cloudstate.entity_key) = true]; + string user_id = 2; + int32 quantity = 3; +} + +message RemoveProductFromCartRequest { + string product_id = 1 [(.cloudstate.entity_key) = true]; + string user_id = 2; +} + +message GetProductRequest { + string product_id = 1 [(.cloudstate.entity_key) = true]; +} + +message Product { + int32 total_quantities = 1; + int32 total_carts = 2; +} + +service ShoppingCartProducts { + rpc UpdateCartQuantity (UpdateCartQuantityRequest) returns (google.protobuf.Empty); + rpc RemoveProductFromCart (RemoveProductFromCartRequest) returns (google.protobuf.Empty); + rpc GetProduct (GetProductRequest) returns (Product); +} diff --git a/protocols/example/shoppingcart/projection.proto b/protocols/example/shoppingcart/projection.proto new file mode 100644 index 000000000..bf315af56 --- /dev/null +++ b/protocols/example/shoppingcart/projection.proto @@ -0,0 +1,43 @@ +// Copyright 2019 Lightbend Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This is the shopping cart projection, which consumes events from the shopping cart entity's event log.
+syntax = "proto3"; + +import "google/protobuf/empty.proto"; +import "cloudstate/eventing.proto"; +import "shoppingcart/persistence/domain.proto"; + +package com.example.shoppingcart; + +option go_package = "tck/shoppingcart"; + +service ShoppingCartProjection { + rpc HandleItemAdded (com.example.shoppingcart.persistence.ItemAdded) returns (google.protobuf.Empty) { + option (.cloudstate.eventing) = { + in: { + event_log: "shopping-cart" + } + }; + } + + rpc HandleItemRemoved (com.example.shoppingcart.persistence.ItemRemoved) returns (google.protobuf.Empty) { + option (.cloudstate.eventing) = { + in: { + event_log: "shopping-cart" + } + }; + } + +} diff --git a/protocols/example/shoppingcart/shoppingcart.proto b/protocols/example/shoppingcart/shoppingcart.proto index fbc008e41..d5333f6d1 100644 --- a/protocols/example/shoppingcart/shoppingcart.proto +++ b/protocols/example/shoppingcart/shoppingcart.proto @@ -58,7 +58,6 @@ service ShoppingCart { post: "/cart/{user_id}/items/add", body: "*", }; - option (.cloudstate.eventing).in = "items"; } rpc RemoveItem(RemoveLineItem) returns (google.protobuf.Empty) { diff --git a/protocols/example/valueentity/shoppingcart/shoppingcart.proto b/protocols/example/valueentity/shoppingcart/shoppingcart.proto index f5ef39b8f..4057a446e 100644 --- a/protocols/example/valueentity/shoppingcart/shoppingcart.proto +++ b/protocols/example/valueentity/shoppingcart/shoppingcart.proto @@ -69,7 +69,6 @@ service ShoppingCart { post: "/ve/cart/{user_id}/items/add", body: "*", }; - option (.cloudstate.eventing).in = "items"; } rpc RemoveItem(RemoveLineItem) returns (google.protobuf.Empty) { diff --git a/protocols/frontend/cloudstate/entity_key.proto b/protocols/frontend/cloudstate/entity_key.proto index e2129a190..8d02120a5 100644 --- a/protocols/frontend/cloudstate/entity_key.proto +++ b/protocols/frontend/cloudstate/entity_key.proto @@ -26,5 +26,5 @@ option java_package = "io.cloudstate"; option go_package = "github.com/cloudstateio/go-support/cloudstate;cloudstate"; extend google.protobuf.FieldOptions { - bool entity_key = 50002; + bool entity_key = 1080; } diff --git a/protocols/frontend/cloudstate/eventing.proto b/protocols/frontend/cloudstate/eventing.proto index 4ca2c48a7..69f5edcc3 100644 --- a/protocols/frontend/cloudstate/eventing.proto +++ b/protocols/frontend/cloudstate/eventing.proto @@ -25,11 +25,51 @@ option java_package = "io.cloudstate"; option java_multiple_files = true; option java_outer_classname = "EventsProto"; +// Eventing configuration for a gRPC method. message Eventing { - string in = 1; - string out = 2; // Special value "discard" means do not publish + // The event source in configuration. + EventSource in = 1; + + // The event destination out configuration. + // + // Optional, if unset, messages out will not be published anywhere. + EventDestination out = 2; +} + +// Event source configuration +message EventSource { + + // The consumer group id. + // + // By default, all rpc methods on a given service with the same source will be part of the same virtual consumer + // group, messages will be routed to the different methods by type. This can be used to override that, if you want + // multiple methods to act as independent consumers of the same source (ie, if you want the same event to be + // published to each consumer) then give each consumer a unique name. + // + // Note that this does depend on the event source supporting multiple consumer groups. Queue based event sources + // may not support this. 
+ string consumer_group = 1; + + oneof source { + + // A topic source. + // + // This will consume events from the given topic name. + string topic = 2; + + // An event log source. + // + // This will consume events from the given event log with the given persistence id. + string event_log = 3; + } +} + +message EventDestination { + oneof destination { + string topic = 1; + } } extend google.protobuf.MethodOptions { - Eventing eventing = 50003; + Eventing eventing = 1081; } diff --git a/protocols/frontend/cloudstate/legacy_entity_key.proto b/protocols/frontend/cloudstate/legacy_entity_key.proto new file mode 100644 index 000000000..24b6735d3 --- /dev/null +++ b/protocols/frontend/cloudstate/legacy_entity_key.proto @@ -0,0 +1,30 @@ +// Copyright 2019 Lightbend Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Extension for specifying which field in a message is to be considered an +// entity key, for the purposes of associating gRPC calls with entities and +// sharding. + +syntax = "proto3"; + +import "google/protobuf/descriptor.proto"; + +package cloudstate; + +option java_package = "io.cloudstate"; +option go_package = "github.com/cloudstateio/go-support/cloudstate;cloudstate"; + +extend google.protobuf.FieldOptions { + bool legacy_entity_key = 50002; +} diff --git a/protocols/protocol/cloudstate/entity.proto b/protocols/protocol/cloudstate/entity.proto index a4df3a0ca..8b7674fe4 100644 --- a/protocols/protocol/cloudstate/entity.proto +++ b/protocols/protocol/cloudstate/entity.proto @@ -95,6 +95,7 @@ message MetadataEntry { // A reply to the sender. message Reply { + // The reply payload. google.protobuf.Any payload = 1; @@ -103,21 +104,25 @@ message Reply { // Not all transports support per message metadata, for example, gRPC doesn't. The Cloudstate proxy MAY ignore the // metadata in this case, or it MAY lift the metadata into another place, for example, in gRPC, a unary call MAY // have its reply metadata placed in the headers of the HTTP response, or the first reply to a streamed call MAY - have its metadata placed in the headers of the HTTP response. + // have its metadata placed in the headers of the HTTP response. // // If the metadata is ignored, the Cloudstate proxy MAY notify the user function by sending an error message to the // EntityDiscovery.ReportError gRPC call. - Metadata metadata = 2; + cloudstate.Metadata metadata = 2; } // Forwards handling of this request to another entity. message Forward { + // The name of the service to forward to. string service_name = 1; + // The name of the command. string command_name = 2; + // The payload. google.protobuf.Any payload = 3; + // The metadata to include with the forward. Metadata metadata = 4; } @@ -172,7 +177,7 @@ message Command { // The command payload. google.protobuf.Any payload = 4; - // Whether the command is streamed or not + // Whether the command is streamed or not. bool streamed = 5; // The command metadata.
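As an illustration of the new EventSource options (a hypothetical sketch, not part of this patch; the service, method, and event log names are invented), the following proto shows two methods consuming the same event log as independent consumers by overriding the default virtual consumer group, so each method receives every event rather than having events routed between them by type:

syntax = "proto3";

import "google/protobuf/any.proto";
import "google/protobuf/empty.proto";
import "cloudstate/eventing.proto";

package com.example;

// Hypothetical subscribers: without consumer_group, both methods would form a
// single virtual consumer group over the "shopping-cart" event log, and each
// event would be routed to one of them by type. Distinct consumer groups make
// them independent consumers that each see the full event stream.
service ShoppingCartListeners {
  rpc Audit (google.protobuf.Any) returns (google.protobuf.Empty) {
    option (.cloudstate.eventing) = {
      in: {
        consumer_group: "audit"
        event_log: "shopping-cart"
      }
    };
  }

  rpc Index (google.protobuf.Any) returns (google.protobuf.Empty) {
    option (.cloudstate.eventing) = {
      in: {
        consumer_group: "index"
        event_log: "shopping-cart"
      }
    };
  }
}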
diff --git a/protocols/tck/cloudstate/tck/model/eventlogeventing.proto b/protocols/tck/cloudstate/tck/model/eventlogeventing.proto new file mode 100644 index 000000000..4accfcb2e --- /dev/null +++ b/protocols/tck/cloudstate/tck/model/eventlogeventing.proto @@ -0,0 +1,178 @@ +// Copyright 2019 Lightbend Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +// == Cloudstate TCK model test for event log eventing == +// + +syntax = "proto3"; + +package cloudstate.tck.model.eventlogeventing; + +option java_package = "io.cloudstate.tck.model"; +option go_package = "github.com/cloudstateio/go-support/tck/eventlogeventing;eventlogeventing"; + +import "google/protobuf/any.proto"; + +import "google/protobuf/empty.proto"; +import "cloudstate/eventing.proto"; +import "cloudstate/entity_key.proto"; + +// +// The `EventLogSubscriberModel` service is an Action that should be implemented in the following ways: +// +// - The `ProcessEventOne` method receives an `EventOne` message, and must behave according to the passed-in step. +// - The `ProcessEventTwo` method receives an `EventTwo` message, and must return a stream of responses, one for each +// incoming step. +// - The `Effect` method receives an `EffectRequest` message and must respond with a `Response` that contains the id +// from the effect message. +// - The `ProcessAnyEvent` method receives a `google.protobuf.Any`, which will contain JSON serialized according to +// the Cloudstate JSON serialization conventions, with a `type_url` of `json.cloudstate.io/JsonEvent`. The contents +// of the JSON message will be a JSON object with a single `message` property, and the call must respond with this +// message in the `Response`. +// - Forwarding and side effects should be made to the `Effect` call. +// - The response to the Process calls, or the effects and forwards emitted by them, must contain the message field +// from the incoming events, along with the id read from the CloudEvent metadata subject property. +// +service EventLogSubscriberModel { + rpc ProcessEventOne(EventOne) returns (Response) { + option (.cloudstate.eventing) = { + in: { + event_log: "eventlogeventing-one" + } + }; + }; + + rpc ProcessEventTwo(EventTwo) returns (stream Response) { + option (.cloudstate.eventing) = { + in: { + event_log: "eventlogeventing-one" + } + }; + }; + + rpc Effect(EffectRequest) returns (Response); + + rpc ProcessAnyEvent(google.protobuf.Any) returns (Response) { + option (.cloudstate.eventing) = { + in: { + event_log: "eventlogeventing-two" + } + }; + }; +} + + +// +// The `EventSourcedEntityOne` service is an event sourced entity that should be implemented in the following ways: +// +// - The `EmitEvent` method should emit the event in the `EmitEventRequest` message as a protobuf-serialized event. +// - The persistence id for it must be `eventlogeventing-one`.
+// +service EventSourcedEntityOne { + rpc EmitEvent(EmitEventRequest) returns (google.protobuf.Empty); +} + +// +// The `EventSourcedEntityTwo` service is an event sourced entity that should be implemented in the following ways: +// +// - The `EmitJsonEvent` method should emit an event serialized as JSON. This event should: +// - Contain a single `message` property with the value of the `message` field in `JsonEvent`. +// - Be serialized according to the Cloudstate JSON serialization conventions - that is, with the JSON serialized to +// bytes, then placed into a protobuf message with a single bytes field with field number 1 (see the sketch +// following this file). +// - Have a type_url of `json.cloudstate.io/JsonEvent`. +// - The persistence id for it must be `eventlogeventing-two`. +// +service EventSourcedEntityTwo { + rpc EmitJsonEvent(JsonEvent) returns (google.protobuf.Empty); +} + +// +// An `EmitEventRequest` is received by the `EventSourcedEntityOne` entity to instruct it to emit either an `EventOne` +// or an `EventTwo`. +// +message EmitEventRequest { + string id = 1 [(.cloudstate.entity_key) = true]; + oneof event { + EventOne event_one = 2; + EventTwo event_two = 3; + } +} + +// +// An `EventOne` is an event emitted by the `EventSourcedEntityOne` entity and subscribed to by +// `EventLogSubscriberModel`. +// +message EventOne { + ProcessStep step = 2; +} + +// +// An `EventTwo` is an event emitted by the `EventSourcedEntityOne` entity and subscribed to by +// `EventLogSubscriberModel`. +// +message EventTwo { + repeated ProcessStep step = 2; +} + +// +// A `JsonEvent` is an event emitted by the `EventSourcedEntityTwo` entity and subscribed to by +// `EventLogSubscriberModel`. +// +message JsonEvent { + string id = 1 [(.cloudstate.entity_key) = true]; + string message = 2; +} + +// +// Each `ProcessStep` is one of: +// +// - Reply: reply with the given message in a `Response`. +// - Forward: forward to another service, in place of replying with a `Response`. +// +message ProcessStep { + oneof step { + Reply reply = 1; + Forward forward = 2; + } +} + +// +// Reply with a message in the response. +// +message Reply { + string message = 1; +} + +// +// Replace the response with a forward to the `EventLogSubscriberModel` `Effect` call. +// The payload must be an `EffectRequest` message with the given `message`. +// +message Forward { + string message = 1; +} + +// +// The `Response` message must contain the message from the corresponding reply step. +// +message Response { + string id = 1; + string message = 2; +} + +// The `EffectRequest` message must contain the id from the `Forward` step. +message EffectRequest { + string id = 1; + string message = 2; +} \ No newline at end of file
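To make the Cloudstate JSON serialization convention referenced above concrete, here is a sketch (the message and package names are hypothetical, not part of this patch) of the wire shape of a JSON-serialized event such as `JsonEvent`: the UTF-8 JSON text is stored in a single bytes field with field number 1, and the serialized message becomes the value of a `google.protobuf.Any` whose `type_url` is `json.cloudstate.io/JsonEvent`:

syntax = "proto3";

package com.example;

// Hypothetical envelope equivalent to the Cloudstate JSON convention: for a
// JsonEvent with message "hi", field 1 holds the UTF-8 bytes of
// {"message":"hi"}, and this message, serialized, is carried in a
// google.protobuf.Any with type_url "json.cloudstate.io/JsonEvent".
message CloudstateJsonEnvelope {
  bytes json = 1;
}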
diff --git a/proxy/cassandra/src/graal/META-INF/native-image/io.cloudstate/cloudstate-proxy-cassandra/native-image.properties b/proxy/cassandra/src/graal/META-INF/native-image/io.cloudstate/cloudstate-proxy-cassandra/native-image.properties new file mode 100644 index 000000000..98d306764 --- /dev/null +++ b/proxy/cassandra/src/graal/META-INF/native-image/io.cloudstate/cloudstate-proxy-cassandra/native-image.properties @@ -0,0 +1 @@ +Args = -H:ReflectionConfigurationResources=${.}/reflect-config.json diff --git a/proxy/cassandra/src/graal/META-INF/native-image/io.cloudstate/cloudstate-proxy-cassandra/reflect-config.json.conf b/proxy/cassandra/src/graal/META-INF/native-image/io.cloudstate/cloudstate-proxy-cassandra/reflect-config.json.conf new file mode 100644 index 000000000..7200e263a --- /dev/null +++ b/proxy/cassandra/src/graal/META-INF/native-image/io.cloudstate/cloudstate-proxy-cassandra/reflect-config.json.conf @@ -0,0 +1,6 @@ +[ +{ + name: "io.cloudstate.proxy.cassandra.CassandraProjectionSupport" + methods: [{name:"<init>",parameterTypes: ["akka.actor.typed.ActorSystem"]}] +} +] \ No newline at end of file diff --git a/proxy/cassandra/src/main/resources/application.conf b/proxy/cassandra/src/main/resources/application.conf index 705c7266b..5c93b502a 100644 --- a/proxy/cassandra/src/main/resources/application.conf +++ b/proxy/cassandra/src/main/resources/application.conf @@ -6,7 +6,14 @@ akka.persistence { } cloudstate.proxy { - eventsourced-entity.journal-enabled = true + eventsourced-entity { + journal-enabled = true + read-journal = cassandra-read-journal + projection-support { + enabled = true + class = "io.cloudstate.proxy.cassandra.CassandraProjectionSupport" + } + } } cassandra-keyspace = "akka" @@ -37,3 +44,13 @@ cassandra-snapshot-store { } jmx-reporting-enabled = off } + +cassandra-read-journal { + keyspace = ${cassandra-keyspace} + contact-points = ${cassandra-contact-points} + authentication { + username = ${?cassandra-username} + password = ${?cassandra-password} + } + jmx-reporting-enabled = off +} diff --git a/proxy/cassandra/src/main/scala/io/cloudstate/proxy/cassandra/CassandraProjectionSupport.scala b/proxy/cassandra/src/main/scala/io/cloudstate/proxy/cassandra/CassandraProjectionSupport.scala new file mode 100644 index 000000000..23940d2e2 --- /dev/null +++ b/proxy/cassandra/src/main/scala/io/cloudstate/proxy/cassandra/CassandraProjectionSupport.scala @@ -0,0 +1,38 @@ +/* + * Copyright 2019 Lightbend Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package io.cloudstate.proxy.cassandra + +import akka.Done +import akka.actor.typed.ActorSystem +import akka.projection.cassandra.scaladsl.CassandraProjection +import akka.projection.{ProjectionContext, ProjectionId} +import akka.projection.scaladsl.{AtLeastOnceFlowProjection, SourceProvider} +import akka.stream.scaladsl.FlowWithContext +import io.cloudstate.proxy.eventing.ProjectionSupport + +import scala.concurrent.Future + +class CassandraProjectionSupport(implicit system: ActorSystem[_]) extends ProjectionSupport { + override def create[Offset, Envelope]( + projectionId: ProjectionId, + sourceProvider: SourceProvider[Offset, Envelope], + flow: FlowWithContext[Envelope, ProjectionContext, Done, ProjectionContext, _] + ): AtLeastOnceFlowProjection[Offset, Envelope] = + CassandraProjection.atLeastOnceFlow(projectionId, sourceProvider, flow) + + override def prepare(): Future[Done] = CassandraProjection.createOffsetTableIfNotExists() +} diff --git a/proxy/core/src/graal/META-INF/native-image/com.typesafe.akka/akka-actor-typed/native-image.properties b/proxy/core/src/graal/META-INF/native-image/com.typesafe.akka/akka-actor-typed/native-image.properties new file mode 100644 index 000000000..98d306764 --- /dev/null +++ b/proxy/core/src/graal/META-INF/native-image/com.typesafe.akka/akka-actor-typed/native-image.properties @@ -0,0 +1 @@ +Args = -H:ReflectionConfigurationResources=${.}/reflect-config.json diff --git a/proxy/core/src/graal/META-INF/native-image/com.typesafe.akka/akka-actor-typed/reflect-config.json.conf b/proxy/core/src/graal/META-INF/native-image/com.typesafe.akka/akka-actor-typed/reflect-config.json.conf new file mode 100644 index 000000000..3f4824f5a --- /dev/null +++ b/proxy/core/src/graal/META-INF/native-image/com.typesafe.akka/akka-actor-typed/reflect-config.json.conf @@ -0,0 +1,10 @@ +[ +{ + name: "akka.actor.typed.internal.adapter.ActorSystemAdapter$LoadTypedExtensions$" + fields: [{name:"MODULE$"}] +}, +{ + name: "akka.actor.typed.receptionist.Receptionist$" + fields: [{name:"MODULE$"}] +} +] diff --git a/proxy/core/src/graal/META-INF/native-image/com.typesafe.akka/akka-actor/reflect-config.json.conf b/proxy/core/src/graal/META-INF/native-image/com.typesafe.akka/akka-actor/reflect-config.json.conf index cfaed3153..6bd5dc65a 100644 --- a/proxy/core/src/graal/META-INF/native-image/com.typesafe.akka/akka-actor/reflect-config.json.conf +++ b/proxy/core/src/graal/META-INF/native-image/com.typesafe.akka/akka-actor/reflect-config.json.conf @@ -192,4 +192,9 @@ { name: "akka.actor.Kill$" } +{ + # This is used when Akka typed is used. 
+ name: "akka.dispatch.SingleConsumerOnlyUnboundedMailbox" + methods: [{name: "", parameterTypes: ["akka.actor.ActorSystem$Settings", "com.typesafe.config.Config"]}] +} ] diff --git a/proxy/core/src/graal/META-INF/native-image/com.typesafe.akka/akka-cluster-typed/native-image.properties b/proxy/core/src/graal/META-INF/native-image/com.typesafe.akka/akka-cluster-typed/native-image.properties new file mode 100644 index 000000000..98d306764 --- /dev/null +++ b/proxy/core/src/graal/META-INF/native-image/com.typesafe.akka/akka-cluster-typed/native-image.properties @@ -0,0 +1 @@ +Args = -H:ReflectionConfigurationResources=${.}/reflect-config.json diff --git a/proxy/core/src/graal/META-INF/native-image/com.typesafe.akka/akka-cluster-typed/reflect-config.json.conf b/proxy/core/src/graal/META-INF/native-image/com.typesafe.akka/akka-cluster-typed/reflect-config.json.conf new file mode 100644 index 000000000..6a9e085bf --- /dev/null +++ b/proxy/core/src/graal/META-INF/native-image/com.typesafe.akka/akka-cluster-typed/reflect-config.json.conf @@ -0,0 +1,10 @@ +[ + { + name: "akka.cluster.typed.internal.receptionist.ClusterReceptionistConfigCompatChecker" + methods: [{name: "", parameterTypes: []}] + }, + { + name: "akka.cluster.typed.internal.receptionist.ClusterReceptionist$", + fields: [{name:"MODULE$"}] + } +] \ No newline at end of file diff --git a/proxy/core/src/graal/META-INF/native-image/io.cloudstate/cloudstate-proxy-core/reflect-config.json.conf b/proxy/core/src/graal/META-INF/native-image/io.cloudstate/cloudstate-proxy-core/reflect-config.json.conf index b79c3f816..437d6eb91 100644 --- a/proxy/core/src/graal/META-INF/native-image/io.cloudstate/cloudstate-proxy-core/reflect-config.json.conf +++ b/proxy/core/src/graal/META-INF/native-image/io.cloudstate/cloudstate-proxy-core/reflect-config.json.conf @@ -115,4 +115,12 @@ name: "io.cloudstate.proxy.autoscaler.ScalingDown" allPublicMethods: true } +{ + name: "akka.persistence.cloudstate.InmemReadJournal" + methods: [{name:"",parameterTypes: ["akka.actor.ExtendedActorSystem", "com.typesafe.config.Config"]}] +} +{ + name: "io.cloudstate.proxy.eventing.InMemoryProjectionSupport" + methods: [{name:"",parameterTypes: ["akka.actor.typed.ActorSystem"]}] +} ] diff --git a/proxy/core/src/main/resources/in-memory.conf b/proxy/core/src/main/resources/in-memory.conf index a4f38c70e..987126d77 100644 --- a/proxy/core/src/main/resources/in-memory.conf +++ b/proxy/core/src/main/resources/in-memory.conf @@ -2,8 +2,12 @@ include "cloudstate-common" akka.persistence { - journal.plugin = "akka.persistence.journal.inmem" - snapshot-store.plugin = inmem-snapshot-store + journal.plugin = inmem-journal + snapshot-store.plugin = inmem-snapshot-store +} + +inmem-journal { + class = "akka.persistence.cloudstate.InmemJournal" } inmem-snapshot-store { @@ -13,6 +17,11 @@ inmem-snapshot-store { cloudstate.proxy { eventsourced-entity { journal-enabled = true + read-journal = inmem-read-journal + projection-support { + enabled = true + class = "io.cloudstate.proxy.eventing.InMemoryProjectionSupport" + } } # Configuration for using an in-memory Value Entity persistence store @@ -21,3 +30,7 @@ cloudstate.proxy { persistence.store = "in-memory" } } + +inmem-read-journal { + class = "akka.persistence.cloudstate.InmemReadJournal" +} diff --git a/proxy/core/src/main/resources/reference.conf b/proxy/core/src/main/resources/reference.conf index b131de965..cadf9a68a 100644 --- a/proxy/core/src/main/resources/reference.conf +++ b/proxy/core/src/main/resources/reference.conf @@ -25,6 
+25,17 @@ cloudstate.proxy { journal-enabled = false passivation-timeout = 30s // Keep short for testing purposes + + # The id of the read journal plugin + read-journal = "" + + # The projection support + projection-support { + enabled = false + + # Class of the projection support + class = "" + } } crdt-entity { diff --git a/proxy/core/src/main/scala/akka/projection/cloudstate/TestAtLeastOnceFlowProjection.scala b/proxy/core/src/main/scala/akka/projection/cloudstate/TestAtLeastOnceFlowProjection.scala new file mode 100644 index 000000000..001648dd2 --- /dev/null +++ b/proxy/core/src/main/scala/akka/projection/cloudstate/TestAtLeastOnceFlowProjection.scala @@ -0,0 +1,92 @@ +/* + * Copyright 2019 Lightbend Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package akka.projection.cloudstate + +import akka.Done +import akka.actor.typed.ActorSystem +import akka.projection.{ProjectionContext, ProjectionId, StatusObserver} +import akka.projection.internal.{ + AtLeastOnce, + FlowHandlerStrategy, + InternalProjection, + NoopStatusObserver, + RestartBackoffSettings, + SettingsImpl, + SingleHandlerStrategy +} +import akka.projection.scaladsl.{AtLeastOnceFlowProjection, Handler, SourceProvider} +import akka.projection.testkit.internal.{TestInMemoryOffsetStoreImpl, TestProjectionImpl} +import akka.projection.testkit.scaladsl.TestProjection +import akka.stream.scaladsl.FlowWithContext + +import scala.concurrent.duration.FiniteDuration + +/** + * This exists because we need TestProjection to implement AtLeastOnceFlowProjection + * (https://github.com/akka/akka-projection/issues/477), and the only way we can implement + * it is if we are in the akka package. 
+ */ +class TestAtLeastOnceFlowProjection[Offset, Envelope] private (delegate: TestProjectionImpl[Offset, Envelope]) + extends AtLeastOnceFlowProjection[Offset, Envelope] + with SettingsImpl[TestAtLeastOnceFlowProjection[Offset, Envelope]] + with InternalProjection { + + override def withStatusObserver(observer: StatusObserver[Envelope]): TestAtLeastOnceFlowProjection[Offset, Envelope] = + new TestAtLeastOnceFlowProjection(delegate.withStatusObserver(observer)) + + override def withRestartBackoffSettings( + restartBackoff: RestartBackoffSettings + ): TestAtLeastOnceFlowProjection[Offset, Envelope] = + new TestAtLeastOnceFlowProjection(delegate.withRestartBackoffSettings(restartBackoff)) + + override def withSaveOffset(afterEnvelopes: Int, + afterDuration: FiniteDuration): TestAtLeastOnceFlowProjection[Offset, Envelope] = + new TestAtLeastOnceFlowProjection(delegate.withSaveOffset(afterEnvelopes, afterDuration)) + + override def withGroup(groupAfterEnvelopes: Int, + groupAfterDuration: FiniteDuration): TestAtLeastOnceFlowProjection[Offset, Envelope] = + new TestAtLeastOnceFlowProjection(delegate.withGroup(groupAfterEnvelopes, groupAfterDuration)) + + override def projectionId: ProjectionId = delegate.projectionId + + override def statusObserver: StatusObserver[Envelope] = delegate.statusObserver + + override private[projection] def mappedSource()(implicit system: ActorSystem[_]) = delegate.mappedSource() + + override private[projection] def actorHandlerInit[T] = delegate.actorHandlerInit + + override private[projection] def run()(implicit system: ActorSystem[_]) = delegate.run() + + override private[projection] def offsetStrategy = delegate.offsetStrategy +} + +object TestAtLeastOnceFlowProjection { + def apply[Offset, Envelope]( + projectionId: ProjectionId, + sourceProvider: SourceProvider[Offset, Envelope], + flow: FlowWithContext[Envelope, ProjectionContext, Done, ProjectionContext, _] + ): TestAtLeastOnceFlowProjection[Offset, Envelope] = + new TestAtLeastOnceFlowProjection( + new TestProjectionImpl(projectionId = projectionId, + sourceProvider = sourceProvider, + handlerStrategy = FlowHandlerStrategy(flow), + offsetStrategy = AtLeastOnce(afterEnvelopes = Some(1)), + statusObserver = NoopStatusObserver, + offsetStoreFactory = () => new TestInMemoryOffsetStoreImpl[Offset](), + startOffset = None) + ) +} diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/EntityDiscoveryManager.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/EntityDiscoveryManager.scala index a519143d3..610484f15 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/EntityDiscoveryManager.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/EntityDiscoveryManager.scala @@ -18,13 +18,22 @@ package io.cloudstate.proxy import akka.Done import akka.actor.{Actor, ActorLogging, CoordinatedShutdown, Props, Status} +import akka.actor.typed.{ActorSystem => TypedActorSystem} +import akka.actor.typed.scaladsl.adapter._ import akka.cluster.Cluster import akka.util.Timeout import akka.pattern.pipe -import akka.stream.scaladsl.RunnableGraph import akka.http.scaladsl.Http import akka.http.scaladsl.Http.ServerBinding +import akka.cluster.singleton.{ + ClusterSingletonManager, + ClusterSingletonManagerSettings, + ClusterSingletonProxy, + ClusterSingletonProxySettings +} import akka.grpc.GrpcClientSettings +import akka.persistence.query.PersistenceQuery +import akka.persistence.query.scaladsl.EventsByTagQuery import akka.stream.Materializer import com.google.protobuf.DescriptorProtos import 
com.google.protobuf.Descriptors.{FileDescriptor, ServiceDescriptor} @@ -34,12 +43,21 @@ import io.cloudstate.protocol.entity._ import io.cloudstate.protocol.crdt.Crdt import io.cloudstate.protocol.value_entity.ValueEntity import io.cloudstate.protocol.event_sourced.EventSourced +import io.cloudstate.proxy.autoscaler.{ + Autoscaler, + AutoscalerSettings, + ClusterMembershipFacadeImpl, + KubernetesDeploymentScaler, + NoAutoscaler, + NoScaler +} import io.cloudstate.proxy.action.ActionProtocolSupportFactory +import io.cloudstate.proxy.autoscaler.Autoscaler.ScalerFactory import io.cloudstate.proxy.crdt.CrdtSupportFactory +import io.cloudstate.proxy.eventing.{EventLogEventing, EventingManager, EventingSupport, ProjectionSupport} import io.cloudstate.proxy.eventsourced.EventSourcedSupportFactory import io.cloudstate.proxy.valueentity.EntitySupportFactory -import scala.concurrent.Future import scala.concurrent.duration._ object EntityDiscoveryManager { @@ -74,7 +92,7 @@ object EntityDiscoveryManager { numberOfShards = config.getInt("number-of-shards"), proxyParallelism = config.getInt("proxy-parallelism"), valueEntitySettings = new ValueEntitySettings(config), - eventSourcedSettings = new EventSourcedSettings(config), + eventSourcedSettings = EventSourcedSettings(config), crdtSettings = new CrdtSettings(config), config = config) } @@ -102,12 +120,22 @@ object EntityDiscoveryManager { } } - final case class EventSourcedSettings(journalEnabled: Boolean, passivationTimeout: Timeout) { - def this(config: Config) = { - this( - journalEnabled = config.getBoolean("eventsourced-entity.journal-enabled"), - passivationTimeout = Timeout(config.getDuration("eventsourced-entity.passivation-timeout").toMillis.millis) - ) + final case class EventSourcedSettings(journalEnabled: Boolean, + passivationTimeout: Timeout, + readJournal: String, + projectionSupport: Option[String]) + + object EventSourcedSettings { + def apply(config: Config): EventSourcedSettings = { + val esConfig = config.getConfig("eventsourced-entity") + val journalEnabled = esConfig.getBoolean("journal-enabled") + val passivationTimeout = Timeout(esConfig.getDuration("passivation-timeout").toMillis.millis) + val readJournal = esConfig.getString("read-journal") + val psConfig = esConfig.getConfig("projection-support") + val projectionSupport = if (psConfig.getBoolean("enabled")) { + Some(psConfig.getString("class")) + } else None + EventSourcedSettings(journalEnabled, passivationTimeout, readJournal, projectionSupport) } } @@ -170,6 +198,21 @@ class EntityDiscoveryManager(config: EntityDiscoveryManager.Configuration)( else Map.empty } + private final val elAndPs: Option[(EventingSupport, ProjectionSupport)] = for { + projectionSupportClass <- config.eventSourcedSettings.projectionSupport + } yield { + val projectionSupport: ProjectionSupport = getClass.getClassLoader + .loadClass(projectionSupportClass) + .asSubclass(classOf[ProjectionSupport]) + .getDeclaredConstructor(classOf[TypedActorSystem[_]]) + .newInstance(system.toTyped) + + (new EventLogEventing(projectionSupport, config.eventSourcedSettings.readJournal, system.toTyped), + projectionSupport) + } + private final val eventLogEventing = elAndPs.map(_._1) + private final val projectionSupport = elAndPs.map(_._2) + entityDiscoveryClient.discover(EntityDiscoveryManager.proxyInfo(supportFactories.keys.toSeq)) pipeTo self val supportedProtocolMajorVersion: Int = BuildInfo.protocolMajorVersion @@ -219,14 +262,14 @@ class EntityDiscoveryManager(config: EntityDiscoveryManager.Configuration)( 
.mkString(",")}" ) } - } + }.toList val router = new UserFunctionRouter(entities, entityDiscoveryClient) - /* - val eventSupport = EventingManager.createSupport(config.getConfig("eventing")) - */ - val route = Serve.createRoute(entities, router, entityDiscoveryClient, descriptors, Map.empty) + val topicSupport = EventingManager.createSupport(config.getConfig("eventing")) + val emitters = EventingManager.createEmitters(entities, topicSupport) + + val route = Serve.createRoute(entities, router, entityDiscoveryClient, descriptors, emitters) log.debug("Starting gRPC proxy") @@ -237,9 +280,11 @@ class EntityDiscoveryManager(config: EntityDiscoveryManager.Configuration)( interface = config.httpInterface, port = config.httpPort ) pipeTo self + + EventingManager.startConsumers(router, entities, topicSupport, eventLogEventing, projectionSupport) } - context.become(binding(None)) + context.become(binding) } catch { case e @ EntityDiscoveryException(message) => @@ -255,10 +300,10 @@ class EntityDiscoveryManager(config: EntityDiscoveryManager.Configuration)( private[this] final def extractService(serviceName: String, descriptor: FileDescriptor): Option[ServiceDescriptor] = { val (pkg, name) = Names.splitPrev(serviceName) - Some(descriptor).filter(_.getPackage == pkg).map(_.findServiceByName(name)) + Some(descriptor).filter(_.getPackage == pkg).flatMap(descriptor => Option(descriptor.findServiceByName(name))) } - private[this] final def binding(eventManager: Option[RunnableGraph[Future[Done]]]): Receive = { + private[this] final def binding: Receive = { case sb: ServerBinding => log.info(s"CloudState proxy online at ${sb.localAddress}") @@ -277,8 +322,6 @@ class EntityDiscoveryManager(config: EntityDiscoveryManager.Configuration)( Http().shutdownAllConnectionPools().map(_ => Done) } - eventManager.foreach(_.run() pipeTo self) - context.become(running) case Status.Failure(cause) => // Failure to bind the HTTP server is fatal, terminate @@ -292,9 +335,6 @@ class EntityDiscoveryManager(config: EntityDiscoveryManager.Configuration)( private[this] final def running: Receive = { case Ready => sender ! 
true - case Status.Failure(cause) => // Failure in the eventing subsystem, terminate - log.error(cause, "Eventing failed") - system.terminate() case Done => system.terminate() // FIXME context.become(dead) } diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/Serve.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/Serve.scala index 35d2ee1ae..f0702bc81 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/Serve.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/Serve.scala @@ -116,13 +116,11 @@ object Serve { .collect(Function.unlift(identity)) emitter match { - /* case Some(e) => handler.mapAsync(4) { case Reply(Some(payload), metadata, _) => e.emit(payload, method, metadata).map(_ => payload) } - */ case _ => handler.map(_.payload.get) } diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/UserFunctionTypeSupport.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/UserFunctionTypeSupport.scala index ec44773c5..3cb25b451 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/UserFunctionTypeSupport.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/UserFunctionTypeSupport.scala @@ -22,6 +22,7 @@ import com.google.protobuf.Descriptors.{MethodDescriptor, ServiceDescriptor} import com.google.protobuf.{ByteString, DynamicMessage} import io.cloudstate.protocol.entity.{Entity, Metadata} import io.cloudstate.entity_key.EntityKeyProto +import io.cloudstate.legacy_entity_key.LegacyEntityKeyProto import io.cloudstate.proxy.entity.{EntityCommand, UserFunctionReply} import io.cloudstate.proxy.protobuf.Options @@ -100,7 +101,9 @@ private object EntityMethodDescriptor { final class EntityMethodDescriptor(val method: MethodDescriptor) { private[this] val keyFields = method.getInputType.getFields.iterator.asScala .filter( - field => EntityKeyProto.entityKey.get(Options.convertFieldOptions(field)) + field => + EntityKeyProto.entityKey.get(Options.convertFieldOptions(field)) || + LegacyEntityKeyProto.legacyEntityKey.get(Options.convertFieldOptions(field)) ) .toArray .sortBy(_.getIndex) diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/eventing/EventLogEventing.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/eventing/EventLogEventing.scala new file mode 100644 index 000000000..0021c555e --- /dev/null +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/eventing/EventLogEventing.scala @@ -0,0 +1,137 @@ +/* + * Copyright 2019 Lightbend Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.cloudstate.proxy.eventing + +import java.net.URLEncoder +import java.util.concurrent.atomic.AtomicBoolean + +import akka.Done +import akka.actor.Cancellable +import akka.actor.typed.ActorSystem +import akka.cluster.typed.{ClusterSingleton, SingletonActor} +import akka.persistence.query.{Offset, Sequence, TimeBasedUUID} +import akka.projection.{ProjectionBehavior, ProjectionContext, ProjectionId} +import akka.projection.scaladsl.{AtLeastOnceFlowProjection, SourceProvider} +import akka.projection.eventsourced.EventEnvelope +import akka.projection.eventsourced.scaladsl.EventSourcedProvider +import akka.stream.Materializer +import akka.stream.scaladsl.{Flow, FlowWithContext} +import io.cloudstate.eventing +import com.google.protobuf.any.{Any => ProtoAny} + +import scala.concurrent.duration._ + +class EventLogEventing(projection: ProjectionSupport, readJournalPluginId: String, system: ActorSystem[_])( + implicit mat: Materializer +) extends EventingSupport { + + private def sourceProvider(tag: String): SourceProvider[Offset, EventEnvelope[ProtoAny]] = + EventSourcedProvider + .eventsByTag[ProtoAny](system, readJournalPluginId = readJournalPluginId, tag = tag) + + override def name: String = "event-log" + + override def supportsSource: Boolean = true + + override def createSource(source: eventing.EventSource, serviceName: String): EventSource = + source.source match { + case eventing.EventSource.Source.EventLog(persistenceId) => + val consumerGroup = source.consumerGroup match { + case "" => serviceName + case cg => cg + } + + val actorName = URLEncoder.encode(s"$persistenceId/$consumerGroup", "utf-8") + + new EventSource { + override type SourceEventRef = ProjectionContext + + override def run(flow: Flow[SourceEvent[ProjectionContext], ProjectionContext, _]): Cancellable = { + val projectionActor = ClusterSingleton(system) + .init(SingletonActor(ProjectionBehavior(createProjection(consumerGroup, persistenceId, flow)), actorName)) + + new Cancellable { + private val running = new AtomicBoolean() + + override def cancel(): Boolean = + if (running.compareAndSet(false, true)) { + projectionActor ! 
ProjectionBehavior.Stop + true + } else { + false + } + + override def isCancelled: Boolean = running.get() + } + } + + } + + } + + override def supportsDestination: Boolean = false + + override def createDestination(destination: eventing.EventDestination): EventDestination = + throw new UnsupportedOperationException + + private def createProjection(consumerId: String, + tag: String, + flow: Flow[SourceEvent[ProjectionContext], ProjectionContext, _]) = + projection + .create( + ProjectionId(consumerId, tag), + sourceProvider(tag), + FlowWithContext[EventEnvelope[ProtoAny], ProjectionContext].via( + Flow[(EventEnvelope[ProtoAny], ProjectionContext)] + .map((transformEvent(tag) _).tupled) + .via(flow) + .map(ctx => Done -> ctx) + ) + ) + .withSaveOffset(20, 1.second) + .withRestartBackoff(3.seconds, 30.seconds, 0.2) + + private def transformEvent(tag: String)(event: EventEnvelope[ProtoAny], + context: ProjectionContext): SourceEvent[ProjectionContext] = { + val cloudEvent = event.event match { + case ProtoAny(typeUrl, bytes, _) => + val entityId = event.persistenceId.dropWhile(_ != '|').tail + CloudEvent( + id = offsetToMessageId(event.offset), + source = tag, + specversion = "1.0", + `type` = typeUrl, + datacontenttype = "application/" + EventingManager.ProtobufAnyMediaSubType, + dataschema = None, + subject = Some(entityId), + time = None, + data = Some(bytes) + ) + case other => + throw new IllegalStateException(s"Don't know how to handle event log message of type ${other.getClass}") + } + + SourceEvent(cloudEvent, context) + } + + private def offsetToMessageId(offset: Offset) = + offset match { + case Sequence(seq) => seq.toString + case TimeBasedUUID(uuid) => uuid.toString + case other => throw new IllegalArgumentException(s"Unsupported offset: $other") + } +} diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/eventing/EventingManager.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/eventing/EventingManager.scala index 5c6f7a169..9f7cfc467 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/eventing/EventingManager.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/eventing/EventingManager.scala @@ -16,207 +16,528 @@ package io.cloudstate.proxy.eventing +import java.time.Instant +import java.util.UUID +import java.util.concurrent.atomic.AtomicBoolean + import akka.{Done, NotUsed} -import akka.actor.Cancellable -import akka.stream.{FlowShape, Materializer, OverflowStrategy} -import akka.stream.scaladsl.{Flow, GraphDSL, Merge, Partition, RunnableGraph, Sink, Source} -import io.cloudstate.protocol.entity.{ClientAction, EntityDiscoveryClient, Failure, Reply, UserFunctionError} -import io.cloudstate.proxy.{Serve, UserFunctionRouter} +import akka.actor.{ActorSystem, Cancellable} +import akka.http.scaladsl.model.{MediaType, MediaTypes} +import akka.stream.scaladsl.{Broadcast, Flow, GraphDSL, MergeSequence, Partition, Source, Zip} +import akka.stream.{FlowShape, Materializer} +import com.google.protobuf.TextFormat.ParseException +import com.google.protobuf.{ByteString, CodedOutputStream, UnsafeByteOperations, WireFormat} +import com.typesafe.config.Config +import io.cloudstate.eventing.{EventDestination => EventDestinationProto, EventSource => EventSourceProto} +import io.cloudstate.protocol.entity.{ClientAction, Metadata, MetadataEntry} import io.cloudstate.proxy.EntityDiscoveryManager.ServableEntity -import io.cloudstate.proxy.protobuf.{Options, Types} -import io.cloudstate.proxy.entity.UserFunctionReply import io.cloudstate.eventing.{Eventing, EventingProto} import 
com.google.protobuf.any.{Any => ProtobufAny} import com.google.protobuf.Descriptors.MethodDescriptor +import io.cloudstate.proxy.UserFunctionRouter +import io.cloudstate.proxy.entity.UserFunctionReply +import io.cloudstate.proxy.protobuf.Options +import org.slf4j.LoggerFactory import scala.collection.JavaConverters._ -import scala.concurrent.Future -import com.typesafe.config.Config -import org.slf4j.LoggerFactory -import Serve.CommandHandler +import scala.concurrent.{ExecutionContext, Future} trait Emitter { - def emit(payload: ProtobufAny, method: MethodDescriptor): Boolean + def emit(payload: ProtobufAny, method: MethodDescriptor, metadata: Option[Metadata]): Future[Done] } object Emitters { - val ignore: Emitter = new Emitter { - override def emit(payload: ProtobufAny, method: MethodDescriptor): Boolean = false - } -} + val ignore: Emitter = (payload: ProtobufAny, method: MethodDescriptor, metadata: Option[Metadata]) => + Future.successful(Done) -/* Commented out temporarily while projection support is developed + def eventDestinationEmitter(eventDestination: EventDestination): Emitter = + new EventDestinationEmitter(eventDestination) -trait EventingSupport { -def createSource(sourceName: String, handler: CommandHandler): Source[UserFunctionCommand, Future[Cancellable]] -def createDestination(destinationName: String, handler: CommandHandler): Flow[ProtobufAny, AnyRef, NotUsed] + private class EventDestinationEmitter(eventDestination: EventDestination) extends Emitter { + override def emit(payload: ProtobufAny, method: MethodDescriptor, metadata: Option[Metadata]): Future[Done] = + eventDestination.emitSingle( + EventingManager.createDesintationEvent(payload, method.getService.getFullName, metadata) + ) + } } object EventingManager { -final val log = LoggerFactory.getLogger("EventingManager") + final val log = LoggerFactory.getLogger("EventingManager") + + final case class EventConsumer(eventSource: EventSourceProto, + entity: ServableEntity, + methods: Map[String, EventConsumerMethod], + outs: Vector[EventDestinationProto]) + + final case class EventConsumerMethod(eventing: Eventing, methodDescriptor: MethodDescriptor, outIndex: Option[Int]) + + private def createConsumers(entities: List[ServableEntity]): List[EventConsumer] = + entities.flatMap { entity => + val unindexedConsumers = entity.serviceDescriptor.getMethods.iterator.asScala + .foldLeft(Map.empty[EventSourceProto, EventConsumer]) { + case (map, method) => + EventingProto.eventing.get(Options.convertMethodOptions(method)) match { + case None => map + case Some(e) => + (e.in, e.out) match { + case (None, None) => map + case (Some(in), Some(out)) if in.source.topic.exists(out.destination.topic.contains) => + throw new IllegalStateException( + s"Endpoint [${method.getFullName}] has the same input topic as output topic [${in.source.topic.getOrElse("")}], this is not allowed." + ) + case (Some(in), _) => + map.get(in) match { + case Some(consumer) => + consumer.methods.get(method.getInputType.getFullName).foreach { conflict => + throw new IllegalStateException( + s"Endpoints [${conflict.methodDescriptor.getFullName}] and [${method.getFullName}] both subscribe to the same event source with the same input type. If you wish to define two independent consumers of the same event source, use the consumer_group annotation on the source to identify them." 
+ ) + } + map.updated(in, + consumer.copy( + methods = consumer.methods.updated(method.getInputType.getFullName, + EventConsumerMethod(e, method, None)) + )) + case None => + map.updated( + in, + EventConsumer(in, + entity, + Map(method.getInputType.getFullName -> EventConsumerMethod(e, method, None)), + Vector.empty) + ) + } + } + } + } + .values -final case class EventMapping private (entity: ServableEntity, routes: Map[MethodDescriptor, Eventing]) + unindexedConsumers.map { consumer => + val outs = consumer.methods.values + .flatMap(_.eventing.out) + .toVector + .distinct -private val noEmitter = Future.successful(Emitters.ignore) + val indexedOuts = outs.zipWithIndex.toMap -// Contains all entities which has at least one endpoint which accepts events -def createEventMappings(entities: Seq[ServableEntity]): Seq[EventMapping] = - entities.flatMap { entity => - val endpoints = - entity.serviceDescriptor.getMethods.iterator.asScala.foldLeft(Map.empty[MethodDescriptor, Eventing]) { + val indexedMethods = consumer.methods.toList.map { + case (inputType, method) => (inputType, method.copy(outIndex = method.eventing.out.flatMap(indexedOuts.get))) + }.toMap + + consumer.copy(methods = indexedMethods, outs = outs) + } + } + + def createEmitters( + entities: List[ServableEntity], + topicSupport: Option[EventingSupport] + )(implicit system: ActorSystem, mat: Materializer): Map[String, Emitter] = { + val outs = entities + .flatMap(_.serviceDescriptor.getMethods.iterator().asScala) + .foldLeft(Map.empty[EventDestinationProto, List[MethodDescriptor]]) { case (map, method) => EventingProto.eventing.get(Options.convertMethodOptions(method)) match { case None => map - case Some(e) => - (e.in, e.out) match { - case (null, null) | ("", "") => map - case (in, out) if in == out => - throw new IllegalStateException( - s"Endpoint '${method.getFullName}' has the same input topic as output topic ('${in}'), this is not allowed." - ) - case (in, out) => - log.debug("EventingProto.events for {}: {} -> {}", - method.getFullName: AnyRef, - in: AnyRef, - out: AnyRef) - map.updated(method, e) + case Some(Eventing(_, Some(out), _)) => + map.get(out) match { + case Some(methods) => + map.updated(out, method :: methods) + case None => + map.updated(out, method :: Nil) } + case _ => map } } - if (endpoints.isEmpty) Nil - else List(EventMapping(entity, endpoints)) + outs.flatMap { + case (dest @ EventDestinationProto(EventDestinationProto.Destination.Topic(topic), _), methods) => + val emitter = topicSupport match { + case Some(support) => Emitters.eventDestinationEmitter(support.createDestination(dest)) + case None => + throw new IllegalArgumentException( + s"Service call [${methods.head.getFullName}] declares an event destination topic of [$topic], but no topic support is configured." + ) + } + methods.map(method => method.getFullName -> emitter) + case (_, methods) => + throw new IllegalArgumentException( + s"Service call [${methods.head.getFullName}] has declared an event out with no topic." + ) + } } -def createSupport(eventConfig: Config)(implicit materializer: Materializer): Option[EventingSupport] = - eventConfig.getString("support") match { - case "none" => - log.info("Eventing support turned off in configuration") - None - case s @ "google-pubsub" => - log.info("Creating google-pubsub eventing support") - Some(new GCPubsubEventingSupport(eventConfig.getConfig(s), materializer)) - case other => - throw new IllegalStateException(s"Check your configuration.
There is no eventing support named: $other") + def createSupport(eventConfig: Config)(implicit system: ActorSystem, + materializer: Materializer): Option[EventingSupport] = + eventConfig.getString("support") match { + case "none" => + log.info("Eventing support turned off in configuration") + None + case s @ "google-pubsub" => + log.info("Creating google-pubsub eventing support") + Some(new GCPubsubEventingSupport(eventConfig.getConfig(s))) + case other => + throw new IllegalStateException(s"Check your configuration. There is no eventing support named: $other") + } + + def startConsumers( + router: UserFunctionRouter, + entities: List[ServableEntity], + topicSupport: Option[EventingSupport], + eventLogSupport: Option[EventingSupport], + projectionSupport: Option[ProjectionSupport] + )(implicit ec: ExecutionContext): Future[Cancellable] = { + + val consumers = createConsumers(entities) + val preparedTables = if (consumers.exists(_.eventSource.source.isEventLog)) { + projectionSupport.fold(Future.successful(Done.done()))(_.prepare()) + } else Future.successful(Done) + + preparedTables.map { _ => + consumers match { + case Nil => Cancellable.alreadyCancelled + case consumers => + val running = consumers.map(startConsumer(router, topicSupport, eventLogSupport)) + + new Cancellable { + override def cancel(): Boolean = + running.foldLeft(true)((success, cancellable) => cancellable.cancel() && success) + + override def isCancelled: Boolean = running.forall(_.isCancelled) + } + } + } } -def createStreams(router: UserFunctionRouter, - entityDiscoveryClient: EntityDiscoveryClient, - entities: Seq[ServableEntity], - support: EventingSupport): Option[RunnableGraph[(Emitter, Future[Done])]] = - createEventMappings(entities) match { - case Nil => None - case eventMappings => - val allEligibleOutputsByMethodDescriptor = - eventMappings - .flatMap(_.routes.collect { - case (m, e) if e.out != "" => (m.getFullName, e.out) - }) - .toMap - - type TopicName = String - type Record = (TopicName, ProtobufAny) - - val inNOutBurger: Seq[ - (Option[(TopicName, Source[Record, Future[Cancellable]])], Option[(TopicName, Flow[Record, AnyRef, NotUsed])]) - ] = - for { - EventMapping(entity, routes) <- eventMappings - (mdesc, eventing) <- routes.toSeq // Important since we do not want dedupe that we get from the map otherwise - } yield { - log.info("Creating route for {}", eventing) - val commandHandler = new CommandHandler(entity, mdesc, router, noEmitter, entityDiscoveryClient, log) // Could we reuse these from Serve?
- - val in = Option(eventing.in).collect({ - case topic if topic != "" => - val source = - support - .createSource(topic, commandHandler) - .via(commandHandler.flow) - .collect({ case any if eventing.out != "" => (eventing.out, any) }) //Without an out there is nothing to persist - (topic, source) - }) - - val out = Option(eventing.out).collect({ - case topic if topic != "" => - val dest = Flow[Record] - .map(_._2) - .via(support.createDestination(topic, commandHandler)) - .dropWhile(_ => true) - (topic, dest) - }) - - (in, out) + private def startConsumer( + router: UserFunctionRouter, + topicSupport: Option[EventingSupport], + eventLogSupport: Option[EventingSupport] + )(consumer: EventConsumer): Cancellable = { + + val maybeEventingSupport = consumer.eventSource match { + case EventSourceProto(_, EventSourceProto.Source.Topic(_), _) => + topicSupport + case EventSourceProto(_, EventSourceProto.Source.EventLog(_), _) => + eventLogSupport + case EventSourceProto(_, EventSourceProto.Source.Empty, _) => + throw new IllegalArgumentException( + s"Eventing consumer [${consumer.methods.head._1}] has declared an input with no source." + ) + } + + val eventSource = maybeEventingSupport match { + case Some(eventingSupport) if eventingSupport.supportsSource => + eventingSupport.createSource(consumer.eventSource, consumer.entity.serviceName) + case Some(eventingSupport) => + throw new IllegalArgumentException( + s"Eventing consumer [${consumer.methods.head._1}] has declared an input of [${eventingSupport.name}], but this does not support being used as an event source." + ) + case None => + throw new IllegalArgumentException( + s"Eventing consumer [${consumer.methods.head._1}] has declared a [${consumer.eventSource.source}] event source, but this event source isn't supported." + ) + } + + val eventDestinations = consumer.outs.map { + case dest @ EventDestinationProto(EventDestinationProto.Destination.Topic(topic), _) => + topicSupport match { + case Some(support) => support.createDestination(dest) + case None => + throw new IllegalArgumentException( + s"Eventing consumer has declared an output topic [$topic], but no topic eventing support has been provided." + ) + } + case EventDestinationProto(EventDestinationProto.Destination.Empty, _) => + throw new IllegalArgumentException(s"Eventing consumer has declared an output with no destination.") + } + + val killSwitch = eventSource.run( + entityToCommand[eventSource.SourceEventRef](consumer) via + routeCommands(router) via + forwardToOutputs(consumer, eventDestinations) + ) + + new Cancellable { + private val cancelled = new AtomicBoolean() + + override def cancel(): Boolean = + if (cancelled.compareAndSet(false, true)) { + killSwitch.cancel() + true + } else { + false } - val sources = inNOutBurger.collect({ case (Some(in), _) => in }) + override def isCancelled: Boolean = cancelled.get() + } + } - val deadLetters = - Flow[Record] - .map({ - case (topic, msg) => - log.warn( - s"Message destined for eventing topic '${topic}' discarded since no such topic is found in the configuration." - ) - msg - }) - .dropWhile(_ => true) + /** + * This flow is responsible for turning an event that has come in from an event source into a command. + * + * The command is paired with information about which method it should be routed to, and where the result should be + * output to.
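+ *
+ * For example (`com.example.ItemAdded` is an illustrative type name, not part of this change): an event
+ * with a datacontenttype of `application/protobuf; proto=com.example.ItemAdded` is decoded to
+ * `ProtobufAny("type.googleapis.com/com.example.ItemAdded", data)` and routed to the consumer method whose
+ * input type is `com.example.ItemAdded`, falling back to a method that accepts `google.protobuf.Any`.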
+ */ + private def entityToCommand[Ref](consumer: EventConsumer): Flow[SourceEvent[Ref], MessageIn[Ref], NotUsed] = + Flow[SourceEvent[Ref]].map { sourceEvent => + val cloudEvent = sourceEvent.event + + val messageAny = MediaType.parse(cloudEvent.datacontenttype) match { + case Right(protobuf) if protobuf.isApplication && ProtobufMediaSubTypes(protobuf.subType) => + val messageType = protobuf.params + .get("proto") + .orElse(protobuf.params.get("messageType")) + .getOrElse(cloudEvent.`type`) + ProtobufAny("type.googleapis.com/" + messageType, cloudEvent.data.getOrElse(ByteString.EMPTY)) + + case Right(any) if any.isApplication && any.subType == ProtobufAnyMediaSubType => + // This is the content type that event logging will use + ProtobufAny(cloudEvent.`type`, cloudEvent.data.getOrElse(ByteString.EMPTY)) + + case Right(MediaTypes.`application/json`) => + encodeJsonToAny(cloudEvent.data, cloudEvent.`type`) + + case Right(typedJson) if typedJson.isApplication && typedJson.subType.endsWith("+json") => + encodeJsonToAny(cloudEvent.data, cloudEvent.`type`) + + case Right(utf8) if utf8.isText && utf8.params.get("charset").forall(_ == "utf-8") => + // Fast case for UTF-8 so we don't have to decode and reencode it + encodeUtf8StringBytesToAny(cloudEvent.data) + + case Right(string) if string.isText => + encodeStringToAny(cloudEvent.data.getOrElse(ByteString.EMPTY).toString(string.params("charset"))) + + case _ => + encodeBytesToAny(cloudEvent.data) + } - val destinations = - ("", deadLetters) +: inNOutBurger.collect({ case (_, Some(out)) => out }) + // Select a method + val maybeConsumerMethod = + if (messageAny.typeUrl.startsWith("p.cloudstate.io/") || messageAny.typeUrl.startsWith("json.cloudstate.io/")) { + consumer.methods.get(ProtobufAny.scalaDescriptor.fullName) + } else { + val desiredType = messageAny.typeUrl.split("/", 2).last + consumer.methods.get(desiredType).orElse(consumer.methods.get(ProtobufAny.scalaDescriptor.fullName)) + } - val emitter = - Source.queue[Record](destinations.size * 128, OverflowStrategy.backpressure) + val consumerMethod = maybeConsumerMethod match { + case Some(method) => + method + case None => + throw new IllegalArgumentException( + s"No method can be found to handle protobuf type of [${messageAny.typeUrl}] on input ${consumer.eventSource}. Either declare a method for this type, or declare a method that accepts google.protobuf.Any." 
+ ) + } - val destinationMap = destinations.map(_._1).sorted.zipWithIndex.toMap - val destinationSelector = - (r: Record) => destinationMap.get(r._1).getOrElse(destinationMap("")) // Send to deadLetters if no match + MessageIn(sourceEvent.ref, + consumerMethod, + UserFunctionRouter.Message(messageAny, cloudEventToMetadata(cloudEvent))) + } + + private def cloudEventToMetadata(cloudEvent: CloudEvent): Metadata = { + import MetadataEntry.Value.StringValue + // We use the HTTP binary mode transcoding rules + val builder = Seq.newBuilder[MetadataEntry] + builder += MetadataEntry("ce-id", StringValue(cloudEvent.id)) + builder += MetadataEntry("ce-source", StringValue(cloudEvent.source)) + builder += MetadataEntry("ce-specversion", StringValue(cloudEvent.specversion)) + builder += MetadataEntry("ce-type", StringValue(cloudEvent.`type`)) + builder += MetadataEntry("Content-Type", StringValue(cloudEvent.datacontenttype)) + + cloudEvent.dataschema.foreach(v => builder += MetadataEntry("ce-dataschema", StringValue(v))) + cloudEvent.subject.foreach(v => builder += MetadataEntry("ce-subject", StringValue(v))) + cloudEvent.time.foreach(v => builder += MetadataEntry("ce-time", StringValue(v.toString))) + + Metadata(builder.result()) + } - val eventingFlow = Flow - .fromGraph(GraphDSL.create() { implicit b => + /** + * This flow is responsible for routing commands through the router. + */ + private def routeCommands[Ref](router: UserFunctionRouter): Flow[MessageIn[Ref], RouteResult[Ref], NotUsed] = + Flow[MessageIn[Ref]].flatMapConcat { + case MessageIn(eventSourceRef, consumerMethod, message) => + if (consumerMethod.methodDescriptor.isServerStreaming || consumerMethod.methodDescriptor.isClientStreaming) { + Source + .single(message) + .via( + router.handle(consumerMethod.methodDescriptor.getService.getFullName, + consumerMethod.methodDescriptor.getName, + Metadata.defaultInstance) + ) + .map(reply => ResultPart(consumerMethod.outIndex, reply)) + .concat(Source.single(ResultEnd(eventSourceRef))) + } else { + Source + .future( + router.handleUnary(consumerMethod.methodDescriptor.getService.getFullName, + consumerMethod.methodDescriptor.getName, + message) + ) + .mapConcat { reply => + List(ResultPart(consumerMethod.outIndex, reply), ResultEnd(eventSourceRef)) + } + } + + } + + /** + * This flow is responsible for forwarding routing result replies on to the configuration destinations. 
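+ *
+ * Internally, a Partition fans each ResultPart out to the flow of the destination identified by its out
+ * index, ResultEnds are routed to a bypass port, and a MergeSequence restores the original ordering, so a
+ * source event's reference is only emitted for acknowledgement after the replies that preceded it have
+ * been published.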
+ */ + private def forwardToOutputs[Ref](consumer: EventConsumer, + destinations: Vector[EventDestination]): Flow[RouteResult[Ref], Ref, NotUsed] = + if (consumer.outs.isEmpty) { + Flow[RouteResult[Ref]].collect { + case ResultEnd(ref) => ref + } + } else { + Flow[RouteResult[Ref]].zipWithIndex + .via(Flow.fromGraph(GraphDSL.create() { implicit b => import GraphDSL.Implicits._ - val mergeForPublish = b.add(Merge[Record](sources.size + 1)) // 1 + for emitter + val bypassPort = consumer.outs.size + val ports = bypassPort + 1 + + val broadcast = b.add( + Partition[(RouteResult[Ref], Long)](ports, + result => + result._1 match { + case ResultPart(outIdx, _) => outIdx.getOrElse(bypassPort) + case _ => bypassPort + }) + ) + + val merge = b.add(new MergeSequence[(RouteResult[Ref], Long)](ports)(_._2)) + + destinations.zipWithIndex.foreach { + case (dest, outPort) => + val split = b.add(Broadcast[(RouteResult[Ref], Long)](2)) + val zip = b.add(Zip[AnyRef, (RouteResult[Ref], Long)]()) + + broadcast.out(outPort) ~> split.in + + split.out(0).collect { + case (ResultPart(_, UserFunctionReply(Some(ClientAction(ClientAction.Action.Reply(reply), _)), _, _)), + _) => + createDesintationEvent(reply.payload.get, consumer.entity.serviceName, reply.metadata) + case (ResultPart(_, other), _) => + throw new IllegalStateException(s"Reply from router did not have a reply client action: $other") + // Shouldn't happen: + case t => throw new IllegalStateException(s"result end routed through output flow? $t") + } ~> dest.eventStreamOut ~> zip.in0 + split.out(1) ~> zip.in1 + + zip.out.map(_._2) ~> merge.in(outPort) + } - val routeToDestination = - b.add(Partition[Record](destinations.size, destinationSelector)) + broadcast.out(bypassPort) ~> merge.in(bypassPort) + FlowShape(broadcast.in, merge.out) + })) + .collect { + case (ResultEnd(ref), _) => ref + } + } + + def createDesintationEvent(payload: ProtobufAny, serviceName: String, maybeMetadata: Option[Metadata]) = { + val metadata = maybeMetadata + .getOrElse(Metadata.defaultInstance) + .entries + .collect { + case MetadataEntry(key, MetadataEntry.Value.StringValue(value), _) => key -> value + } + .toMap + + val (ceType, contentType, bytes) = payload.typeUrl match { + case json if json.startsWith("json.cloudstate.io/") => + (json.stripPrefix("json.cloudstate.io/"), "application/json", decodeBytes(payload)) + case "p.cloudstate.io/string" => + ("", "text/plain; charset=utf-8", decodeBytes(payload)) + case "p.cloudstate.io/bytes" => + ("", "application/octet-stream", decodeBytes(payload)) + case generic => + (generic.dropWhile(_ != '/').drop(1), "application/protobuf", payload.value) + } + + DestinationEvent( + CloudEvent( + id = metadata.getOrElse("ce-id", UUID.randomUUID().toString), + source = metadata.getOrElse("ce-source", serviceName), + specversion = metadata.getOrElse("ce-specversion", "1.0"), + `type` = metadata.getOrElse("ce-type", ceType), + datacontenttype = metadata.getOrElse("Content-Type", contentType), + dataschema = metadata.get("ce-dataschema"), + subject = metadata.get("ce-subject"), + // todo the time can be any RFC3339 time string, Instant.parse only parses ISO8601, which is just one allowable + // format in RFC3339 + time = metadata.get("ce-time").map(Instant.parse).orElse(Some(Instant.now())), + data = Some(bytes) + ) + ) + } - val mergeForExit = b.add(Merge[AnyRef](destinations.size)) + private case class MessageIn[Ref](eventSourceRef: Ref, + consumerMethod: EventConsumerMethod, + message: UserFunctionRouter.Message) - sources.zipWithIndex 
foreach { - case ((topicName, source), idx) => - b.add(source).out ~> mergeForPublish.in(idx + 1) // 0 we keep for emitter - } + private sealed trait RouteResult[+Ref] - mergeForPublish.out ~> routeToDestination.in + private case class ResultPart(outIdx: Option[Int], out: UserFunctionReply) extends RouteResult[Nothing] - destinations foreach { - case (topicName, flow) => - routeToDestination.out(destinationMap(topicName)) ~> b.add(flow) ~> mergeForExit - } + private case class ResultEnd[Ref](sourceEventRef: Ref) extends RouteResult[Ref] - FlowShape(mergeForPublish.in(0), mergeForExit.out) - }) - - Some( - emitter - .via(eventingFlow) - .toMat(Sink.ignore)((queue, future) => { - val emitter = new Emitter { - override def emit(event: ProtobufAny, method: MethodDescriptor): Boolean = - if (event.value.isEmpty) false - else { - allEligibleOutputsByMethodDescriptor - .get(method.getFullName) // FIXME Check expected type of event compared to method.getOutputType - .map(out => queue.offer((out, event))) // FIXME handle this Future - .isDefined - } - } - (emitter, future) - }) - ) + private val ProtobufMediaSubTypes = Set("protobuf", "x-protobuf", "vnd.google.protobuf") + val ProtobufAnyMediaSubType = "vnd.cloudstate.protobuf.any" + + private def encodeByteArray(maybeBytes: Option[ByteString]) = maybeBytes match { + case None => ByteString.EMPTY + case Some(bytes) if bytes.isEmpty => ByteString.EMPTY + case Some(bytes) => + // Create a byte array the right size. It needs to have the tag and enough space to hold the length of the data + // (up to 5 bytes). + // Length encoding consumes 1 byte for every 7 bits of the field + val bytesLengthFieldSize = ((31 - Integer.numberOfLeadingZeros(bytes.size())) / 7) + 1 + val byteArray = new Array[Byte](1 + bytesLengthFieldSize) + val stream = CodedOutputStream.newInstance(byteArray) + stream.writeTag(1, WireFormat.WIRETYPE_LENGTH_DELIMITED) + stream.writeUInt32NoTag(bytes.size()) + UnsafeByteOperations.unsafeWrap(byteArray).concat(bytes) } -} - */ + private def encodeBytesToAny(bytes: Option[ByteString]): ProtobufAny = + ProtobufAny("p.cloudstate.io/bytes", encodeByteArray(bytes)) + + private def encodeJsonToAny(bytes: Option[ByteString], jsonType: String): ProtobufAny = + ProtobufAny("json.cloudstate.io/" + jsonType, encodeByteArray(bytes)) + + private def encodeUtf8StringBytesToAny(bytes: Option[ByteString]): ProtobufAny = + ProtobufAny("p.cloudstate.io/string", encodeByteArray(bytes)) + + private def encodeStringToAny(string: String): ProtobufAny = { + val builder = ByteString.newOutput() + val stream = CodedOutputStream.newInstance(builder) + stream.writeString(1, string) + ProtobufAny("p.cloudstate.io/string", builder.toByteString) + } + + private def decodeBytes(payload: ProtobufAny): ByteString = { + val stream = payload.value.newCodedInput() + @annotation.tailrec + def findField(): ByteString = + stream.readTag() match { + case 0 => + // 0 means EOF + ByteString.EMPTY + case field1 if WireFormat.getTagFieldNumber(field1) == 1 => + if (WireFormat.getTagWireType(field1) == WireFormat.WIRETYPE_LENGTH_DELIMITED) { + stream.readBytes() + } else { + throw new ParseException("Expected length delimited field, tag was: " + field1) + } + case other => + stream.skipField(other) + findField() + } + findField() + } + +} diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/eventing/EventingSupport.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/eventing/EventingSupport.scala new file mode 100644 index 000000000..c35e40702 --- /dev/null +++
b/proxy/core/src/main/scala/io/cloudstate/proxy/eventing/EventingSupport.scala @@ -0,0 +1,114 @@ +/* + * Copyright 2019 Lightbend Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.cloudstate.proxy.eventing + +import java.time.Instant + +import akka.actor.Cancellable +import akka.{Done, NotUsed} +import akka.http.scaladsl.model.HttpEntity +import akka.stream.KillSwitch +import akka.stream.scaladsl.Flow +import com.google.protobuf.ByteString +import io.cloudstate.eventing.{EventDestination => EventDestinationProto, EventSource => EventSourceProto} + +import scala.concurrent.Future + +/** + * Eventing support. + * + * Different eventing implementations should implement this. + */ +trait EventingSupport { + def name: String + + /** Can this eventing support implementation be used as a source? */ + def supportsSource: Boolean + + /** Create a source for the given event source descriptor, consuming on behalf of the given service. */ + def createSource(source: EventSourceProto, serviceName: String): EventSource + + /** Can this eventing support implementation be used as a destination? */ + def supportsDestination: Boolean + + /** Create a destination for the given event destination descriptor. */ + def createDestination(destination: EventDestinationProto): EventDestination +} + +/** An event source. */ +trait EventSource { + + /** + * A type used to refer to the source event. The type of this and what it contains depends on the eventing + * implementation; it could be the whole event, or it could just be an identifier. The purpose of this is to + * allow the reference to be emitted from the flow that runs the stream, so that the source event can be + * acknowledged. + */ + type SourceEventRef + + /** Run this event source with the given flow. */ + def run(flow: Flow[SourceEvent[SourceEventRef], SourceEventRef, _]): Cancellable +} + +/** An event destination. */ +trait EventDestination { + + /** + * The flow that consumes events to publish. + * + * This flow must produce one element for each event consumed; that element signals that the corresponding + * consumed event has been successfully published, so the upstream source can then be acknowledged. + * Order matters: the flow must not emit elements for events received later in the stream until the earlier events + * in the stream have been successfully published and their corresponding acknowledgement elements emitted. + * + * In general, implementations should not attempt to retry when publishing fails. Rather, if a failure in publishing + * occurs, the stream should be terminated with an error. Cloudstate will then handle retries, using exponential + * backoffs, or routing to dead letters, etc. + */ + def eventStreamOut: Flow[DestinationEvent, AnyRef, NotUsed] + + /** + * Emit a single destination event. + * + * This is used when emitting events from a service call that have not come from another event source, e.g. they + * have come from a gRPC call.
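+   *
+   * A minimal sketch (hedged: `destination` and `cloudEvent` are assumed to be in scope):
+   * {{{
+   * val acked: Future[Done] = destination.emitSingle(DestinationEvent(cloudEvent))
+   * }}}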
+ */ + def emitSingle(destinationEvent: DestinationEvent): Future[Done] +} + +/** An event produced by an event source. */ +case class SourceEvent[Ref]( + event: CloudEvent, + /** A reference to this source event */ + ref: Ref +) + +/** An event to be published to a destination */ +case class DestinationEvent( + event: CloudEvent +) + +case class CloudEvent( + id: String, + source: String, + specversion: String, + `type`: String, + datacontenttype: String, + dataschema: Option[String], + subject: Option[String], + time: Option[Instant], + data: Option[ByteString] +) diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/eventing/GooglePubsubEventing.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/eventing/GooglePubsubEventing.scala index 6e0e11906..0e110cbb4 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/eventing/GooglePubsubEventing.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/eventing/GooglePubsubEventing.scala @@ -16,20 +16,16 @@ package io.cloudstate.proxy.eventing -/* +import java.time.Instant +import java.time.format.DateTimeFormatter + import com.typesafe.config.{Config, ConfigFactory} -import akka.NotUsed +import akka.{Done, NotUsed} import akka.actor.{ActorSystem, Cancellable} import akka.grpc.GrpcClientSettings -import akka.stream.Materializer -import akka.stream.scaladsl.{Flow, Keep, Sink, Source} -import io.cloudstate.proxy.Serve.CommandHandler -import io.cloudstate.proxy.EntityDiscoveryManager.ServableEntity -import io.cloudstate.proxy.entity.UserFunctionCommand -import io.cloudstate.eventing.Eventing -import com.google.protobuf.any.{Any => ProtobufAny} -import com.google.protobuf.{ByteString => ProtobufByteString} -import com.google.protobuf.Descriptors.MethodDescriptor +import akka.stream.{KillSwitch, KillSwitches, Materializer, OverflowStrategy, SourceShape} +import akka.stream.scaladsl.{Broadcast, Concat, Flow, GraphDSL, Keep, RestartSource, Sink, Source} +import io.cloudstate.eventing.{EventDestination => EventDestinationProto, EventSource => EventSourceProto} import io.grpc.{ CallCredentials => gRPCCallCredentials, Status => gRPCStatus, @@ -38,24 +34,25 @@ import io.grpc.{ import io.grpc.auth.MoreCallCredentials import com.google.auth.oauth2.GoogleCredentials import com.google.pubsub.v1.pubsub.{ - AcknowledgeRequest, PublishRequest, - PublishResponse, PubsubMessage, ReceivedMessage, StreamingPullRequest, + StreamingPullResponse, Subscription, Topic, PublisherClient => ScalaPublisherClient, SubscriberClient => ScalaSubscriberClient } import java.util.Collections +import java.util.concurrent.atomic.AtomicBoolean -import akka.util.ByteString -import io.cloudstate.protocol.entity.Metadata +import com.google.protobuf.ByteString +import io.cloudstate.eventing -import scala.util.Try -import scala.concurrent.{Future, Promise} +import scala.collection.immutable +import scala.util.{Success, Try} +import scala.concurrent.{ExecutionContext, Future, Promise} import scala.concurrent.duration._ /** @@ -86,24 +83,24 @@ final class PubSubSettings private ( } /** - * Endpoint hostname where the gRPC connection is made. - */ + * Endpoint hostname where the gRPC connection is made. + */ def withHost(host: String): PubSubSettings = copy(host = host) /** - * Endpoint port where the gRPC connection is made. - */ + * Endpoint port where the gRPC connection is made. 
+ */ def withPort(port: Int): PubSubSettings = copy(port = port) /** - * A filename on the classpath which contains the root certificate authority - * that is going to be used to verify certificate presented by the gRPC endpoint. - */ + * A filename on the classpath which contains the root certificate authority + * that is going to be used to verify certificate presented by the gRPC endpoint. + */ def withRootCa(rootCa: String): PubSubSettings = copy(rootCa = Some(rootCa)) /** - * Credentials that are going to be used for gRPC call authorization. - */ + * Credentials that are going to be used for gRPC call authorization. + */ def withCallCredentials(callCredentials: gRPCCallCredentials): PubSubSettings = copy(callCredentials = Some(callCredentials)) @@ -114,28 +111,28 @@ final class PubSubSettings private ( new PubSubSettings(host, port, rootCa, callCredentials) /** - * Creates a GrpcClientSettings from this PubSubSettings - */ + * Creates a GrpcClientSettings from this PubSubSettings + */ def createClientSettings()(implicit sys: ActorSystem): GrpcClientSettings = { val sslConfig = rootCa.fold("") { rootCa => s""" - |ssl-config { - | disabledKeyAlgorithms = [] - | trustManager = { - | stores = [ - | { type = "PEM", path = "$rootCa", classpath = true } - | ] - | } - |}""".stripMargin + |ssl-config { + | disabledKeyAlgorithms = [] + | trustManager = { + | stores = [ + | { type = "PEM", path = "$rootCa", classpath = true } + | ] + | } + |}""".stripMargin } val akkaGrpcConfig = s""" - |host = "$host" - |port = $port - | - |$sslConfig - |""".stripMargin + |host = "$host" + |port = $port + | + |$sslConfig + |""".stripMargin val settings = //TODO consider using Discovery and/or other settings from: https://github.com/akka/akka-grpc/blob/master/runtime/src/main/resources/reference.conf#L36 GrpcClientSettings.fromConfig( @@ -156,8 +153,11 @@ object GCPubsubEventingSupport { final val USING_CRD = "using-crd" } -class GCPubsubEventingSupport(config: Config, materializer: Materializer) extends EventingSupport { +class GCPubsubEventingSupport(config: Config)(implicit materializer: Materializer, system: ActorSystem) + extends EventingSupport { + import GCPubsubEventingSupport._ + import system.dispatcher final val projectId: String = config.getString("project-id") final val pollInterval: FiniteDuration = config.getDuration("poll-interval").toMillis.millis @@ -193,10 +193,6 @@ class GCPubsubEventingSupport(config: Config, materializer: Materializer) extend // Create the gRPC clients used to communicate with Google Pubsub final val (subscriberClient, publisherClient) = { - implicit val m = materializer - implicit val s = materializer.system - implicit val d = s.dispatcher - val clientSettings = settings.createClientSettings() // We're reusing the same clients for all communication @@ -204,151 +200,284 @@ class GCPubsubEventingSupport(config: Config, materializer: Materializer) extend val publisherClient = ScalaPublisherClient(clientSettings) // Make sure that we don't leak connections - s.registerOnTermination(subscriberClient.close()) - s.registerOnTermination(publisherClient.close()) + system.registerOnTermination(subscriberClient.close()) + system.registerOnTermination(publisherClient.close()) (subscriberClient, publisherClient) } private[this] val batchResults = if (downstreamBatchDeadline > 0.seconds || downstreamBatchSize > 1) { - Flow[ProtobufAny] - .map(any => PubsubMessage(data = any.toByteString)) + Flow[DestinationEvent] + .map(transformDestinationEvent) .groupedWithin(downstreamBatchSize, 
downstreamBatchDeadline) - } else Flow[ProtobufAny].map(any => PubsubMessage(data = any.toByteString) :: Nil) + } else Flow[DestinationEvent].map(event => transformDestinationEvent(event) :: Nil) - private[this] final def createManualSource( + private def sourceToSourceToFlow[In, Out, MOut](f: Source[In, NotUsed] => Source[Out, MOut]): Flow[In, Out, NotUsed] = + Flow[In].prefixAndTail(0).flatMapConcat { case (Nil, in) => f(in) } + + private[this] final def runManualFlow( subscription: String, - commandHandler: CommandHandler - ): Source[UserFunctionCommand, Future[Cancellable]] = { - val cancellable = Promise[Cancellable] + processingFlow: Flow[SourceEvent[String], String, _] + ): KillSwitch = { val request = StreamingPullRequest(subscription = subscription, streamAckDeadlineSeconds = upstreamAckDeadlineSeconds) - val pull = Source - .single(request) - .concat( - Source.tick(0.seconds, pollInterval, request.withSubscription("")).mapMaterializedValue(cancellable.success) - ) + val streamingPull: Flow[StreamingPullRequest, StreamingPullResponse, NotUsed] = + sourceToSourceToFlow(subscriberClient.streamingPull) + + val source = RestartSource.withBackoff(3.seconds, 30.seconds, 0.2) { () => + Source.fromGraph(GraphDSL.create[SourceShape[Nothing]]() { implicit builder => + import GraphDSL.Implicits._ + + val concat = builder.add(Concat[StreamingPullRequest](2)) + val outSplitter = builder.add(Broadcast[StreamingPullResponse](2, eagerCancel = true)) + + val responseToEvents = Flow[StreamingPullResponse] + .mapConcat(_.receivedMessages.toVector) // Note: receivedMessages is most likely a Vector already due to impl, so should be a noop + .map(transformReceivedMessage(subscription)) + + val acksToRequest = Flow[String] + .groupedWithin(10, upstreamAckDeadline / 2) + .map(ackIds => StreamingPullRequest(ackIds = ackIds)) + + val circularDeadlockBreaker = Flow[StreamingPullRequest].buffer(1, OverflowStrategy.backpressure) + + // This is the main line through the graph, where we send our pull request for the subscription down the + // streaming pull gRPC request, convert to events, process, convert acks to requests (which acknowledge them), + // then send those back to the streaming pull via the concat. + // In the middle we have the outSplitter; it's positioned after the streamingPull, so that when it cancels, + // it immediately cancels the pull, but allows the events in progress to continue to be processed. + Source.single(request) ~> + concat ~> + streamingPull ~> + outSplitter ~> + responseToEvents ~> + processingFlow ~> + acksToRequest ~> + circularDeadlockBreaker ~> + concat + + // Meanwhile, we peel off the output using outSplitter, and ignore everything in it. + // This allows us to return a Source that will emit a failure when a failure occurs anywhere in the stream, + // so the RestartSource can handle it and restart. It also allows us to cancel the stream via that source when + // we shut down.
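+        // collect with an empty PartialFunction drops every element while still propagating completion
+        // and failure downstream, which is all this port needs to do.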
+ val out = outSplitter.collect(PartialFunction.empty) + + SourceShape(out.outlet) + }) + } - val ackSink = - Flow[ReceivedMessage] - .map(_.ackId) - .groupedWithin(10, upstreamAckDeadline / 2) // TODO adjust these - .mapAsyncUnordered(1 /*parallelism*/ )( - ackIds => subscriberClient.acknowledge(AcknowledgeRequest(subscription = subscription, ackIds = ackIds)) - ) - .toMat(Sink.ignore)(Keep.right) - - subscriberClient // FIXME add retries, backoff etc - .streamingPull(pull) // TODO Consider Source.repeat(()).flatMapConcat(_ => subscriberClient.streamingPull(pull)) - .mapConcat(_.receivedMessages.toVector) // Note: receivedMessages is most likely a Vector already due to impl, so should be a noop - .alsoTo(ackSink) // at-most-once // FIXME Add stats generation/collection so we can track progress here - .collect({ - case ReceivedMessage(_, Some(msg), _) => - commandHandler - .deserialize(Metadata.defaultInstance)(ByteString.fromByteBuffer(msg.data.asReadOnlyByteBuffer())) - }) // TODO - investigate ProtobufAny.fromJavaAny(PbAnyJava.parseFrom(msg.data)) - .mapMaterializedValue(_ => cancellable.future) + source + .viaMat(KillSwitches.single)(Keep.right) + .to(Sink.ignore) + .run() } - private[this] final def createByProxyManagedSource( + private[this] final def runByProxyManagedFlow( sourceName: String, subscription: String, - commandHandler: CommandHandler - ): Source[UserFunctionCommand, Future[Cancellable]] = - Source - .setup { (mat, attrs) => - val topic = s"projects/${projectId}/topics/${sourceName}" - implicit val ec = mat.system.dispatcher - val t = Topic(topic) - val s = Subscription( - name = subscription, - topic = topic, - pushConfig = None, - ackDeadlineSeconds = upstreamAckDeadlineSeconds, - retainAckedMessages = false, - messageRetentionDuration = None // TODO configure this? - ) - - Source - .fromFutureSource( - for { - _ <- publisherClient - .createTopic(t) - .recover({ - case ex: gRPCStatusRuntimeException if ex.getStatus.getCode == gRPCStatus.Code.ALREADY_EXISTS => - t - }) - _ <- subscriberClient - .createSubscription(s) - .recover({ - case ex: gRPCStatusRuntimeException if ex.getStatus.getCode == gRPCStatus.Code.ALREADY_EXISTS => - s - }) - } yield createManualSource(subscription, commandHandler) - ) - } - .mapMaterializedValue(_.flatten.flatten) + processingFlow: Flow[SourceEvent[String], String, _] + ): Future[KillSwitch] = { + val topic = s"projects/${projectId}/topics/${sourceName}" + val t = Topic(topic) + val s = Subscription( + name = subscription, + topic = topic, + pushConfig = None, + ackDeadlineSeconds = upstreamAckDeadlineSeconds, + retainAckedMessages = false, + messageRetentionDuration = None // TODO configure this? 
+ ) + + for { + _ <- publisherClient + .createTopic(t) + .recover({ + case ex: gRPCStatusRuntimeException if ex.getStatus.getCode == gRPCStatus.Code.ALREADY_EXISTS => + t + }) + _ <- subscriberClient + .createSubscription(s) + .recover({ + case ex: gRPCStatusRuntimeException if ex.getStatus.getCode == gRPCStatus.Code.ALREADY_EXISTS => + s + }) + } yield runManualFlow(subscription, processingFlow) + } - private[this] final def createUsingCrdManagedSource( + private[this] final def createUsingCrdManagedFlow( sourceName: String, subscription: String, - commandHandler: CommandHandler - ): Source[UserFunctionCommand, Future[Cancellable]] = + processingFlow: Flow[SourceEvent[String], String, _] + ): Future[KillSwitch] = throw new IllegalStateException("NOT IMPLEMENTED YET") // FIXME IMPLEMENT THIS: create CRD-requests - override final def createSource(sourceName: String, - commandHandler: CommandHandler): Source[UserFunctionCommand, Future[Cancellable]] = { - val subscription = s"projects/${projectId}/subscriptions/${sourceName}_${commandHandler.fullCommandName}" - manageTopicsAndSubscriptions match { - case MANUALLY => createManualSource(subscription, commandHandler) - case USING_CRD => createUsingCrdManagedSource(sourceName, subscription, commandHandler) - case BY_PROXY => createByProxyManagedSource(sourceName, subscription, commandHandler) + override def name: String = "Google PubSub" + + override def supportsSource: Boolean = true + + override def createSource(source: EventSourceProto, serviceName: String): EventSource = new EventSource { + override type SourceEventRef = String + + override def run(flow: Flow[SourceEvent[String], String, _]): Cancellable = { + val consumerGroup = source.consumerGroup match { + case "" => serviceName + case cg => cg + } + val subscription = source.source match { + case EventSourceProto.Source.Topic(topic) => + s"projects/$projectId/subscriptions/${topic}_$consumerGroup" + case other => + throw new IllegalArgumentException(s"Google PubSub source unable to be used to serve $other") + + } + val killSwitch = manageTopicsAndSubscriptions match { + case MANUALLY => runManualFlow(subscription, flow) + case USING_CRD => futureKillSwitch(createUsingCrdManagedFlow(consumerGroup, subscription, flow)) + case BY_PROXY => futureKillSwitch(runByProxyManagedFlow(consumerGroup, subscription, flow)) + } + + new Cancellable { + private val running = new AtomicBoolean() + + override def cancel(): Boolean = + if (running.compareAndSet(false, true)) { + killSwitch.shutdown() + true + } else { + false + } + + override def isCancelled: Boolean = running.get() + } + } + } - private[this] final def createDestination(topic: String): Flow[ProtobufAny, AnyRef, NotUsed] = - batchResults - .mapAsyncUnordered(1 /*parallelism*/ )( - batch => - publisherClient.publish(PublishRequest(topic = topic, messages = batch)) // FIXME add retries, backoff etc - ) + private def transformReceivedMessage(source: String)(receivedMessage: ReceivedMessage): SourceEvent[String] = { + val message = + receivedMessage.message.getOrElse(throw new IllegalArgumentException("Received message has no message")) + // Using the spec here, which was not merged, to handle cloudevents: + // https://github.com/google/knative-gcp/pull/1 + // case sensitivity?
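+    // In the binary content mode described by that draft spec, the CloudEvent attributes arrive as PubSub
+    // message attributes prefixed with "ce-", while the event payload is carried in the message data.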
+ val maybeContentType = message.attributes.get("Content-Type").orElse(message.attributes.get("content-type")) + val cloudEvent = maybeContentType match { + case Some("application/cloudevents+json") => + // TODO: handle structured cloudevents + throw new UnsupportedOperationException("CloudEvents structured binding not yet supported") + + case defaultCt if message.attributes.contains("ce-specversion") => + CloudEvent( + id = message.attributes.getOrElse("ce-id", message.messageId), + source = message.attributes.getOrElse("ce-source", source), + specversion = message.attributes("ce-specversion"), + `type` = message.attributes.getOrElse("ce-type", ""), + datacontenttype = + message.attributes.get("ce-datacontenttype").orElse(defaultCt).getOrElse("application/octet-stream"), + dataschema = message.attributes.get("ce-dataschema"), + subject = message.attributes.get("ce-subject"), + time = message.attributes + .get("ce-time") + .flatMap(t => Try(Instant.from(DateTimeFormatter.ISO_OFFSET_DATE_TIME.parse(t))).toOption), + data = Some(message.data) + ) + + case _ => + CloudEvent( + id = message.messageId, + source = source, + specversion = "1.0", + `type` = "", + datacontenttype = maybeContentType.getOrElse("application/octet-stream"), + dataschema = None, + subject = None, + time = message.publishTime.map(t => Instant.ofEpochSecond(t.seconds, t.nanos)), + data = Some(message.data) + ) + } - private[this] final def createByProxyManagedDestination(topic: String): Flow[ProtobufAny, AnyRef, NotUsed] = - Flow - .setup { (mat, attrs) => - implicit val ec = mat.system.dispatcher - val destination = createDestination(topic = topic) - val t = Topic(topic) - val f = publisherClient - .createTopic(t) - .recover({ - case ex: gRPCStatusRuntimeException if ex.getStatus.getCode == gRPCStatus.Code.ALREADY_EXISTS => - t - }) - .map(_ => destination) - - Flow.lazyInitAsync(() => f) + SourceEvent(cloudEvent, receivedMessage.ackId) + } + + override def supportsDestination: Boolean = true + + override def createDestination(destination: eventing.EventDestination): EventDestination = new EventDestination { + + private val topic = destination.destination match { + case EventDestinationProto.Destination.Topic(topic) => + s"projects/$projectId/topics/$topic" + case other => + throw new IllegalArgumentException(s"Google PubSub destination cannot be used to serve $other") + } + + private val topicReady: Future[Done] = manageTopicsAndSubscriptions match { + case MANUALLY => Future.successful(Done) + case USING_CRD => createUsingCrdManagedDestination(topic = topic) + case BY_PROXY => createByProxyManagedDestination(topic = topic) + } + + private val destinationFlow: Flow[DestinationEvent, AnyRef, NotUsed] = + batchResults + .mapAsyncUnordered(1 /*parallelism*/ )( + batch => publisherClient.publish(PublishRequest(topic = topic, messages = batch)) + ) + .mapConcat(_.messageIds) + + override def eventStreamOut: Flow[DestinationEvent, AnyRef, NotUsed] = + topicReady.value match { + case Some(Success(_)) => + destinationFlow + case _ => + Flow + .lazyInitAsync(() => topicReady.map(_ => destinationFlow)) + .mapMaterializedValue(_ => NotUsed) } - .mapMaterializedValue(_ => NotUsed) - private[this] final def createUsingCrdManagedDestination(topic: String): Flow[ProtobufAny, AnyRef, NotUsed] = + override def emitSingle(destinationEvent: DestinationEvent): Future[Done] = + topicReady.value match { + case Some(Success(_)) => + publisherClient + .publish(PublishRequest(topic, Seq(transformDestinationEvent(destinationEvent)))) + .map(_ =>
Done) + case _ => + for { + _ <- topicReady + _ <- publisherClient.publish(PublishRequest(topic, Seq(transformDestinationEvent(destinationEvent)))) + } yield Done + } + } + + private[this] final def createByProxyManagedDestination(topic: String): Future[Done] = + publisherClient + .createTopic(Topic(topic)) + .map(_ => Done) + .recover({ + case ex: gRPCStatusRuntimeException if ex.getStatus.getCode == gRPCStatus.Code.ALREADY_EXISTS => + Done + }) + + private def transformDestinationEvent(destinationEvent: DestinationEvent): PubsubMessage = { + val attributes = Map( + "ce-id" -> destinationEvent.event.id, + "ce-source" -> destinationEvent.event.source, + "ce-specversion" -> destinationEvent.event.specversion, + "ce-type" -> destinationEvent.event.`type`, + "ce-datacontenttype" -> destinationEvent.event.datacontenttype + ) ++ + destinationEvent.event.subject.map(s => "ce-subject" -> s) ++ + destinationEvent.event.dataschema.map(d => "ce-dataschema" -> d) ++ + destinationEvent.event.time.map(t => "ce-time" -> DateTimeFormatter.ISO_OFFSET_DATE_TIME.format(t)) + + PubsubMessage(destinationEvent.event.data.getOrElse(ByteString.EMPTY), attributes) + } + + private[this] final def createUsingCrdManagedDestination(topic: String): Future[Done] = throw new IllegalStateException("NOT IMPLEMENTED YET") // FIXME IMPLEMENT THIS: create CRD-requests - //FIXME Add stats generation/collection so we can track progress here - override final def createDestination(destinationName: String, - handler: CommandHandler): Flow[ProtobufAny, AnyRef, NotUsed] = - if (destinationName == "") - Flow[ProtobufAny] - else { - val topic = s"projects/${projectId}/topics/${destinationName}" - manageTopicsAndSubscriptions match { - case MANUALLY => createDestination(topic = topic) - case USING_CRD => createUsingCrdManagedDestination(topic = topic) - case BY_PROXY => createByProxyManagedDestination(topic = topic) - } - } + private def futureKillSwitch(future: Future[KillSwitch])(implicit ec: ExecutionContext): KillSwitch = new KillSwitch { + override def shutdown(): Unit = future.foreach(_.shutdown()) + + override def abort(ex: Throwable): Unit = future.foreach(_.abort(ex)) + } } - */ diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/eventing/ProjectionSupport.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/eventing/ProjectionSupport.scala new file mode 100644 index 000000000..d89fef409 --- /dev/null +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/eventing/ProjectionSupport.scala @@ -0,0 +1,45 @@ +/* + * Copyright 2019 Lightbend Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package io.cloudstate.proxy.eventing + +import akka.Done +import akka.actor.typed.ActorSystem +import akka.projection.cloudstate.TestAtLeastOnceFlowProjection +import akka.projection.{ProjectionContext, ProjectionId} +import akka.projection.scaladsl.{AtLeastOnceFlowProjection, SourceProvider} +import akka.stream.scaladsl.FlowWithContext + +import scala.concurrent.Future + +trait ProjectionSupport { + def create[Offset, Envelope]( + projectionId: ProjectionId, + sourceProvider: SourceProvider[Offset, Envelope], + flow: FlowWithContext[Envelope, ProjectionContext, Done, ProjectionContext, _] + ): AtLeastOnceFlowProjection[Offset, Envelope] + + def prepare(): Future[Done] = Future.successful(Done) +} + +class InMemoryProjectionSupport(system: ActorSystem[_]) extends ProjectionSupport { + override def create[Offset, Envelope]( + projectionId: ProjectionId, + sourceProvider: SourceProvider[Offset, Envelope], + flow: FlowWithContext[Envelope, ProjectionContext, Done, ProjectionContext, _] + ): AtLeastOnceFlowProjection[Offset, Envelope] = + TestAtLeastOnceFlowProjection(projectionId, sourceProvider, flow) +} diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/eventsourced/EventSourcedEntity.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/eventsourced/EventSourcedEntity.scala index c521d3746..67bbbe404 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/eventsourced/EventSourcedEntity.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/eventsourced/EventSourcedEntity.scala @@ -25,6 +25,7 @@ import akka.actor._ import akka.cloudstate.EntityStash import akka.cluster.sharding.ShardRegion import akka.persistence._ +import akka.persistence.journal.Tagged import akka.stream.scaladsl._ import akka.stream.{CompletionStrategy, Materializer, OverflowStrategy} import akka.util.Timeout @@ -184,7 +185,7 @@ object EventSourcedEntity { final case class Configuration( serviceName: String, - userFunctionName: String, + entityTypeName: String, passivationTimeout: Timeout, sendQueueSize: Int ) @@ -212,7 +213,7 @@ final class EventSourcedEntity(configuration: EventSourcedEntity.Configuration, import io.cloudstate.proxy.telemetry.EventSourcedInstrumentation.StashContext - override final def persistenceId: String = configuration.userFunctionName + entityId + override final def persistenceId: String = configuration.entityTypeName + "|" + entityId private val actorId = EventSourcedEntity.actorCounter.incrementAndGet() @@ -224,7 +225,7 @@ final class EventSourcedEntity(configuration: EventSourcedEntity.Configuration, private[this] final var commandStartTime = 0L private[this] val instrumentation = - CloudstateTelemetry(context.system).eventSourcedEntityInstrumentation(configuration.userFunctionName) + CloudstateTelemetry(context.system).eventSourcedEntityInstrumentation(configuration.entityTypeName) instrumentation.entityActivated() instrumentation.recoveryStarted() @@ -346,9 +347,9 @@ final class EventSourcedEntity(configuration: EventSourcedEntity.Configuration, } else { instrumentation.persistStarted() var eventsLeft = events.size - persistAll(events) { event => + persistAll(events.map(payload => Tagged(payload, Set(configuration.entityTypeName)))) { event => eventsLeft -= 1 - instrumentation.eventPersisted(event.serializedSize) + instrumentation.eventPersisted(event.payload.asInstanceOf[pbAny].serializedSize) if (eventsLeft <= 0) { // Remove this hack when switching to Akka Persistence Typed instrumentation.persistCompleted() // note: this doesn't include saving snapshots 
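+          // The whole batch has now been journalled, so save any snapshot that the user function requested.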
r.snapshot.foreach { snapshot => @@ -448,4 +449,12 @@ final class EventSourcedEntity(configuration: EventSourcedEntity.Configuration, maybeInit(None) relay ! EventSourcedStreamIn(EventSourcedStreamIn.Message.Event(EventSourcedEvent(lastSequenceNr, Some(event)))) } + + override final def onRecoveryFailure(cause: Throwable, event: Option[Any]): Unit = { + // This just logs it + super.onRecoveryFailure(cause, event) + notifyOutstandingRequests("Error recovering event log") + currentCommand = null + stashedCommands = Queue.empty + } } diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/eventsourced/EventSourcedSupportFactory.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/eventsourced/EventSourcedSupportFactory.scala index 48ba31e7d..205e9b3d7 100644 --- a/proxy/core/src/main/scala/io/cloudstate/proxy/eventsourced/EventSourcedSupportFactory.scala +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/eventsourced/EventSourcedSupportFactory.scala @@ -83,9 +83,9 @@ class EventSourcedSupportFactory( val methodsWithoutKeys = methodDescriptors.values.filter(_.keyFieldsCount < 1) if (methodsWithoutKeys.nonEmpty) { val offendingMethods = methodsWithoutKeys.map(_.method.getName).mkString(",") - throw new EntityDiscoveryException( + throw EntityDiscoveryException( s"Event sourced entities do not support methods whose parameters do not have at least one field marked as entity_key, " + - "but ${serviceDescriptor.getFullName} has the following methods without keys: ${offendingMethods}" + s"but ${serviceDescriptor.getFullName} has the following methods without keys: $offendingMethods" ) } } diff --git a/proxy/core/src/main/scala/io/cloudstate/proxy/eventsourced/InMemJournal.scala b/proxy/core/src/main/scala/io/cloudstate/proxy/eventsourced/InMemJournal.scala new file mode 100644 index 000000000..a36976710 --- /dev/null +++ b/proxy/core/src/main/scala/io/cloudstate/proxy/eventsourced/InMemJournal.scala @@ -0,0 +1,220 @@ +/* + * Copyright 2019 Lightbend Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +// This is in the akka package so that we can look up the inmem journal from the read journal +package akka.persistence.cloudstate + +import akka.NotUsed +import akka.actor.{ActorLogging, ActorRef, ExtendedActorSystem} +import akka.persistence.{AtomicWrite, Persistence, PersistentRepr} +import akka.persistence.journal.{AsyncWriteJournal, Tagged} +import akka.persistence.query.scaladsl.{EventsByTagQuery, ReadJournal} +import akka.persistence.query.{EventEnvelope, NoOffset, Offset, ReadJournalProvider, Sequence} +import akka.stream.{ActorMaterializer, Materializer, OverflowStrategy} +import akka.stream.scaladsl.{BroadcastHub, Keep, Source} +import akka.util.Timeout +import com.typesafe.config.Config + +import scala.collection.immutable +import scala.concurrent.Future +import scala.concurrent.duration._ +import scala.util.Try + +object InmemJournal { + case class EventsByTag(tag: String, fromOffset: Long) +} + +private[persistence] class InmemJournal extends AsyncWriteJournal with InmemMessages with ActorLogging { + + import InmemJournal.EventsByTag + + override implicit protected lazy val mat: Materializer = ActorMaterializer() + + override def asyncWriteMessages(messages: immutable.Seq[AtomicWrite]): Future[immutable.Seq[Try[Unit]]] = { + for (w <- messages; p <- w.payload) + add(p) + Future.successful(Nil) // all good + } + + override def asyncReadHighestSequenceNr(persistenceId: String, fromSequenceNr: Long): Future[Long] = + Future.successful(highestSequenceNr(persistenceId)) + + override def asyncReplayMessages(persistenceId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)( + recoveryCallback: PersistentRepr => Unit + ): Future[Unit] = { + val highest = highestSequenceNr(persistenceId) + if (highest != 0L && max != 0L) + read(persistenceId, fromSequenceNr, math.min(toSequenceNr, highest), max).foreach(recoveryCallback) + Future.successful(()) + } + + override def asyncDeleteMessagesTo(persistenceId: String, toSequenceNr: Long): Future[Unit] = { + val toSeqNr = math.min(toSequenceNr, highestSequenceNr(persistenceId)) + var snr = 1L + while (snr <= toSeqNr) { + delete(persistenceId, snr) + snr += 1 + } + Future.successful(()) + } + + override def receivePluginInternal: Receive = { + case EventsByTag(tag, fromOffset) => + log.info("Received EventsByTag query for tag {} from offset {}", tag, fromOffset) + sender() ! eventsByTagQuery(tag, fromOffset) + } +} + +class InmemReadJournal(system: ExtendedActorSystem, config: Config) + extends EventsByTagQuery + with ReadJournalProvider + with ReadJournal + with akka.persistence.query.javadsl.ReadJournal { + + private def inmemJournal: ActorRef = Persistence(system).journalFor("inmem-journal") + private implicit val timeout = Timeout(5.seconds) + import akka.pattern.ask + import InmemJournal._ + + override def scaladslReadJournal(): ReadJournal = this + + override def javadslReadJournal(): akka.persistence.query.javadsl.ReadJournal = + // todo this is obviously not right + this + + override def eventsByTag(tag: String, offset: Offset): Source[EventEnvelope, NotUsed] = { + val fromOffset = offset match { + case Sequence(seq) => seq + case NoOffset => 0L + case unsupported => + throw new IllegalArgumentException(s"$unsupported is an unsupported offset type for the in memory read journal") + } + + Source + .fromFutureSource( + (inmemJournal ? EventsByTag(tag, fromOffset)).mapTo[Source[EventEnvelope, NotUsed]] + ) + .mapMaterializedValue(_ => NotUsed) + } +} + +/** + * INTERNAL API. 
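+ * + * A simple in-memory store of persisted messages, keyed by persistence id, with a global offset maintained + * across all messages so that eventsByTag queries can be served. The mutable state is only accessed from the + * journal actor, so no synchronization is used.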
+ */ +trait InmemMessages { + private var messages = Map.empty[String, Vector[Message]] + private var allMessages = Vector.empty[Message] + private var offset: Long = 0 + + private val (broadcast, source) = Source + .actorRef[Message](8, OverflowStrategy.fail) + .toMat(BroadcastHub.sink)(Keep.both) + .run() + + protected implicit def mat: Materializer + + def add(p: PersistentRepr): Unit = { + val message = toMessage(p) + + messages = messages + (messages.get(p.persistenceId) match { + case Some(ms) => p.persistenceId -> (ms :+ message) + case None => p.persistenceId -> Vector(message) + }) + + allMessages :+= message + broadcast ! message + } + + def update(pid: String, snr: Long)(f: PersistentRepr => PersistentRepr): Unit = { + messages = messages.get(pid) match { + case Some(ms) => + messages + (pid -> ms.map(sp => if (sp.repr.sequenceNr == snr) sp.copy(repr = f(sp.repr)) else sp)) + case None => messages + } + allMessages = allMessages.map { message => + if (message.repr.persistenceId == pid && message.repr.sequenceNr == snr) { + // todo can tags change? + message.copy(repr = f(message.repr)) + } else { + message + } + } + } + + def delete(pid: String, snr: Long): Unit = { + messages = messages.get(pid) match { + case Some(ms) => + messages + (pid -> ms.filterNot(_.repr.sequenceNr == snr)) + case None => messages + } + allMessages = allMessages.filterNot { message => + message.repr.persistenceId == pid && message.repr.sequenceNr == snr + } + } + + def read(pid: String, fromSnr: Long, toSnr: Long, max: Long): immutable.Seq[PersistentRepr] = + messages.get(pid) match { + case Some(ms) => + ms.view + .dropWhile(_.repr.sequenceNr < fromSnr) + .takeWhile(_.repr.sequenceNr <= toSnr) + .take(safeLongToInt(max)) + .map(_.repr) + .toSeq + case None => Nil + } + + def highestSequenceNr(pid: String): Long = { + val snro = for { + ms <- messages.get(pid) + m <- ms.lastOption + } yield m.repr.sequenceNr + snro.getOrElse(0L) + } + + def eventsByTagQuery(tag: String, fromOffset: Long): Source[EventEnvelope, NotUsed] = + // Technically there's a race condition here: if a new message is persisted after this returns but before the + // subscriber to the query materializes this stream, that message will be dropped. But this is only the + // in-memory journal, which is not meant for production use.
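+    // Replay everything stored so far, then keep following the live broadcast of newly added messages, using + // the global offset to position the stream and the tag to filter it.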
+ Source(allMessages) + .concat(source) + .dropWhile(_.offset < fromOffset) + .filter(_.tags(tag)) + .map { message => + EventEnvelope(Sequence(message.offset), + message.repr.persistenceId, + message.repr.sequenceNr, + message.repr.payload) + } + + private def safeLongToInt(l: Long): Int = + if (Int.MaxValue < l) Int.MaxValue else l.toInt + + private def toMessage(repr: PersistentRepr): Message = { + offset += 1 + repr.payload match { + case Tagged(payload, tags) => Message(offset, repr.withPayload(payload), tags) + case _ => Message(offset, repr, Set.empty) + } + } + +} + +private case class Message( + offset: Long, + repr: PersistentRepr, + tags: Set[String] +) diff --git a/proxy/core/src/test/scala/io/cloudstate/proxy/eventsourced/EventSourcedRestartSpec.scala b/proxy/core/src/test/scala/io/cloudstate/proxy/eventsourced/EventSourcedRestartSpec.scala index 89e13aa68..48ebc692d 100644 --- a/proxy/core/src/test/scala/io/cloudstate/proxy/eventsourced/EventSourcedRestartSpec.scala +++ b/proxy/core/src/test/scala/io/cloudstate/proxy/eventsourced/EventSourcedRestartSpec.scala @@ -64,7 +64,7 @@ class EventSourcedRestartSpec extends AbstractTelemetrySpec { val entityConfiguration = EventSourcedEntity.Configuration( serviceName = "service", - userFunctionName = "test", + entityTypeName = "test", passivationTimeout = 30.seconds, sendQueueSize = 100 ) diff --git a/proxy/core/src/test/scala/io/cloudstate/proxy/telemetry/EventSourcedInstrumentationSpec.scala b/proxy/core/src/test/scala/io/cloudstate/proxy/telemetry/EventSourcedInstrumentationSpec.scala index 151521557..5bbd263e1 100644 --- a/proxy/core/src/test/scala/io/cloudstate/proxy/telemetry/EventSourcedInstrumentationSpec.scala +++ b/proxy/core/src/test/scala/io/cloudstate/proxy/telemetry/EventSourcedInstrumentationSpec.scala @@ -66,7 +66,7 @@ class EventSourcedInstrumentationSpec extends AbstractTelemetrySpec { val entityConfiguration = EventSourcedEntity.Configuration( serviceName = "service", - userFunctionName = "test", + entityTypeName = "test", passivationTimeout = 30.seconds, sendQueueSize = 100 ) diff --git a/proxy/jdbc/src/main/resources/jdbc-common.conf b/proxy/jdbc/src/main/resources/jdbc-common.conf index 5e501c7cc..7b8ed7036 100644 --- a/proxy/jdbc/src/main/resources/jdbc-common.conf +++ b/proxy/jdbc/src/main/resources/jdbc-common.conf @@ -6,7 +6,14 @@ cloudstate.proxy { persistence.store = "jdbc" } - eventsourced-entity.journal-enabled = true + eventsourced-entity { + journal-enabled = true + read-journal = jdbc-read-journal + projection-support { + enabled = true + class = "io.cloudstate.proxy.jdbc.SlickProjectionSupport" + } + } } akka { diff --git a/proxy/jdbc/src/main/scala/io/cloudstate/proxy/jdbc/SlickProjectionSupport.scala b/proxy/jdbc/src/main/scala/io/cloudstate/proxy/jdbc/SlickProjectionSupport.scala new file mode 100644 index 000000000..8517a25a7 --- /dev/null +++ b/proxy/jdbc/src/main/scala/io/cloudstate/proxy/jdbc/SlickProjectionSupport.scala @@ -0,0 +1,54 @@ +/* + * Copyright 2019 Lightbend Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package io.cloudstate.proxy.jdbc + +import akka.Done +import akka.actor.CoordinatedShutdown +import akka.actor.typed.ActorSystem +import akka.projection.{ProjectionContext, ProjectionId} +import akka.projection.scaladsl.{AtLeastOnceFlowProjection, SourceProvider} +import akka.projection.slick.SlickProjection +import akka.stream.scaladsl.FlowWithContext +import io.cloudstate.proxy.eventing.ProjectionSupport +import slick.basic.DatabaseConfig +import slick.jdbc.JdbcProfile + +import scala.concurrent.Future + +class SlickProjectionSupport(implicit system: ActorSystem[_]) extends ProjectionSupport { + import system.executionContext + + private lazy val databaseConfig = { + val dbConfig = + DatabaseConfig.forConfig[JdbcProfile]("akka-persistence-jdbc.shared-databases.slick", system.settings.config) + CoordinatedShutdown(system).addTask(CoordinatedShutdown.PhaseBeforeActorSystemTerminate, + "shutdown-slick-projection-factory-connection-pool") { () => + dbConfig.db.shutdown.map(_ => Done) + } + dbConfig + } + + override def create[Offset, Envelope]( + projectionId: ProjectionId, + sourceProvider: SourceProvider[Offset, Envelope], + flow: FlowWithContext[Envelope, ProjectionContext, Done, ProjectionContext, _] + ): AtLeastOnceFlowProjection[Offset, Envelope] = + SlickProjection.atLeastOnceFlow(projectionId, sourceProvider, databaseConfig, flow) + + override def prepare(): Future[Done] = + SlickProjection.createOffsetTableIfNotExists(databaseConfig) +} diff --git a/proxy/postgres/src/graal/META-INF/native-image/io.cloudstate/cloudstate-proxy-jdbc/reflect-config.json.conf b/proxy/postgres/src/graal/META-INF/native-image/io.cloudstate/cloudstate-proxy-jdbc/reflect-config.json.conf index c7c9d21b3..da3e28d44 100644 --- a/proxy/postgres/src/graal/META-INF/native-image/io.cloudstate/cloudstate-proxy-jdbc/reflect-config.json.conf +++ b/proxy/postgres/src/graal/META-INF/native-image/io.cloudstate/cloudstate-proxy-jdbc/reflect-config.json.conf @@ -11,4 +11,8 @@ name: "io.cloudstate.proxy.valueentity.store.jdbc.JdbcStore" allDeclaredConstructors: true } +{ + name: "io.cloudstate.proxy.jdbc.SlickProjectionSupport" + methods: [{name: "<init>", parameterTypes: ["akka.actor.typed.ActorSystem"]}] +} ] diff --git a/samples/java-pingpong/src/main/protos/pingpong/pingpong.proto b/samples/java-pingpong/src/main/protos/pingpong/pingpong.proto index 6a5ead0bf..fa1e14f59 100644 --- a/samples/java-pingpong/src/main/protos/pingpong/pingpong.proto +++ b/samples/java-pingpong/src/main/protos/pingpong/pingpong.proto @@ -47,25 +47,33 @@ service PingPongService { rpc Ping(PongSent) returns (PingSent) { option (.cloudstate.eventing) = { - out: "pings", + out { + topic: "pings", + }, }; } rpc Pong(PingSent) returns (PongSent) { option (.cloudstate.eventing) = { - out: "pongs", + out { + topic: "pongs", + }, }; } rpc SeenPong(PongSent) returns (google.protobuf.Empty) { option (.cloudstate.eventing) = { - in: "pongs", + in { + topic: "pongs", + }, }; } rpc SeenPing(PingSent) returns (google.protobuf.Empty) { option (.cloudstate.eventing) = { - in: "pings", + in { + topic: "pings", + }, }; } diff --git a/samples/js-shopping-cart-load-generator/src/main/scala/io/cloudstate/loadgenerator/GenerateLoad.scala b/samples/js-shopping-cart-load-generator/src/main/scala/io/cloudstate/loadgenerator/GenerateLoad.scala index b962058dc..5b907c005 100644 ---
a/samples/js-shopping-cart-load-generator/src/main/scala/io/cloudstate/loadgenerator/GenerateLoad.scala +++ b/samples/js-shopping-cart-load-generator/src/main/scala/io/cloudstate/loadgenerator/GenerateLoad.scala @@ -15,18 +15,24 @@ import com.google.protobuf.empty.Empty import scala.concurrent.duration._ import scala.util.Random -object GenerateLoad extends App{ - +object GenerateLoad extends App { val system = ActorSystem() - val loadGenerator = system.actorOf(BackoffSupervisor.props(BackoffOpts.onFailure( - childProps = Props[LoadGeneratorActor], - childName = "load-generator", - minBackoff = 3.seconds, - maxBackoff = 30.seconds, - randomFactor = 0.2d - ).withReplyWhileStopped(Done)), "load-generator-supervisor") + val loadGenerator = system.actorOf( + BackoffSupervisor.props( + BackoffOpts + .onFailure( + childProps = Props[LoadGeneratorActor], + childName = "load-generator", + minBackoff = 3.seconds, + maxBackoff = 30.seconds, + randomFactor = 0.2d + ) + .withReplyWhileStopped(Done) + ), + "load-generator-supervisor" + ) CoordinatedShutdown(system).addTask(CoordinatedShutdown.PhaseServiceRequestsDone, "stop-making-requests") { () => import akka.pattern.ask @@ -97,7 +103,8 @@ class LoadGeneratorActor extends Actor with Timers { implicit val ec = context.dispatcher private val clients = { - val settings = GrpcClientSettings.connectToServiceAt(serviceName, servicePort) + val settings = GrpcClientSettings + .connectToServiceAt(serviceName, servicePort) .withTls(false) .withDeadline(1.minute) @@ -119,25 +126,23 @@ class LoadGeneratorActor extends Actor with Timers { override def receive = starting - override def postStop(): Unit = { + override def postStop(): Unit = if (stoppingRef != null) { stoppingRef ! Done } - } - override def preStart(): Unit = { + override def preStart(): Unit = clients.head.getCart(GetShoppingCart("user1")) pipeTo self - } /** - * Let's say that we want to achieve 13 requests per second, with a 250ms tick interval. To do this, we need to make - * 3.25 requests per tick. But you can't make 0.25 requests. If we round this up or down, we're going to end up with - * a requests per second that is greater or less than our desired rate. We could track the rate across ticks, but - * there's no simple way to do that, especially during warmup, and taking back off due to response lag into - * consideration. Instead, we round 3.25 up or down randomly, weighted according to its decimal part, so on average, - * 75% of the time we round down, 25% we round up, and therefore end up with an average of 13 requests a second, as - * desired. - */ + * Let's say that we want to achieve 13 requests per second, with a 250ms tick interval. To do this, we need to make + * 3.25 requests per tick. But you can't make 0.25 requests. If we round this up or down, we're going to end up with + * a requests per second that is greater or less than our desired rate. We could track the rate across ticks, but + * there's no simple way to do that, especially during warmup, and taking back off due to response lag into + * consideration. Instead, we round 3.25 up or down randomly, weighted according to its decimal part, so on average, + * 75% of the time we round down, 25% we round up, and therefore end up with an average of 13 requests a second, as + * desired. 
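+ * For example, roundRandomWeighted(3.25) returns 3 with probability 0.75 and 4 with probability 0.25, so the + * long-run average is 3.25, as desired.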
+ */ private def roundRandomWeighted(d: Double): Int = { val floor = d.floor val remainder = d - floor @@ -217,9 +222,15 @@ class LoadGeneratorActor extends Actor with Timers { val responsesASecond = responsesReceivedSinceLastReport.toDouble / reportInterval * nanos val failuresASecond = failuresSinceLastReport.toDouble / reportInterval * nanos - println("%s Report: %4.0f req/s %4.0f success/s %4.0f failure/s with %d outstanding requests".format( - dateTimeFormatter.format(ZonedDateTime.now()), - requestsASecond, responsesASecond, failuresASecond, outstandingRequests)) + println( + "%s Report: %4.0f req/s %4.0f success/s %4.0f failure/s with %d outstanding requests".format( + dateTimeFormatter.format(ZonedDateTime.now()), + requestsASecond, + responsesASecond, + failuresASecond, + outstandingRequests + ) + ) lastReportNanos = reportTime requestsMadeSinceLastReport = 0 @@ -229,7 +240,7 @@ class LoadGeneratorActor extends Actor with Timers { def stopping: Receive = { case Tick => - // Ignore + // Ignore case _: Cart | _: Empty => responsesReceivedSinceLastReport += 1 @@ -246,4 +257,4 @@ class LoadGeneratorActor extends Actor with Timers { context stop self } } -} \ No newline at end of file +} diff --git a/samples/js-shopping-cart/index.js b/samples/js-shopping-cart/index.js index 009d03d8a..e56d6ddec 100644 --- a/samples/js-shopping-cart/index.js +++ b/samples/js-shopping-cart/index.js @@ -14,4 +14,11 @@ * limitations under the License. */ -require("./shoppingcart.js").start(); +const CloudState = require("cloudstate").CloudState; + +const server = new CloudState(); +server.addEntity(require("./shoppingcart")); +server.addEntity(require("./products")); +server.addEntity(require("./projection")); + +server.start(); \ No newline at end of file diff --git a/samples/js-shopping-cart/package.json b/samples/js-shopping-cart/package.json index 4169bbc0e..9cda6e35c 100644 --- a/samples/js-shopping-cart/package.json +++ b/samples/js-shopping-cart/package.json @@ -39,10 +39,10 @@ }, "scripts": { "test": "mocha", - "prestart": "compile-descriptor ../../protocols/example/shoppingcart/shoppingcart.proto", - "pretest": "compile-descriptor ../../protocols/example/shoppingcart/shoppingcart.proto", - "postinstall": "compile-descriptor ../../protocols/example/shoppingcart/shoppingcart.proto", "prepare-node-support": "cd ../../node-support && npm install && cd ../samples/js-shopping-cart", + "prestart": "compile-descriptor -I../../protocols/example ../../protocols/example/shoppingcart/shoppingcart.proto ../../protocols/example/shoppingcart/projection.proto ../../protocols/example/shoppingcart/products.proto", + "pretest": "compile-descriptor -I../../protocols/example ../../protocols/example/shoppingcart/shoppingcart.proto ../../protocols/example/shoppingcart/projection.proto ../../protocols/example/shoppingcart/products.proto", + "postinstall": "compile-descriptor -I../../protocols/example shoppingcart/shoppingcart.proto shoppingcart/projection.proto shoppingcart/products.proto", "start": "node index.js", "start-no-prestart": "node index.js", "dockerbuild": "npm run prepare-node-support && docker build -f ../../Dockerfile.js-shopping-cart -t ${DOCKER_PUBLISH_TO:-cloudstateio}/js-shopping-cart:latest ../..", diff --git a/samples/js-shopping-cart/products.js b/samples/js-shopping-cart/products.js new file mode 100644 index 000000000..39b2a5b1e --- /dev/null +++ b/samples/js-shopping-cart/products.js @@ -0,0 +1,69 @@ +/* + * Copyright 2019 Lightbend Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +const crdt = require("cloudstate").crdt; + +const entity = new crdt.Crdt( + "shoppingcart/products.proto", + "com.example.shoppingcart.ShoppingCartProducts", + { + includeDirs: ["../../protocols/example"] + } +); + +entity.commandHandlers = { + UpdateCartQuantity: updateCartQuantity, + RemoveProductFromCart: removeProductFromCart, + GetProduct: getProduct, +}; + +// The default value is an ORMap whose default values are GCounters, so essentially this is a map of user ids to +// the quantity of items that each user has in their cart. +// One caveat: if a user removes an item from their cart and then immediately adds it back, it's possible that the +// old counter value will survive replication, due to how ORMap works. +entity.defaultValue = () => new crdt.ORMap(); + +entity.onStateSet = map => { + map.defaultValue = () => new crdt.GCounter(); +}; + +function updateCartQuantity(request, ctx) { + console.log("Product entity received update cart quantity for product " + request.productId); + ctx.state.get(request.userId).increment(request.quantity); + return {}; +} + +function removeProductFromCart(request, ctx) { + console.log("Product entity received remove product from cart for product " + request.productId); + ctx.state.delete(request.userId); + return {}; +} + +function getProduct(request, ctx) { + let totalQuantity = 0; + for (const cart of ctx.state.values()) { + totalQuantity += cart.value; + } + return { + totalQuantities: totalQuantity, + totalCarts: ctx.state.size + }; +} + +// Export the entity +module.exports = entity; diff --git a/samples/js-shopping-cart/projection.js b/samples/js-shopping-cart/projection.js new file mode 100644 index 000000000..5961a1716 --- /dev/null +++ b/samples/js-shopping-cart/projection.js @@ -0,0 +1,54 @@ +/* + * Copyright 2019 Lightbend Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +const Action = require("cloudstate").Action; + +const projection = new Action( + ["shoppingcart/projection.proto", "shoppingcart/products.proto"], + "com.example.shoppingcart.ShoppingCartProjection", + { + includeDirs: ["../../protocols/example"] + } +); + +const products = projection.root.lookupService("com.example.shoppingcart.ShoppingCartProducts").methods; + +projection.commandHandlers = { + HandleItemAdded: handleItemAdded, + HandleItemRemoved: handleItemRemoved, +}; + +function handleItemAdded(itemAdded, ctx) { + console.log("Projection received itemAdded event for user " + + ctx.cloudevent.subject + " and product " + itemAdded.item.productId); + ctx.forward(products.UpdateCartQuantity, { + productId: itemAdded.item.productId, + userId: ctx.cloudevent.subject, + quantity: itemAdded.item.quantity, + }); +} + +function handleItemRemoved(itemRemoved, ctx) { + console.log("Projection received itemRemoved event for user " + + ctx.cloudevent.subject + " and product " + itemRemoved.productId); + ctx.forward(products.RemoveProductFromCart, { + productId: itemRemoved.productId, + userId: ctx.cloudevent.subject, + }); +} + +// Export the entity +module.exports = projection; diff --git a/samples/js-shopping-cart/user-function.desc b/samples/js-shopping-cart/user-function.desc index 1e48bd4e4b2545450e00ec04704a5f22cd503f49..20a8d51e9cf99102c21b2e185e4a00da9e04d817 100644 GIT binary patch delta 1910 zcmb`H&u<$=6vwf*cGu73*Qg|^abmhj9b0MEYAW%gQ7U1(KMX-lHVsfyDc$Z4?t;Cu z-JPu(F6J-bGW-v4M2f_(Qx06pfude3q#n3)=Elr;ZLgCw6p7=!dHa3uefIn2uUn7p z?{3d4e#>{|O6)YyO-o@R#@ZLpP6Eq@a-f)U4sZB&I*4nIUz)7g_!?ruy-@2eOx zO*$9VJ#*y z!1jXzbEDXN@)@?o3b4)M6?UbN`zZHvU{}9P!t{2`0Gk6T zNgr^|P3tW}T5a6>WB0;O^{X9^?rCR!txds1Zmi$&im)`IFf|nK!@elwyNBhY+y7Re zikg$u`h3KM1AzN!c)j0_T(HPJf#{vxDt_42D4gB#Z2qc=|GO2M^3^l&gOG16(g30_ zM%FJ8vj&RS%^EIDzLSFb?FynleC! 
z#7x2JK@!d!GM^YaKr<$wqxjJ!;g5}@7tt&)atLa{YdsSkqKe)Wg7stX{!Q}^zz`-{ VW>3mN1ln782Xod^MdFoP=!{qh#b+|ZO z%Rolur3>+KF=ggSFanL|3<9!*1h|;Qix$=Sn>%d}a#7G$)n`TO}szXhf>}aq$JE=H{2B z5>%_9sbs*#@19x`oRMEp0Cb*nVo?bWlYeV0mz~ed#e<@>l8YVaCa`-{CpQ{OO}?qA g!l*y_pQZ};tgRC4T&$TurxtHk(h_H!tY;(*0LEo*IRF3v diff --git a/tck/src/main/scala/io/cloudstate/tck/CloudStateTCK.scala b/tck/src/main/scala/io/cloudstate/tck/CloudStateTCK.scala index 26aab5702..c5554c7f8 100644 --- a/tck/src/main/scala/io/cloudstate/tck/CloudStateTCK.scala +++ b/tck/src/main/scala/io/cloudstate/tck/CloudStateTCK.scala @@ -27,16 +27,19 @@ import com.example.valueentity.shoppingcart.shoppingcart.{ ShoppingCart => ValueEntityShoppingCart, ShoppingCartClient => ValueEntityShoppingCartClient } -import com.google.protobuf.DescriptorProtos +import com.google.protobuf.{ByteString, DescriptorProtos} import com.google.protobuf.any.{Any => ScalaPbAny} import com.typesafe.config.{Config, ConfigFactory} import io.cloudstate.protocol.action._ import io.cloudstate.protocol.crdt._ import io.cloudstate.protocol.value_entity.ValueEntity import io.cloudstate.protocol.event_sourced._ +import io.cloudstate.protocol.entity._ import io.cloudstate.tck.model.valueentity.valueentity.{ValueEntityTckModel, ValueEntityTwo} import io.cloudstate.tck.model.action.{ActionTckModel, ActionTwo} import io.cloudstate.tck.model.crdt.{CrdtTckModel, CrdtTwo} +import io.cloudstate.tck.model.eventlogeventing.{EmitEventRequest, EventLogSubscriberModel} +import io.cloudstate.tck.model.eventlogeventing import io.cloudstate.testkit.InterceptService.InterceptorSettings import io.cloudstate.testkit.eventsourced.EventSourcedMessages import io.cloudstate.testkit.{InterceptService, ServiceAddress, TestClient, TestProtocol} @@ -78,6 +81,10 @@ class CloudStateTCK(description: String, settings: CloudStateTCK.Settings) private[this] final val client = TestClient(settings.proxy.host, settings.proxy.port) private[this] final val eventSourcedShoppingCartClient = EventSourcedShoppingCartClient(client.settings)(system) private[this] final val valueEntityShoppingCartClient = ValueEntityShoppingCartClient(client.settings)(system) + private[this] final val eventLogEventingEventSourcedEntityOne = + eventlogeventing.EventSourcedEntityOneClient(client.settings)(system) + private[this] final val eventLogEventingEventSourcedEntityTwo = + eventlogeventing.EventSourcedEntityTwoClient(client.settings)(system) private[this] final val protocol = TestProtocol(settings.service.host, settings.service.port) @@ -3141,5 +3148,84 @@ class CloudStateTCK(description: String, settings: CloudStateTCK.Settings) verifyGetCart(session, "cart:1", Item("product:2", "Product2", 33)) // check final state } } + + "verify proxy test: event log subscriptions" must { + def eventLogSubscriptionTest(test: => Any): Unit = + testFor(EventLogSubscriberModel)(test) + + def emitEventOne(id: String, step: eventlogeventing.ProcessStep.Step) = + eventLogEventingEventSourcedEntityOne.emitEvent( + EmitEventRequest(id, + EmitEventRequest.Event + .EventOne(eventlogeventing.EventOne(Some(eventlogeventing.ProcessStep(step))))) + ) + def emitReplyEventOne(id: String, message: String) = + emitEventOne(id, eventlogeventing.ProcessStep.Step.Reply(eventlogeventing.Reply(message))) + def emitForwardEventOne(id: String, message: String) = + emitEventOne(id, eventlogeventing.ProcessStep.Step.Forward(eventlogeventing.Forward(message))) + def verifyEventSourcedInitCommandReply(id: String) = { + val connection = interceptor.expectEventSourcedConnection() + val 
init = connection.expectClientMessage[EventSourcedStreamIn.Message.Init] + init.value.serviceName must ===(eventlogeventing.EventSourcedEntityOne.name) + init.value.entityId must ===(id) + connection.expectClientMessage[EventSourcedStreamIn.Message.Command] + connection.expectServiceMessage[EventSourcedStreamOut.Message.Reply] + } + def verifySubscriberCommandResponse(step: eventlogeventing.ProcessStep.Step) = { + val subscriberConnection = interceptor.expectActionUnaryConnection() + val eventOneIn = eventlogeventing.EventOne.parseFrom( + subscriberConnection.command.payload.fold(ByteString.EMPTY)(_.value).newCodedInput() + ) + eventOneIn.step must ===(Some(eventlogeventing.ProcessStep(step))) + subscriberConnection.expectResponse() + } + def verifySubscriberReplyCommand(id: String, message: String) = { + val response = + verifySubscriberCommandResponse(eventlogeventing.ProcessStep.Step.Reply(eventlogeventing.Reply(message))) + response.response.isReply must ===(true) + val reply = eventlogeventing.Response.parseFrom(response.response.reply.get.payload.get.value.newCodedInput()) + reply.id must ===(id) + reply.message must ===(message) + } + def verifySubscriberForwardCommand(id: String, message: String) = { + val response = + verifySubscriberCommandResponse(eventlogeventing.ProcessStep.Step.Forward(eventlogeventing.Forward(message))) + response.response.isForward must ===(true) + val subscriberConnection = interceptor.expectActionUnaryConnection() + subscriberConnection.command.name must ===("Effect") + } + "consume an event" in eventLogSubscriptionTest { + emitReplyEventOne("eventlogeventing:1", "some message") + verifyEventSourcedInitCommandReply("eventlogeventing:1") + verifySubscriberReplyCommand("eventlogeventing:1", "some message") + } + + "forward a consumed event" in eventLogSubscriptionTest { + emitForwardEventOne("eventlogeventing:2", "some message") + verifyEventSourcedInitCommandReply("eventlogeventing:2") + verifySubscriberForwardCommand("eventlogeventing:2", "some message") + } + + "process json events" in eventLogSubscriptionTest { + eventLogEventingEventSourcedEntityTwo.emitJsonEvent( + eventlogeventing.JsonEvent("eventlogeventing:3", "some json message") + ) + + val connection = interceptor.expectEventSourcedConnection() + val init = connection.expectClientMessage[EventSourcedStreamIn.Message.Init] + init.value.serviceName must ===(eventlogeventing.EventSourcedEntityTwo.name) + init.value.entityId must ===("eventlogeventing:3") + connection.expectClientMessage[EventSourcedStreamIn.Message.Command] + val reply = connection.expectServiceMessage[EventSourcedStreamOut.Message.Reply] + reply.value.events must have size (1) + reply.value.events.head.typeUrl must startWith("json.cloudstate.io/") + + val subscriberConnection = interceptor.expectActionUnaryConnection() + val response = subscriberConnection.expectResponse() + val parsed = eventlogeventing.Response.parseFrom(response.response.reply.get.payload.get.value.newCodedInput()) + parsed.id must ===("eventlogeventing:3") + parsed.message must ===("some json message") + } + } } } diff --git a/testkit/src/main/scala/io/cloudstate/testkit/InterceptService.scala b/testkit/src/main/scala/io/cloudstate/testkit/InterceptService.scala index 3a256e037..1bac1363c 100644 --- a/testkit/src/main/scala/io/cloudstate/testkit/InterceptService.scala +++ b/testkit/src/main/scala/io/cloudstate/testkit/InterceptService.scala @@ -22,6 +22,7 @@ import akka.http.scaladsl.Http import akka.testkit.{TestKit, TestProbe} import 
com.typesafe.config.{Config, ConfigFactory} import io.cloudstate.testkit.InterceptService.InterceptorSettings +import io.cloudstate.testkit.action.InterceptActionService import io.cloudstate.testkit.discovery.InterceptEntityDiscovery import io.cloudstate.testkit.eventsourced.InterceptEventSourcedService import io.cloudstate.testkit.valueentity.InterceptValueEntityService @@ -38,6 +39,7 @@ final class InterceptService(settings: InterceptorSettings) { private val entityDiscovery = new InterceptEntityDiscovery(context) private val eventSourced = new InterceptEventSourcedService(context) private val valueBased = new InterceptValueEntityService(context) + private val action = new InterceptActionService(context) import context.system @@ -45,7 +47,7 @@ Await.result( Http().bindAndHandleAsync( - handler = entityDiscovery.handler orElse eventSourced.handler orElse valueBased.handler, + handler = entityDiscovery.handler orElse eventSourced.handler orElse valueBased.handler orElse action.handler, interface = settings.bind.host, port = settings.bind.port ), @@ -58,11 +60,22 @@ def expectValueBasedConnection(): InterceptValueEntityService.Connection = valueBased.expectConnection() + def expectActionUnaryConnection(): InterceptActionService.UnaryConnection = action.expectUnaryConnection() + + def expectActionStreamedInConnection(): InterceptActionService.StreamedInConnection = + action.expectStreamedInConnection() + + def expectActionStreamedOutConnection(): InterceptActionService.StreamedOutConnection = + action.expectStreamedOutConnection() + + def expectActionStreamedConnection(): InterceptActionService.StreamedConnection = action.expectStreamedConnection() + def terminate(): Unit = { entityDiscovery.terminate() eventSourced.terminate() valueBased.terminate() + action.terminate() context.terminate() } } diff --git a/testkit/src/main/scala/io/cloudstate/testkit/action/InterceptActionService.scala b/testkit/src/main/scala/io/cloudstate/testkit/action/InterceptActionService.scala new file mode 100644 index 000000000..7aa46f834 --- /dev/null +++ b/testkit/src/main/scala/io/cloudstate/testkit/action/InterceptActionService.scala @@ -0,0 +1,160 @@ +/* + * Copyright 2019 Lightbend Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package io.cloudstate.testkit.action + +import akka.NotUsed +import akka.http.scaladsl.model.{HttpRequest, HttpResponse} +import akka.stream.scaladsl.{Sink, Source} +import akka.testkit.TestProbe +import io.cloudstate.protocol.action.{ + ActionCommand, + ActionProtocol, + ActionProtocolClient, + ActionProtocolHandler, + ActionResponse +} +import io.cloudstate.testkit.InterceptService.InterceptorContext + +import scala.concurrent.Future +import scala.util.{Failure, Success} + +final class InterceptActionService(context: InterceptorContext) { + import InterceptActionService._ + + private val interceptor = new ActionInterceptor(context) + + def expectUnaryConnection(): UnaryConnection = context.probe.expectMsgType[UnaryConnection] + def expectStreamedInConnection(): StreamedInConnection = context.probe.expectMsgType[StreamedInConnection] + def expectStreamedOutConnection(): StreamedOutConnection = context.probe.expectMsgType[StreamedOutConnection] + def expectStreamedConnection(): StreamedConnection = context.probe.expectMsgType[StreamedConnection] + + def handler: PartialFunction[HttpRequest, Future[HttpResponse]] = + ActionProtocolHandler.partial(interceptor)(context.system) + + def terminate(): Unit = interceptor.terminate() +} + +object InterceptActionService { + case object Complete + final case class Error(cause: Throwable) + + final class ActionInterceptor(context: InterceptorContext) extends ActionProtocol { + import context.system.dispatcher + + private val client = ActionProtocolClient(context.clientSettings)(context.system) + + override def handleUnary(in: ActionCommand): Future[ActionResponse] = { + val connection = new UnaryConnection(context, in) + context.probe.ref ! connection + val response = client.handleUnary(in) + response.onComplete { + case Success(r) => connection.out.testActor ! r + case Failure(e) => connection.out.testActor ! Error(e) + } + response + } + + override def handleStreamedIn(in: Source[ActionCommand, NotUsed]): Future[ActionResponse] = { + val connection = new StreamedInConnection(context) + context.probe.ref ! connection + val response = client.handleStreamedIn(in.alsoTo(connection.inSink)) + response.onComplete { + case Success(r) => connection.out.testActor ! r + case Failure(e) => connection.out.testActor ! Error(e) + } + response + } + + override def handleStreamedOut(in: ActionCommand): Source[ActionResponse, NotUsed] = { + val connection = new StreamedOutConnection(context, in) + context.probe.ref ! connection + val out = client.handleStreamedOut(in) + out.alsoTo(connection.outSink) + } + + override def handleStreamed(in: Source[ActionCommand, NotUsed]): Source[ActionResponse, NotUsed] = { + val connection = new StreamedConnection(context) + context.probe.ref ! 
connection + val out = client.handleStreamed(in.alsoTo(connection.inSink)) + out.alsoTo(connection.outSink) + } + + def terminate(): Unit = client.close() + } + + final class UnaryConnection(context: InterceptorContext, val command: ActionCommand) { + private[testkit] val out = TestProbe("UnaryConnectionOutProbe")(context.system) + def expectResponse(): ActionResponse = + out.expectMsgType[ActionResponse] + } + + final class StreamedInConnection(context: InterceptorContext) { + private[this] val in = TestProbe("StreamedInConnectionIn")(context.system) + private[testkit] val out = TestProbe("StreamedInConnectionOut")(context.system) + + private[testkit] def inSink: Sink[ActionCommand, NotUsed] = Sink.actorRef(in.ref, Complete, Error.apply) + + def expectResponse(): ActionResponse = + out.expectMsgType[ActionResponse] + + def expectCommand(): ActionCommand = + in.expectMsgType[ActionCommand] + + def expectInComplete(): StreamedInConnection = { + in.expectMsg(Complete) + this + } + } + + final class StreamedOutConnection(context: InterceptorContext, val command: ActionCommand) { + private[testkit] val out = TestProbe("StreamedOutConnectionOut")(context.system) + + private[testkit] def outSink: Sink[ActionResponse, NotUsed] = Sink.actorRef(out.ref, Complete, Error.apply) + + def expectResponse(): ActionResponse = + out.expectMsgType[ActionResponse] + + def expectOutComplete(): StreamedOutConnection = { + out.expectMsg(Complete) + this + } + } + + final class StreamedConnection(context: InterceptorContext) { + private[this] val in = TestProbe("StreamedConnectionIn")(context.system) + private[this] val out = TestProbe("StreamedConnectionOut")(context.system) + + private[testkit] def inSink: Sink[ActionCommand, NotUsed] = Sink.actorRef(in.ref, Complete, Error.apply) + private[testkit] def outSink: Sink[ActionResponse, NotUsed] = Sink.actorRef(out.ref, Complete, Error.apply) + + def expectResponse(): ActionResponse = + out.expectMsgType[ActionResponse] + + def expectOutComplete(): StreamedConnection = { + out.expectMsg(Complete) + this + } + + def expectCommand(): ActionCommand = + in.expectMsgType[ActionCommand] + + def expectInComplete(): StreamedConnection = { + in.expectMsg(Complete) + this + } + } +} diff --git a/testkit/src/main/scala/io/cloudstate/testkit/eventsourced/InterceptEventSourcedService.scala b/testkit/src/main/scala/io/cloudstate/testkit/eventsourced/InterceptEventSourcedService.scala index 207eb10c0..55e3facc8 100644 --- a/testkit/src/main/scala/io/cloudstate/testkit/eventsourced/InterceptEventSourcedService.scala +++ b/testkit/src/main/scala/io/cloudstate/testkit/eventsourced/InterceptEventSourcedService.scala @@ -71,6 +71,13 @@ object InterceptEventSourcedService { this } + def expectClientMessage[T](implicit classTag: ClassTag[T]): T = { + val message = in.expectMsgType[EventSourcedStreamIn].message + assert(classTag.runtimeClass.isInstance(message), + s"expected message ${classTag.runtimeClass}, found ${message.getClass} ($message)") + message.asInstanceOf[T] + } + def expectService(message: EventSourcedStreamOut.Message): Connection = { out.expectMsg(EventSourcedStreamOut(message)) this