diff --git a/backend.go b/backend.go
index 5fa2b042..6ee98570 100644
--- a/backend.go
+++ b/backend.go
@@ -7,7 +7,6 @@ import (
 	"strings"
 
 	"github.com/gomodule/redigo/redis"
-	"github.com/nyaruka/gocommon/aws/cwatch"
 	"github.com/nyaruka/gocommon/httpx"
 	"github.com/nyaruka/gocommon/urns"
 )
@@ -68,7 +67,7 @@ type Backend interface {
 	// WriteChannelLog writes the passed in channel log to our backend
 	WriteChannelLog(context.Context, *ChannelLog) error
 
-	// PopNextOutgoingMsg returns the next message that needs to be sent, callers should call MarkOutgoingMsgComplete with the
+	// PopNextOutgoingMsg returns the next message that needs to be sent, callers should call OnSendComplete with the
 	// returned message when they have dealt with the message (regardless of whether it was sent or not)
 	PopNextOutgoingMsg(context.Context) (MsgOut, error)
 
@@ -80,10 +79,11 @@ type Backend interface {
 	// a message is being forced in being resent by a user
 	ClearMsgSent(context.Context, MsgID) error
 
-	// MarkOutgoingMsgComplete marks the passed in message as having been processed. Note this should be called even in the case
-	// of errors during sending as it will manage the number of active workers per channel. The status parameter can be
-	// used to determine any sort of deduping of msg sends
-	MarkOutgoingMsgComplete(context.Context, MsgOut, StatusUpdate)
+	// OnSendComplete is called when the sender has finished trying to send a message
+	OnSendComplete(context.Context, MsgOut, StatusUpdate, *ChannelLog)
+
+	// OnReceiveComplete is called when the server has finished handling an incoming request
+	OnReceiveComplete(context.Context, Channel, []Event, *ChannelLog)
 
 	// SaveAttachment saves an attachment to backend storage
 	SaveAttachment(context.Context, Channel, string, []byte, string) (string, error)
@@ -106,9 +106,6 @@ type Backend interface {
 
 	// RedisPool returns the redisPool for this backend
 	RedisPool() *redis.Pool
-
-	// CloudWatch return the CloudWatch service for this backend
-	CloudWatch() *cwatch.Service
 }
 
 // Media is a resolved media object that can be used as a message attachment
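The interface now pairs a send-side hook with a receive-side hook and drops the CloudWatch accessor, making metrics an implementation detail of the backend. A minimal sketch, not part of this patch, of how a third-party Backend might satisfy the two new methods — myBackend and its atomic counters are hypothetical:

    import (
    	"context"
    	"sync/atomic"

    	"github.com/nyaruka/courier"
    )

    type myBackend struct {
    	sends    atomic.Int64
    	receives atomic.Int64
    	// ... plus the rest of the courier.Backend methods
    }

    // called exactly once per popped message, whether or not the send succeeded
    func (b *myBackend) OnSendComplete(ctx context.Context, msg courier.MsgOut, status courier.StatusUpdate, clog *courier.ChannelLog) {
    	b.sends.Add(1)
    }

    // events may be empty when a request was handled but produced nothing
    func (b *myBackend) OnReceiveComplete(ctx context.Context, ch courier.Channel, events []courier.Event, clog *courier.ChannelLog) {
    	b.receives.Add(int64(len(events)))
    }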
diff --git a/backends/rapidpro/backend.go b/backends/rapidpro/backend.go
index 709d30ab..53634db7 100644
--- a/backends/rapidpro/backend.go
+++ b/backends/rapidpro/backend.go
@@ -50,13 +50,6 @@ func init() {
 	courier.RegisterBackend("rapidpro", newBackend)
 }
 
-type stats struct {
-	// both sqlx and redis provide wait stats which are cummulative that we need to convert into increments by
-	// tracking their previous values
-	dbWaitDuration    time.Duration
-	redisWaitDuration time.Duration
-}
-
 type backend struct {
 	config *courier.Config
 
@@ -94,7 +87,7 @@ type backend struct {
 	// tracking of external ids of messages we've sent in case we need one before its status update has been written
 	sentExternalIDs *redisx.IntervalHash
 
-	stats stats
+	stats *StatsCollector
 }
 
 // NewBackend creates a new RapidPro backend
@@ -131,6 +124,8 @@ func newBackend(cfg *courier.Config) courier.Backend {
 		receivedExternalIDs: redisx.NewIntervalHash("seen-external-ids", time.Hour*24, 2), // 24 - 48 hours
 		sentIDs:             redisx.NewIntervalSet("sent-ids", time.Hour, 2),              // 1 - 2 hours
 		sentExternalIDs:     redisx.NewIntervalHash("sent-external-ids", time.Hour, 2),    // 1 - 2 hours
+
+		stats: NewStatsCollector(),
 	}
 }
@@ -194,7 +189,6 @@ func (b *backend) Start() error {
 	if err != nil {
 		return err
 	}
-	b.cw.StartQueue(time.Second * 3)
 
 	// check attachment bucket access
 	if err := b.s3.Test(ctx, b.config.S3AttachmentsBucket); err != nil {
@@ -253,8 +247,6 @@ func (b *backend) Stop() error {
 	// wait for our threads to exit
 	b.waitGroup.Wait()
 
-	// stop cloudwatch service
-	b.cw.StopQueue()
 
 	return nil
 }
@@ -464,8 +456,8 @@ func (b *backend) ClearMsgSent(ctx context.Context, id courier.MsgID) error {
 	return b.sentIDs.Rem(rc, id.String())
 }
 
-// MarkOutgoingMsgComplete marks the passed in message as having completed processing, freeing up a worker for that channel
-func (b *backend) MarkOutgoingMsgComplete(ctx context.Context, msg courier.MsgOut, status courier.StatusUpdate) {
+// OnSendComplete is called when the sender has finished trying to send a message
+func (b *backend) OnSendComplete(ctx context.Context, msg courier.MsgOut, status courier.StatusUpdate, clog *courier.ChannelLog) {
 	rc := b.rp.Get()
 	defer rc.Close()
 
@@ -489,6 +481,33 @@ func (b *backend) MarkOutgoingMsgComplete(ctx context.Context, msg courier.MsgOu
 			slog.Error("unable to update session timeout", "error", err, "session_id", dbMsg.SessionID_)
 		}
 	}
+
+	if wasSuccess {
+		b.stats.RecordSendSuccess(msg.Channel().ChannelType())
+	} else {
+		b.stats.RecordSendError(msg.Channel().ChannelType())
+	}
+
+	b.stats.RecordSendDuration(msg.Channel().ChannelType(), clog.Elapsed)
+}
+
+// OnReceiveComplete is called when the server has finished handling an incoming request
+func (b *backend) OnReceiveComplete(ctx context.Context, ch courier.Channel, events []courier.Event, clog *courier.ChannelLog) {
+	for _, event := range events {
+		switch event.(type) {
+		case courier.MsgIn:
+			b.stats.RecordReceiveMessage(ch.ChannelType())
+		case courier.StatusUpdate:
+			b.stats.RecordReceiveStatus(ch.ChannelType())
+		case courier.ChannelEvent:
+			b.stats.RecordReceiveEvent(ch.ChannelType())
+		}
+	}
+	if len(events) == 0 {
+		b.stats.RecordReceiveIgnored(ch.ChannelType())
+	}
+
+	b.stats.RecordReceiveDuration(ch.ChannelType(), clog.Elapsed)
 }
 
 // WriteMsg writes the passed in message to our store
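Note the coupling in OnSendComplete above: every completed send records exactly one success-or-error outcome and one duration for the same channel type, so a channel type can never appear in SendDuration without a nonzero send count. Heartbeat relies on that invariant when it divides summed durations by the count. Restated in isolation — the stats, chType, and elapsed names here are placeholders, not code from this patch:

    // one outcome per completed send...
    if wasSuccess {
    	stats.RecordSendSuccess(chType)
    } else {
    	stats.RecordSendError(chType)
    }
    // ...always paired with a duration, so SendSuccesses[ct]+SendErrors[ct] >= 1
    // for every ct that has an entry in SendDuration
    stats.RecordSendDuration(chType, elapsed)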
@@ -737,7 +756,7 @@ func (b *backend) Health() string {
 	return health.String()
 }
 
-// Heartbeat is called every minute, we log our queue depth to librato
+// Heartbeat is called every minute and sends queue sizes and accumulated stats to CloudWatch
 func (b *backend) Heartbeat() error {
 	rc := b.rp.Get()
 	defer rc.Close()
@@ -774,34 +792,42 @@ func (b *backend) Heartbeat() error {
 	dbStats := b.db.Stats()
 	redisStats := b.rp.Stats()
 
-	dbWaitDurationInPeriod := dbStats.WaitDuration - b.stats.dbWaitDuration
-	redisWaitDurationInPeriod := redisStats.WaitDuration - b.stats.redisWaitDuration
-
-	b.stats.dbWaitDuration = dbStats.WaitDuration
-	b.stats.redisWaitDuration = redisStats.WaitDuration
+	stats := b.stats.Stats(dbStats, redisStats)
 
+	metrics := make([]cwtypes.MetricDatum, 0, 10)
 	hostDim := cwatch.Dimension("Host", b.config.InstanceID)
 
-	b.CloudWatch().Queue(
+	metrics = append(metrics,
 		cwatch.Datum("DBConnectionsInUse", float64(dbStats.InUse), cwtypes.StandardUnitCount, hostDim),
-		cwatch.Datum("DBConnectionWaitDuration", float64(dbWaitDurationInPeriod/time.Millisecond), cwtypes.StandardUnitMilliseconds, hostDim),
+		cwatch.Datum("DBConnectionWaitDuration", stats.DBWaitDuration.Seconds(), cwtypes.StandardUnitSeconds, hostDim),
 		cwatch.Datum("RedisConnectionsInUse", float64(redisStats.ActiveCount), cwtypes.StandardUnitCount, hostDim),
-		cwatch.Datum("RedisConnectionsWaitDuration", float64(redisWaitDurationInPeriod/time.Millisecond), cwtypes.StandardUnitMilliseconds, hostDim),
-	)
+		cwatch.Datum("RedisConnectionsWaitDuration", stats.RedisWaitDuration.Seconds(), cwtypes.StandardUnitSeconds, hostDim),
 
-	b.CloudWatch().Queue(
 		cwatch.Datum("QueuedMsgs", float64(bulkSize), cwtypes.StandardUnitCount, cwatch.Dimension("QueueName", "bulk")),
 		cwatch.Datum("QueuedMsgs", float64(prioritySize), cwtypes.StandardUnitCount, cwatch.Dimension("QueueName", "priority")),
+		cwatch.Datum("ContactsCreated", float64(stats.ContactsCreated), cwtypes.StandardUnitCount),
 	)
 
-	slog.Info("current metrics",
-		"db_inuse", dbStats.InUse,
-		"db_wait", dbWaitDurationInPeriod,
-		"redis_inuse", redisStats.ActiveCount,
-		"redis_wait", redisWaitDurationInPeriod,
-		"priority_size", prioritySize,
-		"bulk_size", bulkSize,
-	)
+	for cType, count := range stats.SendSuccesses {
+		metrics = append(metrics, cwatch.Datum("SendSucceeded", float64(count), cwtypes.StandardUnitCount, cwatch.Dimension("ChannelType", string(cType))))
+	}
+	for cType, count := range stats.SendErrors {
+		metrics = append(metrics, cwatch.Datum("SendErrored", float64(count), cwtypes.StandardUnitCount, cwatch.Dimension("ChannelType", string(cType))))
+	}
+	for cType, duration := range stats.SendDuration {
+		numSends := stats.SendSuccesses[cType] + stats.SendErrors[cType]
+		avgTime := (duration / time.Duration(numSends)).Seconds()
+		metrics = append(metrics, cwatch.Datum("SendDuration", avgTime, cwtypes.StandardUnitSeconds, cwatch.Dimension("ChannelType", string(cType))))
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
+	if err := b.cw.Send(ctx, metrics...); err != nil {
+		slog.Error("error sending metrics", "error", err)
+	} else {
+		slog.Info("sent metrics to cloudwatch", "metrics", len(metrics))
+	}
+	cancel()
 
 	return nil
 }
@@ -878,8 +904,3 @@ func (b *backend) Status() string {
 func (b *backend) RedisPool() *redis.Pool {
 	return b.rp
 }
-
-// CloudWatch return the cloudwatch service
-func (b *backend) CloudWatch() *cwatch.Service {
-	return b.cw
-}
diff --git a/backends/rapidpro/backend_test.go b/backends/rapidpro/backend_test.go
index 49393ed0..ff322ad7 100644
--- a/backends/rapidpro/backend_test.go
+++ b/backends/rapidpro/backend_test.go
@@ -900,7 +900,7 @@ func (ts *BackendTestSuite) TestOutgoingQueue() {
 	ts.Equal(msg.Text(), "test message")
 
 	// mark this message as dealt with
-	ts.b.MarkOutgoingMsgComplete(ctx, msg, ts.b.NewStatusUpdate(msg.Channel(), msg.ID(), courier.MsgStatusWired, clog))
+	ts.b.OnSendComplete(ctx, msg, ts.b.NewStatusUpdate(msg.Channel(), msg.ID(), courier.MsgStatusWired, clog), clog)
 
 	// this message should now be marked as sent
 	sent, err := ts.b.WasMsgSent(ctx, msg.ID())
diff --git a/backends/rapidpro/contact.go b/backends/rapidpro/contact.go
index c30fe400..e9fd8d70 100644
--- a/backends/rapidpro/contact.go
+++ b/backends/rapidpro/contact.go
@@ -10,10 +10,8 @@ import (
 	"time"
 	"unicode/utf8"
 
-	cwtypes "github.com/aws/aws-sdk-go-v2/service/cloudwatch/types"
 	"github.com/jmoiron/sqlx"
 	"github.com/nyaruka/courier"
-	"github.com/nyaruka/gocommon/aws/cwatch"
 	"github.com/nyaruka/gocommon/dbutil"
 	"github.com/nyaruka/gocommon/urns"
 	"github.com/nyaruka/gocommon/uuids"
@@ -218,9 +216,7 @@ func contactForURN(ctx context.Context, b *backend, org OrgID, channel *Channel,
 	// store this URN on our contact
 	contact.URNID_ = contactURN.ID
 
-	// report that we created a new contact
-	b.cw.Queue(cwatch.Datum("ContactCreated", float64(1), cwtypes.StandardUnitCount))
+	b.stats.RecordContactCreated()
 
-	// and return it
 	return contact, nil
 }
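Circling back to the Heartbeat hunk above: the per-channel SendDuration datum is an average in seconds, computed by integer-dividing the summed duration by the send count before converting. A self-contained check with assumed numbers:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	// assume 3 completed sends on one channel type took 1.2s in total
    	total := 1200 * time.Millisecond
    	numSends := 3

    	// divide the summed duration by the count, then convert to float seconds,
    	// mirroring the SendDuration computation in Heartbeat
    	avg := (total / time.Duration(numSends)).Seconds()
    	fmt.Println(avg) // 0.4
    }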
"github.com/nyaruka/courier" +) + +type Stats struct { + ReceiveMessages map[courier.ChannelType]int + ReceiveStatuses map[courier.ChannelType]int + ReceiveEvents map[courier.ChannelType]int + ReceiveIgnored map[courier.ChannelType]int + ReceiveDuration map[courier.ChannelType]time.Duration + + SendSuccesses map[courier.ChannelType]int + SendErrors map[courier.ChannelType]int + SendDuration map[courier.ChannelType]time.Duration + + ContactsCreated int + + DBWaitDuration time.Duration + RedisWaitDuration time.Duration +} + +func newStats() *Stats { + return &Stats{ + ReceiveMessages: make(map[courier.ChannelType]int), + ReceiveStatuses: make(map[courier.ChannelType]int), + ReceiveEvents: make(map[courier.ChannelType]int), + ReceiveIgnored: make(map[courier.ChannelType]int), + ReceiveDuration: make(map[courier.ChannelType]time.Duration), + + SendSuccesses: make(map[courier.ChannelType]int), + SendErrors: make(map[courier.ChannelType]int), + SendDuration: make(map[courier.ChannelType]time.Duration), + + ContactsCreated: 0, + } +} + +func (s *Stats) reset(db sql.DBStats, rp redis.PoolStats) { + clear(s.ReceiveMessages) + clear(s.ReceiveStatuses) + clear(s.ReceiveEvents) + clear(s.ReceiveIgnored) + clear(s.ReceiveDuration) + + clear(s.SendSuccesses) + clear(s.SendErrors) + clear(s.SendDuration) + + s.ContactsCreated = 0 + + s.DBWaitDuration = db.WaitDuration + s.RedisWaitDuration = rp.WaitDuration +} + +// StatsCollector provides threadsafe stats collection +type StatsCollector struct { + mutex sync.Mutex + stats *Stats +} + +// NewStatsCollector creates a new stats collector +func NewStatsCollector() *StatsCollector { + return &StatsCollector{stats: newStats()} +} + +func (c *StatsCollector) RecordReceiveMessage(typ courier.ChannelType) { + c.mutex.Lock() + c.stats.ReceiveMessages[typ]++ + c.mutex.Unlock() +} + +func (c *StatsCollector) RecordReceiveStatus(typ courier.ChannelType) { + c.mutex.Lock() + c.stats.ReceiveStatuses[typ]++ + c.mutex.Unlock() +} + +func (c *StatsCollector) RecordReceiveEvent(typ courier.ChannelType) { + c.mutex.Lock() + c.stats.ReceiveEvents[typ]++ + c.mutex.Unlock() +} + +func (c *StatsCollector) RecordReceiveIgnored(typ courier.ChannelType) { + c.mutex.Lock() + c.stats.ReceiveIgnored[typ]++ + c.mutex.Unlock() +} + +func (c *StatsCollector) RecordReceiveDuration(typ courier.ChannelType, d time.Duration) { + c.mutex.Lock() + c.stats.ReceiveDuration[typ] += d + c.mutex.Unlock() +} + +func (c *StatsCollector) RecordSendSuccess(typ courier.ChannelType) { + c.mutex.Lock() + c.stats.SendSuccesses[typ]++ + c.mutex.Unlock() +} + +func (c *StatsCollector) RecordSendError(typ courier.ChannelType) { + c.mutex.Lock() + c.stats.SendErrors[typ]++ + c.mutex.Unlock() +} + +func (c *StatsCollector) RecordSendDuration(typ courier.ChannelType, d time.Duration) { + c.mutex.Lock() + c.stats.SendDuration[typ] += d + c.mutex.Unlock() +} + +func (c *StatsCollector) RecordContactCreated() { + c.mutex.Lock() + c.stats.ContactsCreated++ + c.mutex.Unlock() +} + +// Stats returns the stats for the period since the last call +func (c *StatsCollector) Stats(db sql.DBStats, rp redis.PoolStats) *Stats { + c.mutex.Lock() + defer c.mutex.Unlock() + + stats := &Stats{ + ContactsCreated: c.stats.ContactsCreated, + + ReceiveMessages: maps.Clone(c.stats.ReceiveMessages), + ReceiveStatuses: maps.Clone(c.stats.ReceiveStatuses), + ReceiveEvents: maps.Clone(c.stats.ReceiveEvents), + ReceiveIgnored: maps.Clone(c.stats.ReceiveIgnored), + ReceiveDuration: maps.Clone(c.stats.ReceiveDuration), + + SendSuccesses: 
+// Stats returns the stats for the period since the last call
+func (c *StatsCollector) Stats(db sql.DBStats, rp redis.PoolStats) *Stats {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	stats := &Stats{
+		ContactsCreated: c.stats.ContactsCreated,
+
+		ReceiveMessages: maps.Clone(c.stats.ReceiveMessages),
+		ReceiveStatuses: maps.Clone(c.stats.ReceiveStatuses),
+		ReceiveEvents:   maps.Clone(c.stats.ReceiveEvents),
+		ReceiveIgnored:  maps.Clone(c.stats.ReceiveIgnored),
+		ReceiveDuration: maps.Clone(c.stats.ReceiveDuration),
+
+		SendSuccesses: maps.Clone(c.stats.SendSuccesses),
+		SendErrors:    maps.Clone(c.stats.SendErrors),
+		SendDuration:  maps.Clone(c.stats.SendDuration),
+
+		// both sqlx and redis provide wait stats which are cumulative, so we convert them into
+		// increments by tracking their previous values
+		DBWaitDuration:    db.WaitDuration - c.stats.DBWaitDuration,
+		RedisWaitDuration: rp.WaitDuration - c.stats.RedisWaitDuration,
+	}
+
+	c.stats.reset(db, rp)
+
+	return stats
+}
diff --git a/backends/rapidpro/stats_test.go b/backends/rapidpro/stats_test.go
new file mode 100644
index 00000000..611eea93
--- /dev/null
+++ b/backends/rapidpro/stats_test.go
@@ -0,0 +1,48 @@
+package rapidpro_test
+
+import (
+	"database/sql"
+	"testing"
+	"time"
+
+	"github.com/gomodule/redigo/redis"
+	"github.com/nyaruka/courier"
+	"github.com/nyaruka/courier/backends/rapidpro"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestStats(t *testing.T) {
+	db := sql.DBStats{WaitDuration: time.Second}
+	rp := redis.PoolStats{WaitDuration: time.Second * 3}
+
+	sc := rapidpro.NewStatsCollector()
+	sc.RecordContactCreated()
+	sc.RecordContactCreated()
+	sc.RecordReceiveMessage("T")
+	sc.RecordSendSuccess("T")
+	sc.RecordSendSuccess("T")
+	sc.RecordSendSuccess("FBA")
+	sc.RecordSendSuccess("FBA")
+	sc.RecordSendSuccess("FBA")
+
+	stats := sc.Stats(db, rp)
+
+	assert.Equal(t, 2, stats.ContactsCreated)
+	assert.Equal(t, map[courier.ChannelType]int{"T": 1}, stats.ReceiveMessages)
+	assert.Equal(t, map[courier.ChannelType]int{"T": 2, "FBA": 3}, stats.SendSuccesses)
+	assert.Equal(t, time.Second, stats.DBWaitDuration)
+	assert.Equal(t, time.Second*3, stats.RedisWaitDuration)
+
+	sc.RecordSendSuccess("FBA")
+	sc.RecordSendSuccess("FBA")
+	db.WaitDuration = time.Second * 3
+	rp.WaitDuration = time.Second * 3
+
+	stats = sc.Stats(db, rp)
+
+	assert.Equal(t, 0, stats.ContactsCreated)
+	assert.Equal(t, map[courier.ChannelType]int{}, stats.ReceiveMessages)
+	assert.Equal(t, map[courier.ChannelType]int{"FBA": 2}, stats.SendSuccesses)
+	assert.Equal(t, time.Second*2, stats.DBWaitDuration)
+	assert.Equal(t, time.Duration(0), stats.RedisWaitDuration)
+}
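The DBWaitDuration and RedisWaitDuration assertions in the test come down to the baseline subtraction in Stats. Isolated into a few runnable lines (the flush helper is illustrative only, not part of the codebase):

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	var baseline time.Duration

    	// flush turns a cumulative counter into a per-period increment,
    	// the same way Stats() handles sql.DBStats.WaitDuration
    	flush := func(cumulative time.Duration) time.Duration {
    		delta := cumulative - baseline
    		baseline = cumulative
    		return delta
    	}

    	fmt.Println(flush(time.Second))     // 1s — the first period reports the whole cumulative value
    	fmt.Println(flush(3 * time.Second)) // 2s — later periods report only the growth
    }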
diff --git a/sender.go b/sender.go
index 6a6c20a2..e5d3ea84 100644
--- a/sender.go
+++ b/sender.go
@@ -7,9 +7,7 @@ import (
 	"log/slog"
 	"time"
 
-	"github.com/aws/aws-sdk-go-v2/service/cloudwatch/types"
 	"github.com/nyaruka/courier/utils/clogs"
-	"github.com/nyaruka/gocommon/aws/cwatch"
 	"github.com/nyaruka/gocommon/urns"
 )
 
@@ -289,8 +287,6 @@ func (w *Sender) sendMessage(msg MsgOut) {
 		log = log.With("quick_replies", msg.QuickReplies())
 	}
 
-	start := time.Now()
-
 	// if this is a resend, clear our sent status
 	if msg.IsResend() {
 		err := backend.ClearMsgSent(sendCTX, msg.ID())
@@ -327,26 +323,8 @@ func (w *Sender) sendMessage(msg MsgOut) {
 		log.Warn("duplicate send, marking as wired")
 
 	} else {
 		status = w.sendByHandler(sendCTX, handler, msg, clog, log)
-
-		duration := time.Since(start)
-		secondDuration := float64(duration) / float64(time.Second)
-		log.Debug("send complete", "status", status.Status(), "elapsed", duration)
-
-		channelTypeDim := cwatch.Dimension("ChannelType", string(msg.Channel().ChannelType()))
-
-		// report to librato
-		if status.Status() == MsgStatusErrored || status.Status() == MsgStatusFailed {
-			backend.CloudWatch().Queue(
-				cwatch.Datum("MsgSendError", float64(secondDuration), types.StandardUnitSeconds, channelTypeDim),
-			)
-
-		} else {
-			backend.CloudWatch().Queue(
-				cwatch.Datum("MsgSend", float64(secondDuration), types.StandardUnitSeconds, channelTypeDim),
-			)
-		}
 	}
 
 	// we allot 10 seconds to write our status to the db
@@ -367,7 +344,7 @@ func (w *Sender) sendMessage(msg MsgOut) {
 	}
 
 	// mark our send task as complete
-	backend.MarkOutgoingMsgComplete(writeCTX, msg, status)
+	backend.OnSendComplete(writeCTX, msg, status, clog)
 }
 
 func (w *Sender) sendByHandler(ctx context.Context, h ChannelHandler, m MsgOut, clog *ChannelLog, log *slog.Logger) StatusUpdate {
diff --git a/server.go b/server.go
index 556bae73..c807a4ca 100644
--- a/server.go
+++ b/server.go
@@ -16,11 +16,9 @@ import (
 	"sync"
 	"time"
 
-	"github.com/aws/aws-sdk-go-v2/service/cloudwatch/types"
 	"github.com/go-chi/chi/v5"
 	"github.com/go-chi/chi/v5/middleware"
 	"github.com/nyaruka/courier/utils/clogs"
-	"github.com/nyaruka/gocommon/aws/cwatch"
 	"github.com/nyaruka/gocommon/httpx"
 	"github.com/nyaruka/gocommon/jsonx"
 )
@@ -248,8 +246,6 @@ func (s *server) initializeChannelHandlers() {
 
 func (s *server) channelHandleWrapper(handler ChannelHandler, handlerFunc ChannelHandleFunc, logType clogs.LogType) http.HandlerFunc {
 	return func(w http.ResponseWriter, r *http.Request) {
-		start := time.Now()
-
 		// stuff a few things in our context that help with logging
 		baseCtx := context.WithValue(r.Context(), contextRequestURL, r.URL.String())
 		baseCtx = context.WithValue(baseCtx, contextRequestStart, time.Now())
@@ -290,8 +286,6 @@ func (s *server) channelHandleWrapper(handler ChannelHandler, handlerFunc Channe
 		clog := NewChannelLogForIncoming(logType, channel, recorder, handler.RedactValues(channel))
 
 		events, hErr := handlerFunc(ctx, channel, recorder.ResponseWriter, r, clog)
-		duration := time.Since(start)
-		secondDuration := float64(duration) / float64(time.Second)
 
 		// if we received an error, write it out and report it
 		if hErr != nil {
@@ -306,30 +300,15 @@ func (s *server) channelHandleWrapper(handler ChannelHandler, handlerFunc Channe
 		}
 
 		if channel != nil {
-			cw := s.Backend().CloudWatch()
-			channelTypeDim := cwatch.Dimension("ChannelType", string(channel.ChannelType()))
-
-			// if we have a channel but no events were created, we still log this to metrics
-			if len(events) == 0 {
-				if hErr != nil {
-					cw.Queue(cwatch.Datum("ChannelError", float64(secondDuration), types.StandardUnitSeconds, channelTypeDim))
-				} else {
-					cw.Queue(cwatch.Datum("ChannelIgnored", float64(secondDuration), types.StandardUnitSeconds, channelTypeDim))
-				}
-			}
-
 			for _, event := range events {
 				switch e := event.(type) {
 				case MsgIn:
 					clog.SetAttached(true)
-					cw.Queue(cwatch.Datum("MsgReceive", float64(secondDuration), types.StandardUnitSeconds, channelTypeDim))
 					LogMsgReceived(r, e)
 				case StatusUpdate:
 					clog.SetAttached(true)
-					cw.Queue(cwatch.Datum("MsgStatus", float64(secondDuration), types.StandardUnitSeconds, channelTypeDim))
 					LogMsgStatusReceived(r, e)
 				case ChannelEvent:
-					cw.Queue(cwatch.Datum("EventReceive", float64(secondDuration), types.StandardUnitSeconds, channelTypeDim))
 					LogChannelEventReceived(r, e)
 				}
 			}
@@ -339,9 +318,10 @@ func (s *server) channelHandleWrapper(handler ChannelHandler, handlerFunc Channe
 			if err := s.backend.WriteChannelLog(ctx, clog); err != nil {
 				slog.Error("error writing channel log", "error", err)
 			}
+
+			s.backend.OnReceiveComplete(ctx, channel, events, clog)
 		} else {
 			slog.Info("non-channel specific request", "error", err, "channel_type", handler.ChannelType(), "request", recorder.Trace.RequestTrace, "status", recorder.Trace.Response.StatusCode)
-
 		}
 	}
 }
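A behavioral detail of the server change above: the old per-request ChannelIgnored/ChannelError data points are gone, and OnReceiveComplete is reached only when a channel was resolved — an empty events slice is what now marks a request as ignored. In collector terms (a fragment with assumed values; it presumes the rapidpro and time imports):

    sc := rapidpro.NewStatsCollector()

    // a resolved request that produced no events counts as ignored...
    sc.RecordReceiveIgnored("T")

    // ...but its handling time still feeds the receive duration metric
    sc.RecordReceiveDuration("T", 250*time.Millisecond)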
"github.com/nyaruka/courier" "github.com/nyaruka/courier/utils" - "github.com/nyaruka/gocommon/aws/cwatch" "github.com/nyaruka/gocommon/httpx" "github.com/nyaruka/gocommon/urns" "github.com/nyaruka/gocommon/uuids" @@ -46,8 +45,6 @@ type MockBackend struct { mutex sync.RWMutex redisPool *redis.Pool - cw *cwatch.Service - writtenMsgs []courier.MsgIn writtenMsgStatuses []courier.StatusUpdate writtenChannelEvents []courier.ChannelEvent @@ -86,11 +83,6 @@ func NewMockBackend() *MockBackend { log.Fatal(err) } - cw, err := cwatch.NewService("root", "tembatemba", "us-east-1", "Temba", "test") - if err != nil { - log.Fatal(err) - } - return &MockBackend{ channels: make(map[courier.ChannelUUID]courier.Channel), channelsByAddress: make(map[courier.ChannelAddress]courier.Channel), @@ -99,7 +91,6 @@ func NewMockBackend() *MockBackend { sentMsgs: make(map[courier.MsgID]bool), seenExternalIDs: make(map[string]courier.MsgUUID), redisPool: redisPool, - cw: cw, } } @@ -179,14 +170,17 @@ func (mb *MockBackend) ClearMsgSent(ctx context.Context, id courier.MsgID) error return nil } -// MarkOutgoingMsgComplete marks the passed msg as having been dealt with -func (mb *MockBackend) MarkOutgoingMsgComplete(ctx context.Context, msg courier.MsgOut, s courier.StatusUpdate) { +// OnSendComplete marks the passed msg as having been dealt with +func (mb *MockBackend) OnSendComplete(ctx context.Context, msg courier.MsgOut, s courier.StatusUpdate, clog *courier.ChannelLog) { mb.mutex.Lock() defer mb.mutex.Unlock() mb.sentMsgs[msg.ID()] = true } +func (mb *MockBackend) OnReceiveComplete(ctx context.Context, ch courier.Channel, events []courier.Event, clog *courier.ChannelLog) { +} + // WriteChannelLog writes the passed in channel log to the DB func (mb *MockBackend) WriteChannelLog(ctx context.Context, clog *courier.ChannelLog) error { mb.mutex.Lock() @@ -391,11 +385,6 @@ func (mb *MockBackend) RedisPool() *redis.Pool { return mb.redisPool } -// CloudWatch returns the cloudwatch service for this backend -func (mb *MockBackend) CloudWatch() *cwatch.Service { - return mb.cw -} - //////////////////////////////////////////////////////////////////////////////// // Methods not part of the backed interface but used in tests ////////////////////////////////////////////////////////////////////////////////