diff --git a/Sources/ServiceLifecycle/ServiceGroup.swift b/Sources/ServiceLifecycle/ServiceGroup.swift
index 3673469..728494a 100644
--- a/Sources/ServiceLifecycle/ServiceGroup.swift
+++ b/Sources/ServiceLifecycle/ServiceGroup.swift
@@ -436,6 +436,9 @@ public actor ServiceGroup: Sendable {
             fatalError("Unexpected state")
         }
 
+        // We store the first error thrown by a service here so we can rethrow it later.
+        var error: Error?
+
         // We have to shutdown the services in reverse. To do this
         // we are going to signal each child task the graceful shutdown and then wait for
         // its exit.
@@ -487,25 +490,38 @@
                     throw ServiceGroupError.serviceFinishedUnexpectedly()
                 }
 
-            case .serviceThrew(let service, _, let error):
+            case .serviceThrew(let service, _, let serviceError):
                 switch service.failureTerminationBehavior.behavior {
                 case .cancelGroup:
                     self.logger.debug(
                         "Service threw error during graceful shutdown. Cancelling group.",
                         metadata: [
                             self.loggingConfiguration.keys.serviceKey: "\(service.service)",
-                            self.loggingConfiguration.keys.errorKey: "\(error)",
+                            self.loggingConfiguration.keys.errorKey: "\(serviceError)",
                         ]
                     )
                     group.cancelAll()
-                    throw error
+                    throw serviceError
+
+                case .gracefullyShutdownGroup:
+                    self.logger.debug(
+                        "Service threw error during graceful shutdown.",
+                        metadata: [
+                            self.loggingConfiguration.keys.serviceKey: "\(service.service)",
+                            self.loggingConfiguration.keys.errorKey: "\(serviceError)",
+                        ]
+                    )
+
+                    if error == nil {
+                        error = serviceError
+                    }
 
-                case .gracefullyShutdownGroup, .ignore:
+                case .ignore:
                     self.logger.debug(
                         "Service threw error during graceful shutdown.",
                         metadata: [
                             self.loggingConfiguration.keys.serviceKey: "\(service.service)",
-                            self.loggingConfiguration.keys.errorKey: "\(error)",
+                            self.loggingConfiguration.keys.errorKey: "\(serviceError)",
                         ]
                     )
 
@@ -538,6 +554,12 @@
        // are the tasks that listen to the various graceful shutdown signals. We
        // just have to cancel those
        group.cancelAll()
+
+        // If a service that triggers graceful shutdown on failure threw during the
+        // shutdown, we have to rethrow that error now.
+        if let error = error {
+            throw error
+        }
    }
}
 
diff --git a/Tests/ServiceLifecycleTests/ServiceGroupTests.swift b/Tests/ServiceLifecycleTests/ServiceGroupTests.swift
index 0b8b7c9..7c245c0 100644
--- a/Tests/ServiceLifecycleTests/ServiceGroupTests.swift
+++ b/Tests/ServiceLifecycleTests/ServiceGroupTests.swift
@@ -707,7 +707,7 @@ final class ServiceGroupTests: XCTestCase {
             gracefulShutdownSignals: [.sigalrm]
         )
 
-        await withThrowingTaskGroup(of: Void.self) { group in
+        try await withThrowingTaskGroup(of: Void.self) { group in
             group.addTask {
                 try await serviceGroup.run()
             }
@@ -748,13 +748,85 @@ final class ServiceGroupTests: XCTestCase {
             await XCTAsyncAssertEqual(await eventIterator2.next(), .runPing)
 
             // Let's throw from the middle service
-            await service2.resumeRunContinuation(with: .failure(CancellationError()))
+            await service2.resumeRunContinuation(with: .failure(ExampleError()))
 
             // The first service should now receive a cancellation
             await XCTAsyncAssertEqual(await eventIterator1.next(), .runCancelled)
 
             // Let's exit from the first service
             await service1.resumeRunContinuation(with: .success(()))
+
+            try await XCTAsyncAssertThrowsError(await group.next()) {
+                XCTAssertTrue($0 is ExampleError)
+            }
+        }
+    }
+
+    func testGracefulShutdownOrdering_whenServiceThrows_andServiceGracefullyShutsdown() async throws {
+        let service1 = MockService(description: "Service1")
+        let service2 = MockService(description: "Service2")
+        let service3 = MockService(description: "Service3")
+        let serviceGroup = self.makeServiceGroup(
+            services: [
+                .init(service: service1),
+                .init(service: service2, failureTerminationBehavior: .gracefullyShutdownGroup),
+                .init(service: service3)
+            ],
+            gracefulShutdownSignals: [.sigalrm]
+        )
+
+        try await withThrowingTaskGroup(of: Void.self) { group in
+            group.addTask {
+                try await serviceGroup.run()
+            }
+
+            var eventIterator1 = service1.events.makeAsyncIterator()
+            await XCTAsyncAssertEqual(await eventIterator1.next(), .run)
+
+            var eventIterator2 = service2.events.makeAsyncIterator()
+            await XCTAsyncAssertEqual(await eventIterator2.next(), .run)
+
+            var eventIterator3 = service3.events.makeAsyncIterator()
+            await XCTAsyncAssertEqual(await eventIterator3.next(), .run)
+
+            let pid = getpid()
+            kill(pid, UnixSignal.sigalrm.rawValue)
+
+            // The last service should receive the shutdown signal first
+            await XCTAsyncAssertEqual(await eventIterator3.next(), .shutdownGracefully)
+
+            // Waiting to see that all three are still running
+            service1.sendPing()
+            service2.sendPing()
+            service3.sendPing()
+            await XCTAsyncAssertEqual(await eventIterator1.next(), .runPing)
+            await XCTAsyncAssertEqual(await eventIterator2.next(), .runPing)
+            await XCTAsyncAssertEqual(await eventIterator3.next(), .runPing)
+
+            // Let's exit from the last service
+            await service3.resumeRunContinuation(with: .success(()))
+
+            // The middle service should now receive the signal
+            await XCTAsyncAssertEqual(await eventIterator2.next(), .shutdownGracefully)
+
+            // Waiting to see that the two remaining are still running
+            service1.sendPing()
+            service2.sendPing()
+            await XCTAsyncAssertEqual(await eventIterator1.next(), .runPing)
+            await XCTAsyncAssertEqual(await eventIterator2.next(), .runPing)
+
+            // Let's throw from the middle service
+            await service2.resumeRunContinuation(with: .failure(ExampleError()))
+
+            // The first service should now receive the graceful shutdown
+            await XCTAsyncAssertEqual(await eventIterator1.next(), .shutdownGracefully)
+
+            // Let's exit from the first service
+            await service1.resumeRunContinuation(with: .success(()))
+
+            try await XCTAsyncAssertThrowsError(await group.next()) {
+                XCTAssertTrue($0 is ExampleError)
+            }
         }
     }
 
@@ -881,7 +953,9 @@ final class ServiceGroupTests: XCTestCase {
 
             // Let's throw from the first service
             await service1.resumeRunContinuation(with: .failure(ExampleError()))
 
-            await XCTAsyncAssertNoThrow(try await group.next())
+            try await XCTAsyncAssertThrowsError(await group.next()) {
+                XCTAssertTrue($0 is ExampleError)
+            }
         }
     }
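For reviewers, a minimal usage sketch of the behavior this patch enables (not part of the diff). It assumes the `ServiceGroup(configuration:)` initializer, the `ServiceGroupConfiguration` API, and the `withGracefulShutdownHandler` helper from this package; `FlakyService`, `SteadyService`, and the logger label are illustrative. Once a service configured with `failureTerminationBehavior: .gracefullyShutdownGroup` throws, the group shuts the remaining services down in reverse order and `run()` now rethrows that first error instead of swallowing it.

import Logging
import ServiceLifecycle
import UnixSignals

// Illustrative service that fails shortly after starting.
struct FlakyService: Service {
    struct FlakyError: Error {}

    func run() async throws {
        try await Task.sleep(for: .seconds(1))
        throw FlakyError()
    }
}

// Illustrative service that idles until the group asks it to shut down.
struct SteadyService: Service {
    func run() async throws {
        let (stream, continuation) = AsyncStream.makeStream(of: Void.self)
        await withGracefulShutdownHandler {
            // Idle until the graceful shutdown handler finishes the stream.
            for await _ in stream {}
        } onGracefulShutdown: {
            continuation.finish()
        }
    }
}

@main
struct Example {
    static func main() async throws {
        let group = ServiceGroup(
            configuration: .init(
                services: [
                    .init(service: SteadyService()),
                    // With this patch, an error thrown here first gracefully
                    // shuts down SteadyService and is then rethrown by run().
                    .init(
                        service: FlakyService(),
                        failureTerminationBehavior: .gracefullyShutdownGroup
                    ),
                ],
                gracefulShutdownSignals: [.sigterm],
                logger: Logger(label: "example")
            )
        )

        do {
            try await group.run()
        } catch is FlakyService.FlakyError {
            // Before this patch the error was swallowed and run() returned
            // normally; now it surfaces after the graceful shutdown completes.
        }
    }
}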