# limitations under the License.
import os
import unittest

from launch import LaunchDescription
from launch.actions import ExecuteProcess
from launch.actions import OpaqueFunction

import launch_testing
import launch_testing.asserts
import launch_testing.tools
23
24
24
def _add_output_format_case(launch_description, processes_to_test, executable,
                            output_format, name):
    """Schedule one run of *executable* under a custom console output format.

    Copies the current environment, sets RCUTILS_CONSOLE_OUTPUT_FORMAT to
    *output_format*, adds an ExecuteProcess action named *name* to
    *launch_description*, and records *name* in *processes_to_test* so the
    post-shutdown checks can find its output.
    """
    env = dict(os.environ)
    env['RCUTILS_CONSOLE_OUTPUT_FORMAT'] = output_format
    launch_description.add_action(ExecuteProcess(
        cmd=[executable], env=env, name=name, output='screen'
    ))
    processes_to_test.append(name)


def generate_test_description(ready_fn):
    """Launch test_logging_long_messages under several console output formats.

    :param ready_fn: callback invoked (via an OpaqueFunction action) once all
        processes have been added to the launch description, signalling
        launch_testing that the fixture is ready.
    :return: tuple of the LaunchDescription and a context dict exposing
        ``processes_to_test`` (the names of the launched processes) to the
        post-shutdown test class.
    """
    processes_to_test = []

    launch_description = LaunchDescription()

    # Re-use the test_logging_long_messages test binary and modify the output
    # format from an environment variable.
    executable = os.path.join(os.getcwd(), 'test_logging_long_messages')

    # Doubled braces, literal text, repeated tokens and a trailing lone brace.
    _add_output_format_case(
        launch_description, processes_to_test, executable,
        '[{{name}}].({severity}) output: {file_name}:{line_number} {message}, again: {message} ({function_name}()){',  # noqa
        'test_logging_output_format_long')

    # This custom output is to check different edge cases of the output format
    # string parsing.
    _add_output_format_case(
        launch_description, processes_to_test, executable,
        '{}}].({unknown_token}) {{{{',
        'test_logging_output_format_edge_cases')

    # This custom output is to check that there are no issues when no tokens
    # are used.
    _add_output_format_case(
        launch_description, processes_to_test, executable,
        'no_tokens',
        'test_logging_output_format_no_tokens')

    # This custom output is to check that time stamps work correctly.
    _add_output_format_case(
        launch_description, processes_to_test, executable,
        "'{time}' '{time_as_nanoseconds}'",
        'test_logging_output_timestamps')

    # Unblock launch_testing once everything above has been scheduled.
    launch_description.add_action(
        OpaqueFunction(function=lambda context: ready_fn())
    )

    return launch_description, {'processes_to_test': processes_to_test}
80
@launch_testing.post_shutdown_test()
class TestLoggingOutputFormatAfterShutdown(unittest.TestCase):
    """Checks run after every launched process has exited."""

    def test_logging_output(self, proc_output, processes_to_test):
        """Test all executables output against expectations.

        ``processes_to_test`` is injected from the dict returned by
        ``generate_test_description``.
        """
        for process_name in processes_to_test:
            # NOTE(review): expected-output files appear to live next to this
            # test file, one per process name — confirm against the package's
            # installed test data.
            launch_testing.asserts.assertInStdout(
                proc_output,
                expected_output=launch_testing.tools.expected_output_from_file(
                    path=os.path.join(os.path.dirname(__file__), process_name)
                ),
                process=process_name
            )

    def test_processes_exit_codes(self, proc_info):
        """Test that all executables finished cleanly."""
        launch_testing.asserts.assertExitCodes(proc_info)
0 commit comments