diff --git a/astrobee/survey_manager/survey_planner/data/jem_survey_dynamic.yaml b/astrobee/survey_manager/survey_planner/data/jem_survey_dynamic.yaml index 316a3101..f7d8f9ae 100644 --- a/astrobee/survey_manager/survey_planner/data/jem_survey_dynamic.yaml +++ b/astrobee/survey_manager/survey_planner/data/jem_survey_dynamic.yaml @@ -36,8 +36,10 @@ goals: # This let_other_robot_reach goal is effectively a very specific kind of between-robot ordering # constraint. It tells honey to let bumble get to bay 5 before taking its first panorama. Without # this constraint, POPF produces a very inefficient plan where bumble never leaves the dock until -# after honey finishes all its tasks and returns to dock. -- {type: let_other_robot_reach, robot: honey, order: 0, location: jem_bay5} +# after honey finishes all its tasks and returns to dock. (It's safe to comment this out if the +# planner doesn't need the hint.) +# - {type: let_other_robot_reach, robot: honey, order: 0, location: jem_bay5} + - {type: panorama, robot: honey, order: 1, location: jem_bay7} - {type: panorama, robot: honey, order: 2, location: jem_bay6} - {type: panorama, robot: honey, order: 3, location: jem_bay5} diff --git a/astrobee/survey_manager/survey_planner/data/sample_output_plan.png b/astrobee/survey_manager/survey_planner/data/sample_output_plan.png new file mode 100644 index 00000000..61ea2afb Binary files /dev/null and b/astrobee/survey_manager/survey_planner/data/sample_output_plan.png differ diff --git a/astrobee/survey_manager/survey_planner/data/sample_output_plan.txt b/astrobee/survey_manager/survey_planner/data/sample_output_plan.txt index b25de7b0..8672ee07 100644 --- a/astrobee/survey_manager/survey_planner/data/sample_output_plan.txt +++ b/astrobee/survey_manager/survey_planner/data/sample_output_plan.txt @@ -1,154 +1,31 @@ -$ (ulimit -t 10 && time ./optic_planner pddl/domain_survey.pddl pddl/problem_jem_survey.pddl) -Number of literals: 215 -Constructing lookup tables: [10%] [20%] [30%] 
[40%] [50%] [60%] [70%] [80%] [90%] [100%] -Post filtering unreachable actions: [10%] [20%] [30%] [40%] [50%] [60%] [70%] [80%] [90%] [100%] -(robot-order bumble) has finite bounds: [-1.000,4.000] -(robot-order honey) has finite bounds: [-1.000,4.000] -Have identified that smaller values of (robot-order bumble) are preferable -Have identified that smaller values of (robot-order honey) are preferable -Seeing if metric is defined in terms of task vars or a minimal value of makespan -- Yes it is -Recognised a monotonic-change-induced limit on -1.000* -- Must be >= the metric -Looking for achievers for goal index 0, fact (completed-panorama bumble o0 jem_bay4) with fID 83 - (panorama bumble o0 jem_bay4) -For limits: literal goal index 0, fact (completed-panorama bumble o0 jem_bay4), could be achieved by operator (panorama bumble o0 jem_bay4), which has other interesting effects (including one on (robot-available bumble) ) -Looking for achievers for goal index 1, fact (completed-panorama bumble o1 jem_bay3) with fID 75 - (panorama bumble o1 jem_bay3) -For limits: literal goal index 1, fact (completed-panorama bumble o1 jem_bay3), could be achieved by operator (panorama bumble o1 jem_bay3), which has other interesting effects (including one on (robot-available bumble) ) -Looking for achievers for goal index 2, fact (completed-panorama bumble o2 jem_bay2) with fID 67 - (panorama bumble o2 jem_bay2) -For limits: literal goal index 2, fact (completed-panorama bumble o2 jem_bay2), could be achieved by operator (panorama bumble o2 jem_bay2), which has other interesting effects (including one on (robot-available bumble) ) -Looking for achievers for goal index 3, fact (completed-panorama bumble o3 jem_bay1) with fID 59 - (panorama bumble o3 jem_bay1) -For limits: literal goal index 3, fact (completed-panorama bumble o3 jem_bay1), could be achieved by operator (panorama bumble o3 jem_bay1), which has other interesting effects (including one on (robot-available bumble) ) -Looking 
for achievers for goal index 4, fact (completed-stereo bumble o4 jem_bay1 jem_bay4) with fID 123 - (stereo bumble o4 jem_bay1 jem_bay4 jem_bay5 jem_bay3) (stereo bumble o4 jem_bay1 jem_bay4 jem_bay3 jem_bay5) -For limits: literal goal index 4, fact (completed-stereo bumble o4 jem_bay1 jem_bay4), could be achieved by operator (stereo bumble o4 jem_bay1 jem_bay4 jem_bay5 jem_bay3), which has other interesting effects (including one on (location-available jem_bay4) ) -For limits: literal goal index 5, fact (robot-at bumble berth1), is static or a precondition -Looking for achievers for goal index 6, fact (completed-let-other-robot-reach honey o0 jem_bay5) with fID 155 - (let-other-robot-reach honey o0 jem_bay5 bumble) - Looking at numeric effects of (let-other-robot-reach honey o0 jem_bay5 bumble): 1 and 0 -For limits: literal goal index 6, fact (completed-let-other-robot-reach honey o0 jem_bay5), could be achieved by operator (let-other-robot-reach honey o0 jem_bay5 bumble), which has a non-trivial numeric effect ((robot-order honey) = 0.000) -Looking for achievers for goal index 7, fact (completed-panorama honey o1 jem_bay7) with fID 116 - (panorama honey o1 jem_bay7) -For limits: literal goal index 7, fact (completed-panorama honey o1 jem_bay7), could be achieved by operator (panorama honey o1 jem_bay7), which has other interesting effects (including one on (robot-available honey) ) -Looking for achievers for goal index 8, fact (completed-panorama honey o2 jem_bay6) with fID 108 - (panorama honey o2 jem_bay6) -For limits: literal goal index 8, fact (completed-panorama honey o2 jem_bay6), could be achieved by operator (panorama honey o2 jem_bay6), which has other interesting effects (including one on (robot-available honey) ) -Looking for achievers for goal index 9, fact (completed-panorama honey o3 jem_bay5) with fID 100 - (panorama honey o3 jem_bay5) -For limits: literal goal index 9, fact (completed-panorama honey o3 jem_bay5), could be achieved by operator 
(panorama honey o3 jem_bay5), which has other interesting effects (including one on (robot-available honey) ) -Looking for achievers for goal index 10, fact (completed-stereo honey o4 jem_bay7 jem_bay4) with fID 124 - (stereo honey o4 jem_bay7 jem_bay4 jem_bay5 jem_bay3) (stereo honey o4 jem_bay7 jem_bay4 jem_bay3 jem_bay5) -For limits: literal goal index 10, fact (completed-stereo honey o4 jem_bay7 jem_bay4), could be achieved by operator (stereo honey o4 jem_bay7 jem_bay4 jem_bay5 jem_bay3), which has other interesting effects (including one on (location-available jem_bay4) ) -For limits: literal goal index 11, fact (robot-at honey berth2), is static or a precondition -Assignment numeric effect ((robot-order bumble) = 0.000) makes effects on 0 be order-dependent -Assignment numeric effect ((robot-order honey) = 0.000) makes effects on 1 be order-dependent -Assignment numeric effect ((robot-order bumble) = 1.000) makes effects on 0 be order-dependent -Assignment numeric effect ((robot-order honey) = 1.000) makes effects on 1 be order-dependent -Assignment numeric effect ((robot-order bumble) = 2.000) makes effects on 0 be order-dependent -Assignment numeric effect ((robot-order honey) = 2.000) makes effects on 1 be order-dependent -Assignment numeric effect ((robot-order bumble) = 3.000) makes effects on 0 be order-dependent -Assignment numeric effect ((robot-order honey) = 3.000) makes effects on 1 be order-dependent -Assignment numeric effect ((robot-order bumble) = 4.000) makes effects on 0 be order-dependent -Assignment numeric effect ((robot-order honey) = 4.000) makes effects on 1 be order-dependent -27% of the ground temporal actions in this problem are compression-safe -Initial heuristic = 29.000, admissible cost estimate 930.008 -b (28.000 | 630.001) -Resorting to best-first search -Running WA* with W = 5.000, not restarting with goal states -b (28.000 | 630.001)b (28.000 | 70.003)b (27.000 | 870.005)b (26.000 | 870.005)b (25.000 | 880.005)b (24.000 | 
880.005)b (23.000 | 1670.007)b (22.000 | 1670.007)b (21.000 | 1680.007)b (20.000 | 1680.007)b (19.000 | 2470.009)b (18.000 | 2470.009)b (17.000 | 2470.009)b (16.000 | 3270.011)b (15.000 | 3270.011)b (14.000 | 3870.012)b (13.000 | 3890.013)b (12.000 | 4470.013)b (12.000 | 3930.015)b (11.000 | 3950.016)b (10.000 | 3970.017)b (9.000 | 3990.018)b (7.000 | 4020.019)b (6.000 | 4650.021)b (4.000 | 4890.024)b (3.000 | 4910.025)b (2.000 | 5510.026)b (1.000 | 5510.026)(G) -; LP calculated the cost +$ ./tools/survey_planner.py +0.000: (undock bumble berth1 jem_bay7 jem_bay6 jem_bay8) [30.000] +30.001: (move bumble jem_bay7 jem_bay6 jem_bay5) [20.000] +50.002: (move bumble jem_bay6 jem_bay5 jem_bay4) [20.000] +70.003: (move bumble jem_bay5 jem_bay4 jem_bay3) [20.000] +70.004: (undock honey berth2 jem_bay7 jem_bay6 jem_bay8) [30.000] +90.004: (panorama bumble o0 jem_bay4) [780.000] +100.005: (panorama honey o1 jem_bay7) [780.000] +870.005: (move bumble jem_bay4 jem_bay3 jem_bay2) [20.000] +880.006: (move honey jem_bay7 jem_bay6 jem_bay5) [20.000] +890.006: (panorama bumble o1 jem_bay3) [780.000] +900.007: (panorama honey o2 jem_bay6) [780.000] +1670.007: (move bumble jem_bay3 jem_bay2 jem_bay1) [20.000] +1680.008: (move honey jem_bay6 jem_bay5 jem_bay4) [20.000] +1690.008: (panorama bumble o2 jem_bay2) [780.000] +1700.009: (panorama honey o3 jem_bay5) [780.000] +2470.009: (move bumble jem_bay2 jem_bay1 jem_bay0) [20.000] +2480.010: (move honey jem_bay5 jem_bay6 jem_bay7) [20.000] +2490.010: (panorama bumble o3 jem_bay1) [780.000] +2500.011: (move honey jem_bay6 jem_bay7 jem_bay8) [20.000] +2520.012: (stereo honey o4 jem_bay7 jem_bay4 jem_bay3 jem_bay5) [600.000] +3120.013: (dock honey jem_bay7 berth2) [30.000] +3270.011: (stereo bumble o4 jem_bay1 jem_bay4 jem_bay3 jem_bay5) [600.000] +3870.012: (move bumble jem_bay1 jem_bay2 jem_bay3) [20.000] +3890.013: (move bumble jem_bay2 jem_bay3 jem_bay4) [20.000] +3910.014: (move bumble jem_bay3 jem_bay4 jem_bay5) [20.000] +3930.015: 
(move bumble jem_bay4 jem_bay5 jem_bay6) [20.000] +3950.016: (move bumble jem_bay5 jem_bay6 jem_bay7) [20.000] +3970.017: (move bumble jem_bay6 jem_bay7 jem_bay8) [20.000] +3990.018: (dock bumble jem_bay7 berth1) [30.000] -; Plan found with metric 5540.027 -; Theoretical reachable cost 5540.028 -; States evaluated so far: 504 -; States pruned based on pre-heuristic cost lower bound: 0 -; Time 1.64 -0.000: (undock bumble berth1 jem_bay7 jem_bay8 jem_bay6) [30.000] -30.001: (move bumble jem_bay7 jem_bay6 jem_bay5) [20.000] -50.002: (move bumble jem_bay6 jem_bay5 jem_bay4) [20.000] -70.003: (let-other-robot-reach honey o0 jem_bay5 bumble) [0.000] -70.004: (move bumble jem_bay5 jem_bay4 jem_bay3) [20.000] -70.004: (undock honey berth2 jem_bay7 jem_bay8 jem_bay6) [30.000] -90.005: (panorama bumble o0 jem_bay4) [780.000] -100.005: (panorama honey o1 jem_bay7) [780.000] -870.006: (move bumble jem_bay4 jem_bay3 jem_bay2) [20.000] -880.006: (move honey jem_bay7 jem_bay6 jem_bay5) [20.000] -890.007: (panorama bumble o1 jem_bay3) [780.000] -900.007: (panorama honey o2 jem_bay6) [780.000] -1670.008: (move bumble jem_bay3 jem_bay2 jem_bay1) [20.000] -1680.008: (move honey jem_bay6 jem_bay7 jem_bay8) [20.000] -1690.009: (panorama bumble o2 jem_bay2) [780.000] -1700.009: (dock honey jem_bay7 berth2) [30.000] -2470.010: (move bumble jem_bay2 jem_bay1 jem_bay0) [20.000] -2490.011: (panorama bumble o3 jem_bay1) [780.000] -3270.012: (stereo bumble o4 jem_bay1 jem_bay4 jem_bay5 jem_bay3) [600.000] -3870.013: (move bumble jem_bay1 jem_bay2 jem_bay3) [20.000] -3890.014: (move bumble jem_bay2 jem_bay3 jem_bay4) [20.000] -3910.015: (move bumble jem_bay3 jem_bay4 jem_bay5) [20.000] -3930.016: (move bumble jem_bay4 jem_bay5 jem_bay6) [20.000] -3950.017: (move bumble jem_bay5 jem_bay6 jem_bay7) [20.000] -3970.018: (move bumble jem_bay6 jem_bay7 jem_bay8) [20.000] -3990.019: (dock bumble jem_bay7 berth1) [30.000] -4020.020: (undock honey berth2 jem_bay7 jem_bay8 jem_bay6) [30.000] -4050.021: 
(move honey jem_bay7 jem_bay6 jem_bay5) [20.000] -4070.022: (move honey jem_bay6 jem_bay5 jem_bay4) [20.000] -4090.023: (panorama honey o3 jem_bay5) [780.000] -4870.024: (move honey jem_bay5 jem_bay6 jem_bay7) [20.000] -4890.025: (move honey jem_bay6 jem_bay7 jem_bay8) [20.000] -4910.026: (stereo honey o4 jem_bay7 jem_bay4 jem_bay5 jem_bay3) [600.000] -5510.027: (dock honey jem_bay7 berth2) [30.000] - - * All goal deadlines now no later than 5540.027 -b (1.000 | 5470.024)(G) -; LP calculated the cost - -; Plan found with metric 5500.025 -; Theoretical reachable cost 5500.026 -; States evaluated so far: 866 -; States pruned based on pre-heuristic cost lower bound: 1 -; Time 2.85 -0.000: (undock bumble berth1 jem_bay7 jem_bay8 jem_bay6) [30.000] -30.001: (move bumble jem_bay7 jem_bay6 jem_bay5) [20.000] -50.002: (move bumble jem_bay6 jem_bay5 jem_bay4) [20.000] -70.003: (let-other-robot-reach honey o0 jem_bay5 bumble) [0.000] -70.004: (move bumble jem_bay5 jem_bay4 jem_bay3) [20.000] -70.004: (undock honey berth2 jem_bay7 jem_bay8 jem_bay6) [30.000] -90.005: (panorama bumble o0 jem_bay4) [780.000] -100.005: (panorama honey o1 jem_bay7) [780.000] -870.006: (move bumble jem_bay4 jem_bay3 jem_bay2) [20.000] -880.006: (move honey jem_bay7 jem_bay6 jem_bay5) [20.000] -890.007: (panorama bumble o1 jem_bay3) [780.000] -900.007: (panorama honey o2 jem_bay6) [780.000] -1670.008: (move bumble jem_bay3 jem_bay2 jem_bay1) [20.000] -1690.009: (panorama bumble o2 jem_bay2) [780.000] -2470.010: (move bumble jem_bay2 jem_bay1 jem_bay0) [20.000] -2490.011: (panorama bumble o3 jem_bay1) [780.000] -3270.012: (stereo bumble o4 jem_bay1 jem_bay4 jem_bay5 jem_bay3) [600.000] -3870.013: (move bumble jem_bay1 jem_bay2 jem_bay3) [20.000] -3870.013: (move honey jem_bay6 jem_bay5 jem_bay4) [20.000] -3890.014: (move bumble jem_bay2 jem_bay3 jem_bay4) [20.000] -3890.014: (panorama honey o3 jem_bay5) [780.000] -4670.015: (move honey jem_bay5 jem_bay6 jem_bay7) [20.000] -4690.016: (move bumble 
jem_bay3 jem_bay4 jem_bay5) [20.000] -4690.016: (move honey jem_bay6 jem_bay7 jem_bay8) [20.000] -4710.017: (dock honey jem_bay7 berth2) [30.000] -4710.017: (move bumble jem_bay4 jem_bay3 jem_bay2) [20.000] -4730.018: (move bumble jem_bay3 jem_bay2 jem_bay1) [20.000] -4740.018: (undock honey berth2 jem_bay7 jem_bay8 jem_bay6) [30.000] -4770.019: (stereo honey o4 jem_bay7 jem_bay4 jem_bay5 jem_bay3) [600.000] -5370.020: (dock honey jem_bay7 berth2) [30.000] -5370.020: (move bumble jem_bay2 jem_bay3 jem_bay4) [20.000] -5390.021: (move bumble jem_bay3 jem_bay4 jem_bay5) [20.000] -5410.022: (move bumble jem_bay4 jem_bay5 jem_bay6) [20.000] -5430.023: (move bumble jem_bay5 jem_bay6 jem_bay7) [20.000] -5450.024: (move bumble jem_bay6 jem_bay7 jem_bay8) [20.000] -5470.025: (dock bumble jem_bay7 berth1) [30.000] - - * All goal deadlines now no later than 5500.025 - -real 0m10.009s -user 0m9.767s -sys 0m0.240s diff --git a/astrobee/survey_manager/survey_planner/data/sample_output_plan.yaml b/astrobee/survey_manager/survey_planner/data/sample_output_plan.yaml index 7c624cb1..45b0da90 100644 --- a/astrobee/survey_manager/survey_planner/data/sample_output_plan.yaml +++ b/astrobee/survey_manager/survey_planner/data/sample_output_plan.yaml @@ -25,7 +25,7 @@ - -8.0 - 4.8 duration_seconds: '20.000' -- start_time_seconds: '70.004' +- start_time_seconds: '70.003' action: type: move robot: bumble @@ -41,7 +41,7 @@ type: undock robot: honey duration_seconds: '30.000' -- start_time_seconds: '90.005' +- start_time_seconds: '90.004' action: type: panorama robot: bumble @@ -61,7 +61,7 @@ - -9.7 - 4.8 duration_seconds: '780.000' -- start_time_seconds: '870.006' +- start_time_seconds: '870.005' action: type: move robot: bumble @@ -83,7 +83,7 @@ - -9.0 - 4.8 duration_seconds: '20.000' -- start_time_seconds: '890.007' +- start_time_seconds: '890.006' action: type: panorama robot: bumble @@ -103,7 +103,7 @@ - -9.0 - 4.8 duration_seconds: '780.000' -- start_time_seconds: '1670.008' +- 
start_time_seconds: '1670.007' action: type: move robot: bumble @@ -114,89 +114,49 @@ - -5.0 - 4.8 duration_seconds: '20.000' -- start_time_seconds: '1690.009' - action: - type: panorama - robot: bumble - location_name: jem_bay2 - location_pos: - - 11.0 - - -5.0 - - 4.8 - duration_seconds: '780.000' -- start_time_seconds: '2470.010' +- start_time_seconds: '1680.008' action: type: move - robot: bumble - from_name: jem_bay2 - to_name: jem_bay1 + robot: honey + from_name: jem_bay6 + to_name: jem_bay5 to_pos: - 11.0 - - -4.0 + - -8.0 - 4.8 duration_seconds: '20.000' -- start_time_seconds: '2490.011' +- start_time_seconds: '1690.008' action: type: panorama robot: bumble - location_name: jem_bay1 + location_name: jem_bay2 location_pos: - 11.0 - - -4.0 - - 4.8 - duration_seconds: '780.000' -- start_time_seconds: '3270.012' - action: - type: stereo - robot: bumble - fplan: jem_stereo_mapping_bay1_to_bay3.fplan - base_name: jem_bay1 - bound_name: jem_bay4 - duration_seconds: '600.000' -- start_time_seconds: '3870.013' - action: - type: move - robot: bumble - from_name: jem_bay1 - to_name: jem_bay2 - to_pos: - - 11.0 - -5.0 - 4.8 - duration_seconds: '20.000' -- start_time_seconds: '3870.013' + duration_seconds: '780.000' +- start_time_seconds: '1700.009' action: - type: move + type: panorama robot: honey - from_name: jem_bay6 - to_name: jem_bay5 - to_pos: + location_name: jem_bay5 + location_pos: - 11.0 - -8.0 - 4.8 - duration_seconds: '20.000' -- start_time_seconds: '3890.014' + duration_seconds: '780.000' +- start_time_seconds: '2470.009' action: type: move robot: bumble from_name: jem_bay2 - to_name: jem_bay3 + to_name: jem_bay1 to_pos: - 11.0 - - -6.0 + - -4.0 - 4.8 duration_seconds: '20.000' -- start_time_seconds: '3890.014' - action: - type: panorama - robot: honey - location_name: jem_bay5 - location_pos: - - 11.0 - - -8.0 - - 4.8 - duration_seconds: '780.000' -- start_time_seconds: '4670.015' +- start_time_seconds: '2480.010' action: type: move robot: honey @@ 
-207,18 +167,17 @@ - -9.0 - 4.8 duration_seconds: '20.000' -- start_time_seconds: '4690.016' +- start_time_seconds: '2490.010' action: - type: move + type: panorama robot: bumble - from_name: jem_bay3 - to_name: jem_bay4 - to_pos: + location_name: jem_bay1 + location_pos: - 11.0 - - -7.0 + - -4.0 - 4.8 - duration_seconds: '20.000' -- start_time_seconds: '4690.016' + duration_seconds: '780.000' +- start_time_seconds: '2500.011' action: type: move robot: honey @@ -229,54 +188,40 @@ - -9.7 - 4.8 duration_seconds: '20.000' -- start_time_seconds: '4710.017' +- start_time_seconds: '2520.012' + action: + type: stereo + robot: honey + fplan: jem_stereo_mapping_bay4_to_bay7.fplan + base_name: jem_bay7 + bound_name: jem_bay4 + duration_seconds: '600.000' +- start_time_seconds: '3120.013' action: type: dock robot: honey berth: berth2 duration_seconds: '30.000' -- start_time_seconds: '4710.017' +- start_time_seconds: '3270.011' action: - type: move + type: stereo robot: bumble - from_name: jem_bay4 - to_name: jem_bay3 - to_pos: - - 11.0 - - -6.0 - - 4.8 - duration_seconds: '20.000' -- start_time_seconds: '4730.018' + fplan: jem_stereo_mapping_bay1_to_bay3.fplan + base_name: jem_bay1 + bound_name: jem_bay4 + duration_seconds: '600.000' +- start_time_seconds: '3870.012' action: type: move robot: bumble - from_name: jem_bay3 + from_name: jem_bay1 to_name: jem_bay2 to_pos: - 11.0 - -5.0 - 4.8 duration_seconds: '20.000' -- start_time_seconds: '4740.018' - action: - type: undock - robot: honey - duration_seconds: '30.000' -- start_time_seconds: '4770.019' - action: - type: stereo - robot: honey - fplan: jem_stereo_mapping_bay4_to_bay7.fplan - base_name: jem_bay7 - bound_name: jem_bay4 - duration_seconds: '600.000' -- start_time_seconds: '5370.020' - action: - type: dock - robot: honey - berth: berth2 - duration_seconds: '30.000' -- start_time_seconds: '5370.020' +- start_time_seconds: '3890.013' action: type: move robot: bumble @@ -287,7 +232,7 @@ - -6.0 - 4.8 duration_seconds: 
'20.000' -- start_time_seconds: '5390.021' +- start_time_seconds: '3910.014' action: type: move robot: bumble @@ -298,7 +243,7 @@ - -7.0 - 4.8 duration_seconds: '20.000' -- start_time_seconds: '5410.022' +- start_time_seconds: '3930.015' action: type: move robot: bumble @@ -309,7 +254,7 @@ - -8.0 - 4.8 duration_seconds: '20.000' -- start_time_seconds: '5430.023' +- start_time_seconds: '3950.016' action: type: move robot: bumble @@ -320,7 +265,7 @@ - -9.0 - 4.8 duration_seconds: '20.000' -- start_time_seconds: '5450.024' +- start_time_seconds: '3970.017' action: type: move robot: bumble @@ -331,7 +276,7 @@ - -9.7 - 4.8 duration_seconds: '20.000' -- start_time_seconds: '5470.025' +- start_time_seconds: '3990.018' action: type: dock robot: bumble diff --git a/astrobee/survey_manager/survey_planner/pddl/domain_survey.pddl b/astrobee/survey_manager/survey_planner/pddl/domain_survey.pddl index 5e197671..00f50c7d 100644 --- a/astrobee/survey_manager/survey_planner/pddl/domain_survey.pddl +++ b/astrobee/survey_manager/survey_planner/pddl/domain_survey.pddl @@ -72,7 +72,7 @@ ;; completed-panorama: The goal to add if you want the plan to include collecting a ;; panorama. For now, goals specify ?robot and ?order parameters that constrain ;; multi-robot task allocation and task ordering. - (completed-panorama ?robot - robot ?order - order ?location - location ) + (completed-panorama ?robot - robot ?order - order ?location - location) ;; completed-stereo: The goal to add if you want the plan to include collecting a stereo ;; survey. For now, goals specify ?robot and ?order parameters that constrain multi-robot @@ -83,13 +83,13 @@ ;; used for collision checking. It's assumed that ?base and ?bound are not adjacent ;; locations. If future stereo surveys violate these assumptions the model will need to be ;; revisited. 
- (completed-stereo ?robot - robot ?order - order ?base ?bound - location ) + (completed-stereo ?robot - robot ?order - order ?base ?bound - location) ;; completed-let-other-robot-reach: The goal to add if you want one robot to wait for the ;; other to reach a certain location before pursuing its remaining goals (ones with larger ;; ?order values). This basically enables a user to provide a specific kind of ;; between-robots ordering hint to the planner. - (completed-let-other-robot-reach ?robot - robot ?order - order ?loc - location ) + (completed-let-other-robot-reach ?robot - robot ?order - order ?loc - location) ) (:functions @@ -326,7 +326,7 @@ ?other-loc - location ;; location other robot needs to reach ?other-robot - robot ) - :duration (= ?duration 0) + :duration (= ?duration 0.001) ;; VAL won't accept 0-duration actions :condition (and ;; Check robot mutex diff --git a/astrobee/survey_manager/survey_planner/pddl/jem_survey_template.pddl b/astrobee/survey_manager/survey_planner/pddl/jem_survey_template.pddl index 8cc991f1..4e44cdfb 100644 --- a/astrobee/survey_manager/survey_planner/pddl/jem_survey_template.pddl +++ b/astrobee/survey_manager/survey_planner/pddl/jem_survey_template.pddl @@ -26,9 +26,3 @@ {{ dynamic_fluents }} ) ;; end :init ) ;; end problem - -;; Include raw high-level config in problem in case an (ISAAC-custom) planner prefers to use it. 
- -;; BEGIN CONFIG -{{ config }} -;; END CONFIG diff --git a/astrobee/survey_manager/survey_planner/pddl/problem_jem_survey.pddl b/astrobee/survey_manager/survey_planner/pddl/problem_jem_survey.pddl index d099ca79..04aca907 100644 --- a/astrobee/survey_manager/survey_planner/pddl/problem_jem_survey.pddl +++ b/astrobee/survey_manager/survey_planner/pddl/problem_jem_survey.pddl @@ -22,7 +22,6 @@ (completed-panorama bumble o3 jem_bay1) (completed-stereo bumble o4 jem_bay1 jem_bay4) (robot-at bumble berth1) - (completed-let-other-robot-reach honey o0 jem_bay5) (completed-panorama honey o1 jem_bay7) (completed-panorama honey o2 jem_bay6) (completed-panorama honey o3 jem_bay5) @@ -162,121 +161,3 @@ (= (robot-order honey) -1) ) ;; end :init ) ;; end problem - -;; Include raw high-level config in problem in case an (ISAAC-custom) planner prefers to use it. - -;; BEGIN CONFIG -;; # Copyright (c) 2023, United States Government, as represented by the -;; # Administrator of the National Aeronautics and Space Administration. -;; # -;; # All rights reserved. -;; # -;; # The "ISAAC - Integrated System for Autonomous and Adaptive Caretaking -;; # platform" software is licensed under the Apache License, Version 2.0 -;; # (the "License"); you may not use this file except in compliance with the -;; # License. You may obtain a copy of the License at -;; # -;; # http://www.apache.org/licenses/LICENSE-2.0 -;; # -;; # Unless required by applicable law or agreed to in writing, software -;; # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -;; # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -;; # License for the specific language governing permissions and limitations -;; # under the License. -;; -;; # Static configuration info used when generating a PDDL problem and also when executing actions in a -;; # PDDL plan. 
This info should be static in the sense that it nominally doesn't change during an ISS -;; # activity, so the survey manager doesn't have to modify it. However, an edge case is that an -;; # operator might want to manually edit something in here (like add a new symbolic location or nudge -;; # the position of a named bay away from an obstacle) and restart the survey manager. On the other -;; # hand, info that is *expected* to change as part of the survey manager conops belongs in -;; # jem_survey_dynamic.yaml. -;; -;; # Useful reference for positions and stereo survey trajectories: -;; # https://babelfish.arc.nasa.gov/confluence/display/FFOPS/ISAAC+Phase+1X+Activity+9+Ground+Procedure -;; -;; bays: -;; # 3D coordinates for symbolic bays in ISS Analysis Coordinate System used by Astrobee -;; jem_bay1: [11.0, -4.0, 4.8] -;; jem_bay2: [11.0, -5.0, 4.8] -;; jem_bay3: [11.0, -6.0, 4.8] -;; jem_bay4: [11.0, -7.0, 4.8] -;; jem_bay5: [11.0, -8.0, 4.8] -;; jem_bay6: [11.0, -9.0, 4.8] -;; jem_bay7: [11.0, -9.7, 4.8] -;; -;; bogus_bays: [jem_bay0, jem_bay8] -;; berths: [berth1, berth2] -;; robots: [bumble, honey] -;; -;; stereo: -;; # Meta-data about stereo survey options -;; jem_bay1_to_bay3: -;; # fplan: Name of external fplan specification of trajectory in astrobee_ops/gds/plans/ISAAC/ -;; fplan: "jem_stereo_mapping_bay1_to_bay3.fplan" -;; # base_location: Where trajectory starts and ends for planning purposes (rough location, not exact) -;; base_location: jem_bay1 -;; # bound_location: The other end of the interval covered by the trajectory, for planner collision -;; # check purposes. (Note a trajectory may fly a bit into a bay that it doesn't claim to cover. -;; # The two surveys that cover the module purposefully overlap.) 
-;; bound_location: jem_bay4 -;; jem_bay4_to_bay7: -;; fplan: "jem_stereo_mapping_bay4_to_bay7.fplan" -;; base_location: jem_bay7 -;; bound_location: jem_bay4 -;; # Copyright (c) 2023, United States Government, as represented by the -;; # Administrator of the National Aeronautics and Space Administration. -;; # -;; # All rights reserved. -;; # -;; # The "ISAAC - Integrated System for Autonomous and Adaptive Caretaking -;; # platform" software is licensed under the Apache License, Version 2.0 -;; # (the "License"); you may not use this file except in compliance with the -;; # License. You may obtain a copy of the License at -;; # -;; # http://www.apache.org/licenses/LICENSE-2.0 -;; # -;; # Unless required by applicable law or agreed to in writing, software -;; # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -;; # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -;; # License for the specific language governing permissions and limitations -;; # under the License. -;; -;; # Example dynamic configuration info used when generating a PDDL problem. For now, this is goal -;; # conditions and initial state. A likely conops is that the initial version of this file for a -;; # specific activity would be hand-generated, but it might later be automatically regenerated by the -;; # survey manager when a replan is needed (remove completed/failed goals, add retry goals, update -;; # initial state to match actual current state, etc.) See also jem_survey_static.yaml. 
-;; -;; goals: -;; -;; - {type: panorama, robot: bumble, order: 0, location: jem_bay4} -;; - {type: panorama, robot: bumble, order: 1, location: jem_bay3} -;; - {type: panorama, robot: bumble, order: 2, location: jem_bay2} -;; - {type: panorama, robot: bumble, order: 3, location: jem_bay1} -;; - {type: stereo, robot: bumble, order: 4, trajectory: jem_bay1_to_bay3} -;; -;; # We want Bumble to return to its berth at the end of the run, but adding this goal causes POPF to -;; # get confused and greatly increase the total run time. For some reason, it doesn't notice it can -;; # use the same plan as without this goal and then add some motion actions at the end to achieve this -;; # goal. Instead, it falls back to only undocking one robot at a time, which slows things down by -;; # about 2x. -;; - {type: robot_at, robot: bumble, location: berth1} -;; -;; - {type: let_other_robot_reach, robot: honey, order: 0, location: jem_bay5} -;; - {type: panorama, robot: honey, order: 1, location: jem_bay7} -;; - {type: panorama, robot: honey, order: 2, location: jem_bay6} -;; - {type: panorama, robot: honey, order: 3, location: jem_bay5} -;; -;; # This is another objective we want to include that for some reason causes POPF to fail to generate -;; # a plan (hang indefinitely). No obvious reason why it should cause a problem. 
-;; - {type: stereo, robot: honey, order: 4, trajectory: jem_bay4_to_bay7} -;; -;; - {type: robot_at, robot: honey, location: berth2} -;; -;; init: -;; bumble: -;; location: berth1 -;; honey: -;; location: berth2 -;; END CONFIG diff --git a/astrobee/survey_manager/survey_planner/tools/mypy.ini b/astrobee/survey_manager/survey_planner/tools/mypy.ini new file mode 100644 index 00000000..c967aff3 --- /dev/null +++ b/astrobee/survey_manager/survey_planner/tools/mypy.ini @@ -0,0 +1,16 @@ +# Config for mypy Python type checker - https://mypy.readthedocs.io/ + +[mypy] +# (suppress warning about this section being missing) + +[mypy-pyparsing] +# suppress error "No library stub for module 'pyparsing'" +ignore_missing_imports = True + +[mypy-numpy] +# suppress error "No library stub for module 'numpy'" +ignore_missing_imports = True + +[mypy-matplotlib] +# suppress error "No library stub for module 'matplotlib'" +ignore_missing_imports = True diff --git a/astrobee/survey_manager/survey_planner/tools/problem_generator.py b/astrobee/survey_manager/survey_planner/tools/problem_generator.py index 72c77327..cf6cce06 100755 --- a/astrobee/survey_manager/survey_planner/tools/problem_generator.py +++ b/astrobee/survey_manager/survey_planner/tools/problem_generator.py @@ -171,13 +171,6 @@ def __call__(self, match: re.Match) -> str: return self.params[param] -def comment_for_pddl(text: str) -> str: - """ - Return the result of commenting `text` using PDDL (Lisp-like) comment syntax. - """ - return "\n".join([f";; {line}".strip() for line in text.splitlines()]) - - class ProblemWriter(ABC): "Abstract class for writing a problem intance." 
@@ -345,7 +338,6 @@ def problem_generator( header_lines += f";; Config {i + 1}: {config_path}\n" full_config += config_path.read_text() writer.set_param("header", header_lines) - writer.set_param("config", comment_for_pddl(full_config)) bays = list(config["bays"].keys()) bogus_bays = config["bogus_bays"] diff --git a/astrobee/survey_manager/survey_planner/tools/survey_planner.py b/astrobee/survey_manager/survey_planner/tools/survey_planner.py new file mode 100755 index 00000000..745459c7 --- /dev/null +++ b/astrobee/survey_manager/survey_planner/tools/survey_planner.py @@ -0,0 +1,1020 @@ +#!/usr/bin/env python3 + +""" +Custom planner for JEM survey domain. +""" + +import argparse +import heapq +import io +import itertools +import pathlib +import sys +from abc import ABC +from dataclasses import dataclass, field +from typing import ( + Any, + Callable, + Dict, + Generic, + Iterable, + Iterator, + List, + NamedTuple, + Optional, + Tuple, + Type, + TypeVar, +) + +import pyparsing as pp +import yaml + +from problem_generator import PDDL_DIR + +LocationName = str # Names PDDL object of type location +LocationIndex = int # Index of location in CONFIG.locations +RobotName = str # Names PDDL object of type robot +Duration = float # Time duration in seconds +Cost = int # Count of moves +CostMap = List[Cost] # Interpreted as mapping of LocationIndex -> Cost +RobotStatus = str # Options: ACTIVE, BLOCKED, DONE +Timestamp = float # Elapsed time since start of sim in seconds +EventCallback = Callable[["SimState"], None] +PendingEvent = Tuple[Timestamp, EventCallback] +ExecutionTrace = List["TraceEvent"] +OrderName = str # Names PDDL object of type order +PddlExpression = Any # Actually a pp.ParseResults. But 'Any' satisfies mypy. 
PddlActionName = str  # Names a PDDL action
T = TypeVar("T")  # Generic type variable for parametric types below
PddlTypeName = str  # Names a PDDL object type
PddlObjectName = str  # Names a PDDL object

# Sentinel cost meaning "no path exists" (larger than any real move count).
UNREACHABLE_COST = 999


@dataclass
class Config:
    "Container for custom planner config."

    robots: List[RobotName] = field(default_factory=list)
    "PDDL objects of type robot"

    locations: List[LocationName] = field(default_factory=list)
    "PDDL objects of type location"

    neighbors: List[List[LocationIndex]] = field(default_factory=list)
    "Maps each LocationIndex to a list of neighbor indices."

    action_durations: Dict[PddlActionName, Duration] = field(default_factory=dict)
    "Maps each PDDL action to its duration in seconds."

    def __post_init__(self) -> None:
        "Derive `location_lookup` (LocationName -> LocationIndex) from `locations`."
        self.location_lookup = {
            name: index for index, name in enumerate(self.locations)
        }

    def update_location_lookup(self) -> None:
        "Re-derive `location_lookup` after `locations` has been modified."
        self.__post_init__()


# Empty placeholder; its fields are overwritten during the PDDL parsing phase.
CONFIG = Config()
def get_cost_to_goal(goal_region: Iterable[LocationIndex]) -> CostMap:
    """
    Return a cost array that maps each location index to its cost to the nearest goal location in
    `goal_region` (measured in number of moves). Locations with no path to the goal region keep
    the sentinel cost UNREACHABLE_COST.
    """
    result = [UNREACHABLE_COST] * len(CONFIG.locations)
    # Breadth-first search outward from the goal region. Use an index cursor instead of
    # list.pop(0) so each dequeue is O(1) instead of O(n), and only enqueue neighbors whose
    # cost could still improve (BFS costs are monotonic, so results are unchanged).
    queue: List[Tuple[LocationIndex, Cost]] = [(g, 0) for g in goal_region]
    head = 0
    while head < len(queue):
        curr, curr_cost = queue[head]
        head += 1
        if curr_cost < result[curr]:
            result[curr] = curr_cost
            next_cost = curr_cost + 1
            for n in CONFIG.neighbors[curr]:
                if next_cost < result[n]:
                    queue.append((n, next_cost))
    return result


def get_best_neighbor(cost_map: CostMap, current_pos: LocationIndex) -> LocationIndex:
    """
    Return the neighbor of `current_pos` that has lowest cost in `cost_map` (the logical target
    location of the next move). Raise RuntimeError if the goal is unreachable from `current_pos`.
    """
    if cost_map[current_pos] == UNREACHABLE_COST:
        raise RuntimeError(
            f"Can't reach goal region from {CONFIG.locations[current_pos]}"
        )
    return min(CONFIG.neighbors[current_pos], key=cost_map.__getitem__)


def get_move_location(
    goal_region: Iterable[LocationIndex], current_pos: LocationIndex
) -> LocationIndex:
    """
    Return the neighbor of `current_pos` that is closest to `goal_region` (the logical target
    location of the next move). Raise RuntimeError if `goal_region` is unreachable from
    `current_pos`.
    """
    return get_best_neighbor(get_cost_to_goal(goal_region), current_pos)


def is_location_berth(location: LocationName) -> bool:
    "Return True if the location is a berth (name contains 'berth')."
    return "berth" in location


def is_location_flying(location: LocationName) -> bool:
    "Return True if the location is a flying location (name contains 'bay')."
    return "bay" in location


class TraceEvent(NamedTuple):
    "Represents an event in an execution trace."
    timestamp: Timestamp
    action: "Action"
    duration: Duration

    def get_dump_dict(self) -> Dict[str, Any]:
        "Return a dict to dump for debugging."
        return {
            "timestamp": self.timestamp,
            "action": self.action,
            "duration": self.duration,
        }
@dataclass
class Action(ABC):
    "Represents an action that can be invoked by the planner."

    robot: RobotName
    "The robot that is executing the action."

    def get_duration(self) -> Duration:
        "Return the estimated duration of the action in seconds."
        return CONFIG.action_durations[self.get_pddl_name()]

    def invalid_reason(  # pylint: disable=unused-argument
        self, sim_state: "SimState"
    ) -> str:
        """
        If action is invalid in `sim_state`, return the reason why. If valid, return an empty
        string. This default implementation always returns an empty string; derived classes
        override as needed.
        """
        return ""

    def is_valid(self, sim_state: "SimState") -> bool:
        "Return True if the action can be executed in `sim_state`."
        return not self.invalid_reason(sim_state)

    def get_pddl_name(self) -> str:
        "Return the name of this action type in the PDDL model."
        # E.g. MoveAction -> "move", PanoramaAction -> "panorama".
        return type(self).__name__.replace("Action", "").lower()

    def get_pddl_args(self) -> List[Any]:
        "Return the arguments of this action type in the PDDL model."
        raise NotImplementedError()  # Note: Can't mark as @abstractmethod due to mypy limitation

    def __repr__(self):
        arg_text = " ".join(str(arg) for arg in self.get_pddl_args())
        return f"({self.get_pddl_name()} {arg_text})"

    def start(self, sim_state: "SimState") -> None:
        """
        Modify `sim_state` as needed at the beginning of action execution. Derived classes that
        need to extend start() should typically invoke the parent method.
        """
        sim_state.trace.append(
            TraceEvent(sim_state.elapsed_time, self, self.get_duration())
        )
        sim_state.robot_states[self.robot].action = self
        # It seems to be a PDDL convention to separate events by an epsilon time difference
        # to prevent ambiguity about ordering.
        sim_state.elapsed_time += 0.001

    def end(self, sim_state: "SimState") -> None:
        """
        Modify `sim_state` as needed at the end of action execution. Derived classes that need
        to extend end() should typically invoke the parent method.
        """
        sim_state.robot_states[self.robot].action = None
        # Epsilon time separation again; see start().
        sim_state.elapsed_time += 0.001

    def apply(self, sim_state: "SimState") -> None:
        "Apply the action, modifying `sim_state`: start now, queue the end event."
        # Capture the finish time before start() advances the clock by its epsilon.
        finish_time = sim_state.elapsed_time + self.get_duration()
        self.start(sim_state)
        sim_state.events.push((finish_time, self.end))


@dataclass
class RobotState:
    "Represents the state of a robot."

    pos: LocationName
    "The current location of the robot."

    action: Optional[Action]
    "The action the robot is currently executing (or None if it is idle)."

    reserved: List[LocationName]
    """
    A list of locations the robot currently has reserved. (Places the robot is expected to move
    through given the action it is currently executing.)
    """

    def get_dump_dict(self) -> Dict[str, Any]:
        "Return a dict to dump for debugging."
        return {"pos": self.pos, "action": repr(self.action), "reserved": self.reserved}


class PriorityQueue(Generic[T]):
    "Priority queue implemented as a list that maintains the heap invariant."

    def __init__(self, seq: Optional[Iterable[T]] = None):
        ":param seq: Initial contents of the queue (need not be ordered)."
        self._heap: List[T] = [] if seq is None else list(seq)
        heapq.heapify(self._heap)

    def __iter__(self) -> Iterator[T]:
        "Iterate through the queue in order non-destructively. (Not optimized, for debugging.)"
        return iter(sorted(self._heap))

    def __bool__(self) -> bool:
        "Return True if the queue is non-empty."
        return bool(self._heap)

    def push(self, item: T) -> None:
        "Push a new item onto the queue."
        heapq.heappush(self._heap, item)

    def pop(self) -> T:
        "Pop and return the head of the queue in priority order."
        return heapq.heappop(self._heap)
@dataclass
class SimState:
    "Represents the overall state of the multi-robot sim."

    robot_states: Dict[RobotName, RobotState]
    "Initial robot states."

    elapsed_time: Timestamp = 0.0
    "Elapsed time since start of simulation (seconds)."

    trace: ExecutionTrace = field(default_factory=list)
    "Trace of timestamped actions executed so far in the sim."

    completed: Dict[Any, bool] = field(default_factory=dict)
    "Tracks completion status for actions that subclass MarkCompleteAction."

    events: PriorityQueue[PendingEvent] = field(default_factory=PriorityQueue)
    "Pending events queued by earlier actions."

    def get_dump_dict(self) -> Dict[str, Any]:
        "Return a dict to dump for debugging."
        return {
            "elapsed_time": self.elapsed_time,
            "robot_states": {
                name: state.get_dump_dict()
                for name, state in self.robot_states.items()
            },
            "events": list(self.events),
            "trace": [event.get_dump_dict() for event in self.trace],
            "completed": self.completed,
        }

    def warp(self) -> None:
        """
        Warp the simulation forward in time to the next queued event and apply its event
        callback. AKA "wait for something to happen". No-op if no events are pending.
        """
        if not self.events:
            return
        # NOTE(review): PendingEvent tuples tie-break on the callback when timestamps are
        # exactly equal, which would raise TypeError in the heap; the epsilon offsets added
        # by actions appear to prevent exact ties -- confirm.
        event_time, event_func = self.events.pop()
        self.elapsed_time = event_time
        event_func(self)


@dataclass(repr=False)
class MarkCompleteAction(Action):
    """
    Represents an action that explicitly marks itself complete when it finishes executing.
    """

    order: OrderName
    "Propagates goal ordering constraint from problem instance. Required by PDDL model."

    def end(self, sim_state: SimState) -> None:
        super().end(sim_state)
        # Keyed by repr so the matching MarkCompleteGoal can look up completion status.
        sim_state.completed[repr(self)] = True

    def is_complete(self, sim_state: SimState) -> bool:
        "Return True if this action has been completed in `sim_state`."
        return sim_state.completed.get(repr(self), False)
def get_collision_check_locations(
    from_pos: LocationName, to_pos: LocationName
) -> List[str]:
    """
    Return neighbors of `to_pos` that are distinct from `from_pos` and are flying locations. A
    move is invalid if one of these collision check locations is reserved by another robot. The
    PDDL models of some actions also require collision check locations to be specified as
    arguments.
    """
    neighbor_inds = CONFIG.neighbors[CONFIG.location_lookup[to_pos]]
    neighbor_names = (CONFIG.locations[ind] for ind in neighbor_inds)
    return [n for n in neighbor_names if is_location_flying(n) and n != from_pos]


def get_collision_reason(
    from_pos: LocationName, to_pos: LocationName, robot: RobotName, sim_state: SimState
) -> str:
    """
    Return the reason why a move from `from_pos` to `to_pos` with `robot` in `sim_state` fails
    collision checking. Return an empty string if the move passes the collision check.
    """
    check_locs = get_collision_check_locations(from_pos, to_pos)
    # Union of everything reserved by robots other than `robot`.
    others_reserved: set = set()
    for other in CONFIG.robots:
        if other != robot:
            others_reserved.update(sim_state.robot_states[other].reserved)
    reserved_check_locs = others_reserved.intersection(check_locs)
    if reserved_check_locs:
        return (
            f"Expected no collision check locations adjacent to `to_pos` {to_pos} to be reserved by"
            f" other robots, but these are: {reserved_check_locs}"
        )
    return ""  # ok
@dataclass(repr=False)
class AbstractMoveAction(Action):
    "Represents a single-step move action."

    from_pos: LocationName
    "The robot's starting location."

    to: LocationName
    "The location the robot should move to."

    def get_collision_check_locations(self) -> List[str]:
        "Return collision check locations for this move action."
        return get_collision_check_locations(self.from_pos, self.to)

    def get_pddl_args(self) -> List[Any]:
        return [self.robot, self.from_pos, self.to]

    def invalid_reason(self, sim_state: SimState) -> str:
        my_state = sim_state.robot_states[self.robot]
        pos_ind = CONFIG.location_lookup[my_state.pos]
        to_ind = CONFIG.location_lookup[self.to]

        # The robot can only move to an immediate neighbor of its current location.
        if to_ind not in CONFIG.neighbors[pos_ind]:
            return f"Expected `to` {self.to} to be a neighbor of `from_pos` {self.from_pos}"

        # The robot can't move to a location reserved by another robot.
        for other, other_state in sim_state.robot_states.items():
            if other != self.robot and self.to in other_state.reserved:
                return f"Found `to` {self.to} is reserved by other robot {other}"

        # Collision check locations must not be reserved either (empty string means valid).
        return get_collision_reason(self.from_pos, self.to, self.robot, sim_state)

    def start(self, sim_state: SimState) -> None:
        super().start(sim_state)
        # Reserve both endpoints while the move is in progress.
        robot_state = sim_state.robot_states[self.robot]
        robot_state.reserved = [robot_state.pos, self.to]

    def end(self, sim_state: SimState) -> None:
        super().end(sim_state)
        robot_state = sim_state.robot_states[self.robot]
        robot_state.reserved = [self.to]
        robot_state.pos = self.to
class MoveAction(AbstractMoveAction):
    "Represents a move action from one flying location to another."

    def get_pddl_args(self) -> List[Any]:
        # One check location argument is required for move
        return super().get_pddl_args() + self.get_collision_check_locations()[:1]

    def invalid_reason(self, sim_state: SimState) -> str:
        reason = super().invalid_reason(sim_state)
        if reason:
            return reason
        if not is_location_flying(self.from_pos):
            return f"Expected `from_pos` {self.from_pos} to be a flying location"
        if not is_location_flying(self.to):
            return f"Expected `to` {self.to} to be a flying location"
        return ""  # ok


class DockAction(AbstractMoveAction):
    "Represents a dock action (from a flying location to a dock berth)."

    def invalid_reason(self, sim_state: SimState) -> str:
        reason = super().invalid_reason(sim_state)
        if reason:
            return reason
        if not is_location_flying(self.from_pos):
            return f"Expected `from_pos` {self.from_pos} to be a flying location"
        if not is_location_berth(self.to):
            return f"Expected `to` {self.to} to be a berth"
        return ""  # ok


class UndockAction(AbstractMoveAction):
    "Represents an undock action (from a dock berth to a flying location)."

    def get_pddl_args(self) -> List[Any]:
        # Two check location arguments are required for undock
        return super().get_pddl_args() + self.get_collision_check_locations()[:2]

    def invalid_reason(self, sim_state: SimState) -> str:
        reason = super().invalid_reason(sim_state)
        if reason:
            return reason
        if not is_location_berth(self.from_pos):
            return f"Expected `from_pos` {self.from_pos} to be a berth"
        if not is_location_flying(self.to):
            return f"Expected `to` {self.to} to be a flying location"
        return ""  # ok
@dataclass(repr=False)
class PanoramaAction(MarkCompleteAction):
    "Represents a panorama action."

    location: LocationName
    "Where to acquire the panorama."

    def get_pddl_args(self) -> List[Any]:
        return [self.robot, self.order, self.location]

    def invalid_reason(self, sim_state: SimState) -> str:
        robot_state = sim_state.robot_states[self.robot]
        if robot_state.pos != self.location:
            return f"Expected robot pos {robot_state.pos} to match panorama pos {self.location}"
        return ""  # ok


@dataclass(repr=False)
class StereoAction(MarkCompleteAction):
    "Represents a stereo survey action."

    base: LocationName
    "The location where the stereo survey starts and ends."

    bound: LocationName
    "The other end of the interval of locations that the robot visits during the survey."

    def get_pddl_args(self) -> List[Any]:
        # Two collision check arguments are required for stereo action
        check_locs = get_collision_check_locations(self.base, self.bound)[:2]
        return [self.robot, self.order, self.base, self.bound] + check_locs

    def invalid_reason(self, sim_state: SimState) -> str:
        robot_state = sim_state.robot_states[self.robot]
        if robot_state.pos != self.base:
            return f"Expected robot pos {robot_state.pos} to match stereo survey base {self.base}"
        # Collision check locations must not be reserved (empty string means valid).
        return get_collision_reason(self.base, self.bound, self.robot, sim_state)

    def start(self, sim_state: SimState) -> None:
        super().start(sim_state)
        # Reserve both ends of the survey interval while in progress.
        sim_state.robot_states[self.robot].reserved = [self.base, self.bound]

    def end(self, sim_state: SimState) -> None:
        super().end(sim_state)
        # The survey returns the robot to its base location.
        sim_state.robot_states[self.robot].reserved = [self.base]
@dataclass
class Goal(ABC):
    "Represents an abstract goal."

    robot: RobotName
    "The robot primarily responsible for achieving the goal."

    def is_complete(self, exec_state: "ExecState") -> bool:
        "Return True if the goal has been completed in `exec_state`."
        raise NotImplementedError()  # Note: Can't mark as @abstractmethod due to mypy limitation

    def get_next_action(self, exec_state: "ExecState") -> Action:
        "Return the next action to apply to achieve the goal given `exec_state`."
        raise NotImplementedError()  # Note: Can't mark as @abstractmethod due to mypy limitation
@dataclass
class RobotExecState:
    "Represents the execution state of a robot."

    goals: List[Goal]
    "Goals of the robot."

    goal_index: int = 0
    """
    Number of goals that the robot has completed. If less than `len(goals)`, this can be
    interpreted as the index of the current goal. (Or if all goals have been completed, there is
    no current goal.)
    """

    goal_start_time: Timestamp = 0.0
    """
    When the most recently completed goal was completed. (Can be interpreted as when the current
    goal became active.) Set to 0.0 if no goal has been completed yet.
    """

    status: RobotStatus = "INIT"
    """
    Execution status: DONE = completed all goals, BLOCKED = the next action to perform is not
    (yet) valid, ACTIVE = robot is actively working on a goal.
    """

    blocked_action: Optional[Action] = None
    'If status is "BLOCKED", this is the next action that is currently invalid. Otherwise, None.'

    blocked_reason: str = ""
    'If status is "BLOCKED", this is the reason why `blocked_action` is invalid. Otherwise, "".'

    def get_goal(self) -> Optional[Goal]:
        "Return the currently active goal, or None if all goals have been achieved."
        if self.goal_index < len(self.goals):
            return self.goals[self.goal_index]
        return None

    def is_active(self) -> bool:
        "Return True if the robot is actively working on a goal."
        return self.status == "ACTIVE"

    def is_done(self) -> bool:
        "Return True if the robot has achieved all of its goals."
        return self.status == "DONE"

    def get_dump_dict(self) -> Dict[str, Any]:
        "Return a dict to dump for debugging."
        result: Dict[str, Any] = {
            "goals": [repr(goal) for goal in self.goals],
            "goal_index": self.goal_index,
            "goal_start_time": self.goal_start_time,
            "status": self.status,
        }
        if self.status == "BLOCKED":
            result["blocked_action"] = repr(self.blocked_action)
            result["blocked_reason"] = repr(self.blocked_reason)
        return result
@dataclass
class ExecState:
    "Represents the execution state of the multi-robot system."

    sim_state: SimState
    "The initial simulation state."

    robot_exec_states: Dict[RobotName, RobotExecState]
    "The initial execution states of the robots in the multi-robot system."

    def is_any_robot_active(self) -> bool:
        "Return True if any robot is actively working on a goal."
        return any(rstate.is_active() for rstate in self.robot_exec_states.values())

    def are_all_robots_done(self) -> bool:
        "Return True if all robots have achieved all of their goals."
        return all(rstate.is_done() for rstate in self.robot_exec_states.values())

    def call_next_action_internal(self, robot: RobotName) -> RobotStatus:
        "Helper for call_next_action() that returns the updated robot status."
        rstate = self.robot_exec_states[robot]

        # Advance past any goals that are already complete.
        goal = rstate.get_goal()
        while goal is not None and goal.is_complete(self):
            rstate.goal_index += 1
            rstate.goal_start_time = self.sim_state.elapsed_time
            goal = rstate.get_goal()
        if goal is None:
            return "DONE"

        action = goal.get_next_action(self)
        invalid_reason = action.invalid_reason(self.sim_state)
        if invalid_reason:
            rstate.blocked_action = action
            rstate.blocked_reason = invalid_reason
            return "BLOCKED"
        rstate.blocked_action = None
        rstate.blocked_reason = ""

        action.apply(self.sim_state)
        return "ACTIVE"

    def call_next_action(self, robot: RobotName) -> None:
        """
        Iterate through goals, starting with the current goal, until reaching a goal that is not
        completed yet, and apply that goal's next action if it is valid in the current state.
        Update the robot execution status.
        """
        self.robot_exec_states[robot].status = self.call_next_action_internal(robot)

    def run_step(self) -> None:
        "Try to apply each robot's next action if it is idle."
        for robot in CONFIG.robots:
            if self.sim_state.robot_states[robot].action is None:
                self.call_next_action(robot)

    def get_dump_dict(self) -> Dict[str, Any]:
        "Return a dict to dump for debugging."
        return {
            "sim_state": self.sim_state.get_dump_dict(),
            "robot_exec_states": {
                robot: state.get_dump_dict()
                for robot, state in self.robot_exec_states.items()
            },
        }

    def __repr__(self) -> str:
        return yaml.safe_dump(self.get_dump_dict(), sort_keys=False)

    def run(self) -> None:
        "Achieve all goals in the multi-robot system."
        while True:
            self.run_step()
            self.sim_state.warp()
            if self.are_all_robots_done():
                break
            if not self.is_any_robot_active():
                # Dump the full state to aid debugging before giving up.
                print(self)
                raise RuntimeError(
                    "Can't achieve all goals. Not done but no active robots!"
                )


@dataclass
class MoveGoal(Goal):
    "Represents a move goal (may require multiple single-step move actions)."

    to: LocationName
    "The location the robot should move to."

    def __repr__(self) -> str:
        return f"(robot-at {self.robot} {self.to})"

    def is_complete(self, exec_state: ExecState) -> bool:
        "Return True if the robot is already at the desired location in `exec_state`."
        return exec_state.sim_state.robot_states[self.robot].pos == self.to

    def get_next_action(self, exec_state: ExecState) -> Action:
        "Return a single-step move action toward the desired location from `exec_state`."
        robot_state = exec_state.sim_state.robot_states[self.robot]
        goal_ind = CONFIG.location_lookup[self.to]
        pos_ind = CONFIG.location_lookup[robot_state.pos]
        next_loc = CONFIG.locations[
            get_move_location(goal_region=[goal_ind], current_pos=pos_ind)
        ]
        # Pick the concrete action type by whether the step enters or leaves a berth.
        if is_location_berth(next_loc):
            action_type: Type[AbstractMoveAction] = DockAction
        elif is_location_berth(robot_state.pos):
            action_type = UndockAction
        else:
            action_type = MoveAction
        return action_type(robot=self.robot, from_pos=robot_state.pos, to=next_loc)


@dataclass
class MarkCompleteGoal(Goal):
    "Represents a goal that is complete when a corresponding MarkCompleteAction finishes."

    order: OrderName
    "Propagates goal ordering constraint from problem instance. Required by PDDL model."

    def get_completing_action(self) -> MarkCompleteAction:
        "Return the MarkCompleteAction whose completion satisfies this MarkCompleteGoal."
        raise NotImplementedError()  # Note: Can't mark as @abstractmethod due to mypy limitation

    def __repr__(self) -> str:
        # E.g. "(panorama ...)" becomes "(completed-panorama ...)".
        return repr(self.get_completing_action()).replace("(", "(completed-", 1)

    def is_complete(self, exec_state: ExecState) -> bool:
        return self.get_completing_action().is_complete(exec_state.sim_state)
@dataclass
class PanoramaGoal(MarkCompleteGoal):
    "Represents a panorama goal."

    location: LocationName
    "The location where the panorama should be acquired."

    def get_completing_action(self) -> MarkCompleteAction:
        return PanoramaAction(
            robot=self.robot, order=self.order, location=self.location
        )

    def get_next_action(self, exec_state: ExecState) -> Action:
        """
        Return the next action needed to complete the panorama: a move toward the desired
        location, or the corresponding PanoramaAction if already there.
        """
        move_goal = MoveGoal(self.robot, to=self.location)
        if move_goal.is_complete(exec_state):
            return self.get_completing_action()
        return move_goal.get_next_action(exec_state)


@dataclass
class StereoGoal(MarkCompleteGoal):
    "Represents a stereo survey goal."

    base: LocationName
    "The location where the stereo survey starts and ends."

    bound: LocationName
    "The other end of the interval of locations that the robot visits during the survey."

    def get_completing_action(self) -> MarkCompleteAction:
        return StereoAction(
            robot=self.robot, order=self.order, base=self.base, bound=self.bound
        )

    def get_next_action(self, exec_state: ExecState) -> Action:
        """
        Return the next action needed to complete the survey: a move toward the survey base,
        or the corresponding StereoAction if already there.
        """
        move_goal = MoveGoal(self.robot, to=self.base)
        if move_goal.is_complete(exec_state):
            return self.get_completing_action()
        return move_goal.get_next_action(exec_state)
def format_trace(trace: ExecutionTrace) -> str:
    "Return `trace` formatted in the standard PDDL plan output format used by POPF."
    return "".join(
        f"{event.timestamp:.3f}: {event.action} [{event.duration:.3f}]\n"
        for event in trace
    )


def parse_pddl(input_path: pathlib.Path) -> PddlExpression:
    """
    Return the result of parsing the file at `input_path` as a LISP S-expression (encompasses
    PDDL domains and problem instances).
    """
    comment = pp.Regex(r";.*").setName("LISP style comment")
    parser = pp.nestedExpr().ignore(comment)
    return parser.parseString(input_path.read_text())[0]


def get_action_durations(domain: PddlExpression) -> Dict[PddlActionName, Duration]:
    "Return action durations parsed from `domain`."
    action_durations = {}
    for action in (e for e in domain[2:] if e[0] == ":durative-action"):
        action_name = action[1]
        # The :duration clause has the form (= ?duration <value>).
        duration_arg = action[action.asList().index(":duration") + 1]
        op, variable, duration_str = duration_arg
        assert (
            op == "=" and variable == "?duration"
        ), f"Expected :duration arg to have the form (= ?duration ...), got {duration_arg}"
        try:
            action_durations[action_name] = float(duration_str)
        except ValueError as exc:
            raise RuntimeError(
                f"Expected duration value to be a float, got {repr(duration_str)}"
            ) from exc
    return action_durations
def get_location_config(problem: PddlExpression) -> Dict[str, Any]:
    """
    Return location configuration (available locations and neighbor lists) parsed from
    `problem`.
    """
    (init_predicates,) = [e for e in problem[2:] if e[0] == ":init"]
    move_edges = [(p[1], p[2]) for p in init_predicates if p[0] == "move-connected"]
    dock_edges = [(p[1], p[2]) for p in init_predicates if p[0] == "dock-connected"]

    # move-connected edges are directed as given; dock-connected edges apply in both directions.
    neighbor_edges = (
        move_edges + dock_edges + [(loc2, loc1) for loc1, loc2 in dock_edges]
    )

    locations = sorted(set(itertools.chain(*neighbor_edges)))
    location_lookup = {name: ind for ind, name in enumerate(locations)}
    # Build adjacency lists in one pass over the edges (edge order preserved per location).
    neighbors: List[List[int]] = [[] for _ in locations]
    for loc1, loc2 in neighbor_edges:
        neighbors[location_lookup[loc1]].append(location_lookup[loc2])
    return {
        "locations": locations,
        "location_lookup": location_lookup,
        "neighbors": neighbors,
    }


def get_goal_from_pddl(goal_expr: PddlExpression) -> Optional[Goal]:
    "Return the custom planner goal corresponding to PDDL `goal_expr` (None if unrecognized)."
    goal_type = goal_expr[0]
    if goal_type == "completed-panorama":
        return PanoramaGoal(
            robot=goal_expr[1], order=goal_expr[2], location=goal_expr[3]
        )
    if goal_type == "completed-stereo":
        return StereoGoal(
            robot=goal_expr[1],
            order=goal_expr[2],
            base=goal_expr[3],
            bound=goal_expr[4],
        )
    if goal_type == "robot-at":
        return MoveGoal(robot=goal_expr[1], to=goal_expr[2])

    print(
        f"WARNING: Can't map PDDL goal_type {goal_type} to custom planner goal type, ignoring",
        file=sys.stderr,
    )
    return None
def filter_none(seq: Iterable[Optional[T]]) -> List[T]:
    "Return `seq` as a list with None elements filtered out."
    return [elt for elt in seq if elt is not None]


def get_objects_by_type(
    problem: PddlExpression,
) -> Dict[PddlTypeName, List[PddlObjectName]]:
    """
    Return a mapping of PDDL object type to the objects declared with that type, parsed from
    the (:objects ...) clause of the problem instance. (Docstring fixed: this returns all
    typed objects, not just robot names.)
    """
    (objects_clause,) = [e for e in problem[2:] if e[0] == ":objects"]
    # object_decls example: ["foo", "bar", "-", "int", "baz", "-", "float"]
    object_decls = list(objects_clause[1:])

    # desired objects_of_type in example: {"int": ["foo", "bar"], "float": ["baz"]}
    objects_of_type = {}
    while object_decls:
        type_marker_ind = object_decls.index("-")
        type_arg = object_decls[type_marker_ind + 1]
        objects_of_type[type_arg] = object_decls[:type_marker_ind]
        object_decls = object_decls[type_marker_ind + 2 :]

    return objects_of_type


def get_robot_goals(problem: PddlExpression) -> Dict[RobotName, List[Goal]]:
    "Return a mapping of robot name to that robot's goals, parsed from `problem`."
    (goal_clause,) = [e for e in problem[2:] if e[0] == ":goal"]
    compound_expr = goal_clause[1]
    # A compound goal is (and g1 g2 ...); otherwise there is a single bare goal expression.
    if compound_expr[0] == "and":
        goal_exprs = compound_expr[1:]
    else:
        goal_exprs = [compound_expr]
    goals = filter_none([get_goal_from_pddl(goal_expr) for goal_expr in goal_exprs])
    return {robot: [g for g in goals if g.robot == robot] for robot in CONFIG.robots}
def get_robot_states(problem: PddlExpression) -> Dict[RobotName, RobotState]:
    "Return a mapping of robot name to initial robot state parsed from `problem`."
    robot_states = {
        robot: RobotState(pos="", action=None, reserved=[]) for robot in CONFIG.robots
    }
    (init_clause,) = [e for e in problem[2:] if e[0] == ":init"]
    for pred in init_clause[1:]:
        if pred[0] == "robot-at":
            _, robot, pos = pred
            robot_states[robot].pos = pos
            # A robot initially reserves the location it occupies.
            robot_states[robot].reserved = [pos]
    return robot_states


def survey_planner(domain_path: pathlib.Path, problem_path: pathlib.Path):
    "Primary driver function for custom planning."
    # Parse the domain for action durations.
    domain_expr = parse_pddl(domain_path)
    CONFIG.action_durations = get_action_durations(domain_expr)

    # Parse the problem instance for robots, topology, goals, and initial state.
    problem_expr = parse_pddl(problem_path)
    CONFIG.robots = get_objects_by_type(problem_expr)["robot"]

    location_config = get_location_config(problem_expr)
    CONFIG.locations = location_config["locations"]
    CONFIG.location_lookup = location_config["location_lookup"]
    CONFIG.neighbors = location_config["neighbors"]

    robot_goals = get_robot_goals(problem_expr)
    robot_states = get_robot_states(problem_expr)

    # Simulate greedy execution and print the resulting plan trace.
    exec_state = ExecState(
        sim_state=SimState(robot_states=robot_states),
        robot_exec_states={
            robot: RobotExecState(robot_goals[robot]) for robot in CONFIG.robots
        },
    )
    exec_state.run()
    print(format_trace(exec_state.sim_state.trace))


class CustomFormatter(
    argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter
):
    "Custom formatter for argparse that combines mixins."
def main():
    "Parse command-line arguments and invoke survey_planner()."
    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=CustomFormatter
    )
    # Both positional arguments are optional and fall back to the bundled sample files.
    for arg_name, arg_help, default_file in (
        ("domain", "Path for input PDDL domain", "domain_survey.pddl"),
        ("problem", "Path for input PDDL problem", "problem_jem_survey.pddl"),
    ):
        parser.add_argument(
            arg_name,
            help=arg_help,
            type=pathlib.Path,
            default=PDDL_DIR / default_file,
            nargs="?",
        )
    args = parser.parse_args()

    survey_planner(domain_path=args.domain, problem_path=args.problem)


if __name__ == "__main__":
    main()