Skip to content

Commit

Permalink
Merge pull request #804 from StanfordVL/remove_examples_readme
Browse files Browse the repository at this point in the history
Remove outdated examples README, fix geodesic potential, revive behavior task test
  • Loading branch information
cgokmen authored Jul 17, 2024
2 parents bfb7950 + 4c07aa4 commit fb80b39
Show file tree
Hide file tree
Showing 3 changed files with 9 additions and 32 deletions.
19 changes: 0 additions & 19 deletions omnigibson/examples/README.md

This file was deleted.

17 changes: 8 additions & 9 deletions omnigibson/tasks/point_navigation_task.py
Original file line number Diff line number Diff line change
Expand Up @@ -261,20 +261,17 @@ def _sample_initial_pose_and_goal_pos(self, env, max_trials=100):
log.info("Sampled goal position: {}".format(goal_pos))
return initial_pos, initial_quat, goal_pos

def _get_geodesic_potential(self, env, no_path_reward=-1.0):
def _get_geodesic_potential(self, env):
"""
Get potential based on geodesic distance
Args:
env: environment instance
no_path_reward (float): Reward to return if no path is found to the goal position
Returns:
float: geodesic distance to the target position
"""
_, geodesic_dist = self.get_shortest_path_to_goal(env=env)
if geodesic_dist is None:
return no_path_reward
return geodesic_dist

def _get_l2_potential(self, env):
Expand All @@ -289,25 +286,27 @@ def _get_l2_potential(self, env):
"""
return T.l2_distance(env.robots[self._robot_idn].states[Pose].get_value()[0][:2], self._goal_pos[:2])

def get_potential(self, env):
    """
    Compute task-specific potential: distance to the goal.

    Args:
        env (Environment): Environment instance

    Returns:
        float: Computed potential

    Raises:
        ValueError: If self._reward_type is neither "l2" nor "geodesic"
    """
    if self._reward_type == "l2":
        # Fix: was misspelled "potentail", which left "potential" unbound
        # and raised NameError at the return below.
        potential = self._get_l2_potential(env)
    elif self._reward_type == "geodesic":
        potential = self._get_geodesic_potential(env)
        # If no path is found, fall back to L2 potential
        # (fix: the fallback also assigned to the misspelled "potentail",
        # so the None was returned instead of the L2 fallback value).
        if potential is None:
            potential = self._get_l2_potential(env)
    else:
        raise ValueError(f"Invalid reward type! {self._reward_type}")

    return potential

def _reset_agent(self, env):
# Reset agent
Expand Down
5 changes: 1 addition & 4 deletions tests/test_envs.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ def task_tester(task_type):
"task": {
"type": task_type,
# BehaviorTask-specific
"activity_name": "assembling_gift_baskets",
"activity_name": "laying_wood_floors",
"online_object_sampling": True,
},
}
Expand Down Expand Up @@ -59,9 +59,6 @@ def test_point_navigation_task():
task_tester("PointNavigationTask")


@pytest.mark.skip(
    reason="currently broken because wicker baskets become fillable but do not have fillable volume annotation"
)
def test_behavior_task():
    # Smoke test: run the shared task_tester harness with the BehaviorTask type
    # (env construction + stepping; see task_tester above for details).
    task_tester("BehaviorTask")

Expand Down

0 comments on commit fb80b39

Please sign in to comment.