diff --git a/requirements.txt b/requirements.txt
index f7390328387f51f2af1a9e9440c933d28032a9a7..0232814bcf9174380624dfb3061607c61a25ddba 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
 gym==0.10.9
-numpy==1.15
+numpy>=1.16.1
 yacs>=0.1.5
 # visualization optional dependencies
 imageio>=2.2.0
diff --git a/test/test_habitat_env.py b/test/test_habitat_env.py
index 72d9a2380106d997f28c8a8bab216a72d05fe993..3852b4ed7b43338e3238f9935d074846667ae039 100644
--- a/test/test_habitat_env.py
+++ b/test/test_habitat_env.py
@@ -150,21 +150,22 @@ def test_env():
         NavigationEpisode(
             episode_id="0",
             scene_id=config.SIMULATOR.SCENE,
-            start_position=[3.00611, 0.072447, -2.67867],
+            start_position=[-3.0133917, 0.04623024, 7.3064547],
             start_rotation=[0, 0.163276, 0, 0.98658],
-            goals=[NavigationGoal([3.00611, 0.072447, -2.67867])],
+            goals=[NavigationGoal([-3.0133917, 0.04623024, 7.3064547])],
             info={"geodesic_distance": 0.001},
         )
     ]
 
-    env.reset()
+
     non_stop_actions = [
         k
         for k, v in SIM_ACTION_TO_NAME.items()
         if v != SimulatorActions.STOP.value
     ]
     for _ in range(config.ENVIRONMENT.MAX_EPISODE_STEPS):
-        env.step(np.random.choice(non_stop_actions))
+        act = np.random.choice(non_stop_actions)
+        env.step(act)
 
     # check for steps limit on environment
     assert env.episode_over is True, (
@@ -230,9 +231,9 @@ def test_rl_env():
         NavigationEpisode(
             episode_id="0",
             scene_id=config.SIMULATOR.SCENE,
-            start_position=[3.00611, 0.072447, -2.67867],
+            start_position=[-3.0133917, 0.04623024, 7.3064547],
             start_rotation=[0, 0.163276, 0, 0.98658],
-            goals=[NavigationGoal([3.00611, 0.072447, -2.67867])],
+            goals=[NavigationGoal([-3.0133917, 0.04623024, 7.3064547])],
             info={"geodesic_distance": 0.001},
         )
     ]