diff --git a/baselines/agents/ppo_agents.py b/baselines/agents/ppo_agents.py
index db6ad957bc8801954006dcf1be4e5be90b148daf..0656eed499e21c5139807d1992afd61292ad8f9a 100644
--- a/baselines/agents/ppo_agents.py
+++ b/baselines/agents/ppo_agents.py
@@ -60,12 +60,17 @@ class PPOAgent(Agent):
 
         action_spaces = Discrete(4)
 
-        self.device = torch.device("cuda:{}".format(config.PTH_GPU_ID))
+        self.device = (
+            torch.device("cuda:{}".format(config.PTH_GPU_ID))
+            if torch.cuda.is_available()
+            else torch.device("cpu")
+        )
         self.hidden_size = config.HIDDEN_SIZE
 
         random.seed(config.RANDOM_SEED)
         torch.random.manual_seed(config.RANDOM_SEED)
-        torch.backends.cudnn.deterministic = True
+        if torch.cuda.is_available():
+            torch.backends.cudnn.deterministic = True
 
         self.actor_critic = Policy(
             observation_space=observation_spaces,
diff --git a/baselines/agents/simple_agents.py b/baselines/agents/simple_agents.py
index 8906b86f469327bee919890b394a71f9fb4eebc6..8010a8b4b06241379a5065958b6c50718d182634 100644
--- a/baselines/agents/simple_agents.py
+++ b/baselines/agents/simple_agents.py
@@ -11,16 +11,10 @@ from math import pi
 import numpy as np
 
 import habitat
-from habitat.sims.habitat_simulator import (
-    SimulatorActions,
-    SIM_ACTION_TO_NAME,
-    SIM_NAME_TO_ACTION,
-)
+from habitat.sims.habitat_simulator import SimulatorActions
 
 NON_STOP_ACTIONS = [
-    k
-    for k, v in SIM_ACTION_TO_NAME.items()
-    if v != SimulatorActions.STOP.value
+    v for v in range(len(SimulatorActions)) if v != SimulatorActions.STOP.value
 ]
 
 
@@ -37,7 +31,7 @@ class RandomAgent(habitat.Agent):
 
     def act(self, observations):
         if self.is_goal_reached(observations):
-            action = SIM_NAME_TO_ACTION[SimulatorActions.STOP.value]
+            action = SimulatorActions.STOP.value
         else:
             action = np.random.choice(NON_STOP_ACTIONS)
         return action
@@ -46,9 +40,9 @@ class RandomAgent(habitat.Agent):
 class ForwardOnlyAgent(RandomAgent):
     def act(self, observations):
         if self.is_goal_reached(observations):
-            action = SIM_NAME_TO_ACTION[SimulatorActions.STOP.value]
+            action = SimulatorActions.STOP.value
         else:
-            action = SIM_NAME_TO_ACTION[SimulatorActions.FORWARD.value]
+            action = SimulatorActions.FORWARD.value
         return action
 
 
@@ -59,16 +53,13 @@ class RandomForwardAgent(RandomAgent):
 
     def act(self, observations):
         if self.is_goal_reached(observations):
-            action = SIM_NAME_TO_ACTION[SimulatorActions.STOP.value]
+            action = SimulatorActions.STOP.value
         else:
             if np.random.uniform(0, 1, 1) < self.FORWARD_PROBABILITY:
-                action = SIM_NAME_TO_ACTION[SimulatorActions.FORWARD.value]
+                action = SimulatorActions.FORWARD.value
             else:
                 action = np.random.choice(
-                    [
-                        SIM_NAME_TO_ACTION[SimulatorActions.LEFT.value],
-                        SIM_NAME_TO_ACTION[SimulatorActions.RIGHT.value],
-                    ]
+                    [SimulatorActions.LEFT.value, SimulatorActions.RIGHT.value]
                 )
 
         return action
@@ -92,20 +83,20 @@ class GoalFollower(RandomAgent):
         if angle_to_goal > pi or (
             (angle_to_goal < 0) and (angle_to_goal > -pi)
         ):
-            action = SIM_NAME_TO_ACTION[SimulatorActions.RIGHT.value]
+            action = SimulatorActions.RIGHT.value
         else:
-            action = SIM_NAME_TO_ACTION[SimulatorActions.LEFT.value]
+            action = SimulatorActions.LEFT.value
         return action
 
     def act(self, observations):
         if self.is_goal_reached(observations):
-            action = SIM_NAME_TO_ACTION[SimulatorActions.STOP.value]
+            action = SimulatorActions.STOP.value
         else:
             angle_to_goal = self.normalize_angle(
                 np.array(observations["pointgoal"][1])
             )
             if abs(angle_to_goal) < self.angle_th:
-                action = SIM_NAME_TO_ACTION[SimulatorActions.FORWARD.value]
+                action = SimulatorActions.FORWARD.value
             else:
                 action = self.turn_towards_goal(angle_to_goal)
 
diff --git a/baselines/agents/slam_agents.py b/baselines/agents/slam_agents.py
index 741718639ffa7a35db3b8d2da3836996ff081163..c3e6ba97b995db04248127272fc5e9c55054d185 100644
--- a/baselines/agents/slam_agents.py
+++ b/baselines/agents/slam_agents.py
@@ -26,8 +26,6 @@ from baselines.slambased.reprojection import (
 )
 from habitat.sims.habitat_simulator import (
     SimulatorActions,
-    SIM_ACTION_TO_NAME,
-    SIM_NAME_TO_ACTION,
 )
 from baselines.slambased.mappers import DirectDepthMapper
 from baselines.slambased.path_planners import DifferentiableStarPlanner
@@ -107,7 +105,7 @@ class RandomAgent(object):
         # Act
         # Check if we are done
         if self.is_goal_reached():
-            action = SIM_NAME_TO_ACTION[SimulatorActions.STOP.value]
+            action = SimulatorActions.STOP.value
         else:
             action = random.randint(0, self.num_actions - 1)
         return action
@@ -127,34 +125,35 @@ class BlindAgent(RandomAgent):
     def decide_what_to_do(self):
         distance_to_goal = self.obs["pointgoal"][0]
         angle_to_goal = norm_ang(np.array(self.obs["pointgoal"][1]))
-        command = SIM_NAME_TO_ACTION[SimulatorActions.STOP.value]
+        command = SimulatorActions.STOP.value
         if distance_to_goal <= self.pos_th:
             return command
         if abs(angle_to_goal) < self.angle_th:
-            command = "move_forward"
+            command = SimulatorActions.FORWARD.value
         else:
             if (angle_to_goal > 0) and (angle_to_goal < pi):
-                command = "look_left"
+                command = SimulatorActions.LEFT.value
             elif angle_to_goal > pi:
-                command = "look_right"
+                command = SimulatorActions.RIGHT.value
             elif (angle_to_goal < 0) and (angle_to_goal > -pi):
-                command = "look_right"
+                command = SimulatorActions.RIGHT.value
             else:
-                command = "look_left"
+                command = SimulatorActions.LEFT.value
+
         return command
 
     def act(self, habitat_observation=None, random_prob=0.1):
         self.update_internal_state(habitat_observation)
         # Act
         if self.is_goal_reached():
-            return SIM_NAME_TO_ACTION[SimulatorActions.STOP.value]
+            return SimulatorActions.STOP.value
         command = self.decide_what_to_do()
         random_action = random.randint(0, self.num_actions - 1)
         act_randomly = np.random.uniform(0, 1, 1) < random_prob
         if act_randomly:
             action = random_action
         else:
-            action = SIM_NAME_TO_ACTION[command]
+            action = command
         return action
 
 
@@ -265,8 +264,7 @@ class ORBSLAM2Agent(RandomAgent):
                     .to(self.device),
                 )
                 if (
-                    SIM_ACTION_TO_NAME[self.action_history[-1]]
-                    == "move_forward"
+                    self.action_history[-1] == SimulatorActions.FORWARD.value
                 ):
                     self.unseen_obstacle = (
                         previous_step.item() <= 0.001
@@ -332,7 +330,7 @@ class ORBSLAM2Agent(RandomAgent):
         )
         success = self.is_goal_reached()
         if success:
-            action = SIM_NAME_TO_ACTION[SimulatorActions.STOP.value]
+            action = SimulatorActions.STOP.value
             self.action_history.append(action)
             return action
         # Plan action
@@ -486,7 +484,7 @@ class ORBSLAM2Agent(RandomAgent):
         return path, planned_waypoints
 
     def planner_prediction_to_command(self, p_next):
-        command = "stop"
+        command = SimulatorActions.STOP.value
         p_init = self.pose6D.squeeze()
         d_angle_rot_th = self.angle_th
         pos_th = self.pos_th
@@ -496,29 +494,29 @@ class ORBSLAM2Agent(RandomAgent):
             get_direction(p_init, p_next, ang_th=d_angle_rot_th, pos_th=pos_th)
         )
         if abs(d_angle) < d_angle_rot_th:
-            command = "move_forward"
+            command = SimulatorActions.FORWARD.value
         else:
             if (d_angle > 0) and (d_angle < pi):
-                command = "look_left"
+                command = SimulatorActions.LEFT.value
             elif d_angle > pi:
-                command = "look_right"
+                command = SimulatorActions.RIGHT.value
             elif (d_angle < 0) and (d_angle > -pi):
-                command = "look_right"
+                command = SimulatorActions.RIGHT.value
             else:
-                command = "look_left"
+                command = SimulatorActions.LEFT.value
         return command
 
     def decide_what_to_do(self):
         action = None
         if self.is_goal_reached():
-            action = SIM_NAME_TO_ACTION[SimulatorActions.STOP.value]
+            action = SimulatorActions.STOP.value
             return action
         if self.unseen_obstacle:
-            command = "look_right"
-            return SIM_NAME_TO_ACTION[command]
-        command = "stop"
+            command = SimulatorActions.RIGHT.value
+            return command
+        command = SimulatorActions.STOP.value
         command = self.planner_prediction_to_command(self.waypointPose6D)
-        return SIM_NAME_TO_ACTION[command]
+        return command
 
 class ORBSLAM2MonodepthAgent(ORBSLAM2Agent):
     def __init__(
diff --git a/baselines/train_ppo.py b/baselines/train_ppo.py
index 64a09dc4dc524c1f1086b43f3a7cfcbd2093d058..2f396eed054c55ea71c8d67b27c5ae8f148e6721 100644
--- a/baselines/train_ppo.py
+++ b/baselines/train_ppo.py
@@ -13,7 +13,7 @@ import numpy as np
 import torch
 import habitat
 from habitat import logger
-from habitat.sims.habitat_simulator import SimulatorActions, SIM_NAME_TO_ACTION
+from habitat.sims.habitat_simulator import SimulatorActions
 from habitat.config.default import get_config as cfg_env
 from config.default import cfg as cfg_baseline
 from habitat.datasets.pointnav.pointnav_dataset import PointNavDatasetV1
@@ -72,8 +72,7 @@ class NavRLEnv(habitat.RLEnv):
 
     def _episode_success(self):
         if (
-            self._previous_action
-            == SIM_NAME_TO_ACTION[SimulatorActions.STOP.value]
+            self._previous_action == SimulatorActions.STOP.value
             and self._distance_target() < self._config_env.SUCCESS_DISTANCE
         ):
             return True
diff --git a/examples/benchmark.py b/examples/benchmark.py
index 87e6a97bd4db976a3d60c9a7ff22417be42150b0..470abddf37724b89a385b2c26dde9918cbf93b1b 100644
--- a/examples/benchmark.py
+++ b/examples/benchmark.py
@@ -7,7 +7,7 @@
 import argparse
 
 import habitat
-from habitat.sims.habitat_simulator import SimulatorActions, SIM_NAME_TO_ACTION
+from habitat.sims.habitat_simulator import SimulatorActions
 
 
 class ForwardOnlyAgent(habitat.Agent):
@@ -15,7 +15,7 @@ class ForwardOnlyAgent(habitat.Agent):
         pass
 
     def act(self, observations):
-        action = SIM_NAME_TO_ACTION[SimulatorActions.FORWARD.value]
+        action = SimulatorActions.FORWARD.value
         return action
 
 
diff --git a/examples/shortest_path_follower_example.py b/examples/shortest_path_follower_example.py
index 706179073e4099c3f384296718e078655633a0e4..e6cd147d3162f0f9017525c89f2222cf49fc0ffa 100644
--- a/examples/shortest_path_follower_example.py
+++ b/examples/shortest_path_follower_example.py
@@ -10,7 +10,6 @@ import shutil
 import imageio
 
 import habitat
-from habitat.sims.habitat_simulator import SIM_NAME_TO_ACTION
 from habitat.tasks.nav.shortest_path_follower import ShortestPathFollower
 
 IMAGE_DIR = os.path.join("examples", "images")
@@ -42,7 +41,7 @@ def shortest_path_example(mode):
             best_action = follower.get_next_action(
                 env.current_episode.goals[0].position
             )
-            observations = env.step(SIM_NAME_TO_ACTION[best_action.value])
+            observations = env.step(best_action.value)
             count_steps += 1
             im = observations["rgb"]
             imageio.imsave(os.path.join(dirname, "%03d.jpg" % count_steps), im)
diff --git a/habitat/sims/habitat_simulator.py b/habitat/sims/habitat_simulator.py
index 188ed2e453cdd42ddf203a9356c813c70f0ac12d..30c4ca5a4e60bf74738a5cc6947cc1b74747c8e0 100644
--- a/habitat/sims/habitat_simulator.py
+++ b/habitat/sims/habitat_simulator.py
@@ -34,10 +34,10 @@ def check_sim_obs(obs, sensor):
 
 
 class SimulatorActions(Enum):
-    LEFT = "look_left"
-    RIGHT = "look_right"
-    FORWARD = "move_forward"
-    STOP = "stop"
+    FORWARD = 0
+    LEFT = 1
+    RIGHT = 2
+    STOP = 3
 
 
 class HabitatSimRGBSensor(RGBSensor):
@@ -123,16 +123,6 @@ class HabitatSimSemanticSensor(SemanticSensor):
         return obs
 
 
-SIM_ACTION_TO_NAME = {
-    0: SimulatorActions.FORWARD.value,
-    1: SimulatorActions.LEFT.value,
-    2: SimulatorActions.RIGHT.value,
-    3: SimulatorActions.STOP.value,
-}
-
-SIM_NAME_TO_ACTION = {v: k for k, v in SIM_ACTION_TO_NAME.items()}
-
-
 class HabitatSim(habitat.Simulator):
     """Simulator wrapper over habitat-sim
 
@@ -157,25 +147,24 @@ class HabitatSim(habitat.Simulator):
             )
             sim_sensors.append(
                 getattr(
-                    habitat.sims.habitat_simulator,  # type: ignore
-                    sensor_cfg.TYPE,
+                    habitat.sims.habitat_simulator,
+                    sensor_cfg.TYPE,  # type: ignore
                 )(sensor_cfg)
             )
 
         self._sensor_suite = SensorSuite(sim_sensors)
         self.sim_config = self.create_sim_config(self._sensor_suite)
-        self._current_scene = self.sim_config.scene.id
+        self._current_scene = self.sim_config.sim_cfg.scene.id
         self._sim = habitat_sim.Simulator(self.sim_config)
         self._action_space = spaces.Discrete(
             len(self.sim_config.agents[0].action_space)
         )
 
         self._is_episode_active = False
-        self._controls = SIM_ACTION_TO_NAME
 
     def create_sim_config(
         self, _sensor_suite: SensorSuite
-    ) -> habitat_sim.SimulatorConfiguration:
+    ) -> habitat_sim.Configuration:
         sim_config = habitat_sim.SimulatorConfiguration()
         sim_config.scene.id = self.config.SCENE
         sim_config.gpu_device_id = self.config.HABITAT_SIM_V0.GPU_DEVICE_ID
@@ -208,18 +197,23 @@ class HabitatSim(habitat.Simulator):
         agent_config.sensor_specifications = sensor_specifications
         agent_config.action_space = {
             SimulatorActions.LEFT.value: habitat_sim.ActionSpec(
-                "lookLeft", {"amount": self.config.TURN_ANGLE}
+                "turn_left",
+                habitat_sim.ActuationSpec(amount=self.config.TURN_ANGLE),
             ),
             SimulatorActions.RIGHT.value: habitat_sim.ActionSpec(
-                "lookRight", {"amount": self.config.TURN_ANGLE}
+                "turn_right",
+                habitat_sim.ActuationSpec(amount=self.config.TURN_ANGLE),
             ),
             SimulatorActions.FORWARD.value: habitat_sim.ActionSpec(
-                "moveForward", {"amount": self.config.FORWARD_STEP_SIZE}
+                "move_forward",
+                habitat_sim.ActuationSpec(
+                    amount=self.config.FORWARD_STEP_SIZE
+                ),
             ),
-            SimulatorActions.STOP.value: habitat_sim.ActionSpec("stop", {}),
+            SimulatorActions.STOP.value: habitat_sim.ActionSpec("stop"),
         }
-        sim_config.agents = [agent_config]
-        return sim_config
+
+        return habitat_sim.Configuration(sim_config, [agent_config])
 
     @property
     def sensor_suite(self) -> SensorSuite:
@@ -259,12 +253,12 @@ class HabitatSim(habitat.Simulator):
             "episode is not active, environment not RESET or "
             "STOP action called previously"
         )
-        sim_action = self._controls[action]
-        if sim_action == SimulatorActions.STOP.value:
+
+        if action == SimulatorActions.STOP.value:
             self._is_episode_active = False
             sim_obs = self._sim.get_sensor_observations()
         else:
-            sim_obs = self._sim.step(sim_action)
+            sim_obs = self._sim.step(action)
 
         observations = self._sensor_suite.get_observations(sim_obs)
         return observations
@@ -296,6 +290,7 @@ class HabitatSim(habitat.Simulator):
         self.sim_config = self.create_sim_config(self._sensor_suite)
         if not is_same_scene:
             self._current_scene = config.SCENE
+            self._sim.close()
             del self._sim
             self._sim = habitat_sim.Simulator(self.sim_config)
 
@@ -321,41 +316,9 @@ class HabitatSim(habitat.Simulator):
             the source. For the last item in the returned list the action
             will be None.
         """
-        assert agent_id == 0, "No support of multi agent in {} yet.".format(
-            self.__class__.__name__
+        raise NotImplementedError(
+            "This function is no longer implemented.  Please use the greedy follower instead"
         )
-        action_pathfinder = self._sim.make_action_pathfinder(agent_id=agent_id)
-        action_shortest_path = habitat_sim.MultiGoalActionSpaceShortestPath()
-        action_shortest_path.requested_start.position = source.position
-        action_shortest_path.requested_start.rotation = source.rotation
-
-        for target in targets:
-            action_shortest_path.requested_ends.append(
-                habitat_sim.ActionSpacePathLocation(
-                    target.position, target.rotation
-                )
-            )
-
-        if not action_pathfinder.find_path(action_shortest_path):
-            return []
-
-        # add None action to last node in path
-        actions: List[Optional[int]] = [
-            SIM_NAME_TO_ACTION[action]
-            for action in action_shortest_path.actions
-        ]
-        actions.append(None)
-
-        shortest_path = [
-            ShortestPathPoint(position, rotation, action)
-            for position, rotation, action in zip(
-                action_shortest_path.points,
-                action_shortest_path.rotations,
-                actions,
-            )
-        ]
-
-        return shortest_path
 
     @property
     def up_vector(self):
@@ -415,11 +378,11 @@ class HabitatSim(habitat.Simulator):
 
     @property
     def index_stop_action(self):
-        return SIM_NAME_TO_ACTION[SimulatorActions.STOP.value]
+        return SimulatorActions.STOP.value
 
     @property
     def index_forward_action(self):
-        return SIM_NAME_TO_ACTION[SimulatorActions.FORWARD.value]
+        return SimulatorActions.FORWARD.value
 
     def _get_agent_config(self, agent_id: Optional[int] = None) -> Any:
         if agent_id is None:
@@ -432,9 +395,7 @@ class HabitatSim(habitat.Simulator):
         assert agent_id == 0, "No support of multi agent in {} yet.".format(
             self.__class__.__name__
         )
-        state = habitat_sim.AgentState()
-        self._sim.get_agent(agent_id).get_state(state)
-        return state
+        return self._sim.get_agent(agent_id).get_state()
 
     def set_agent_state(
         self,
diff --git a/habitat/tasks/nav/nav_task.py b/habitat/tasks/nav/nav_task.py
index abd33840ea8b5852c9095b6658e883528409127e..f99c7eb20d2dcb08029fe30337a7331c8599e2f0 100644
--- a/habitat/tasks/nav/nav_task.py
+++ b/habitat/tasks/nav/nav_task.py
@@ -18,7 +18,11 @@ from habitat.core.simulator import (
     SensorTypes,
     SensorSuite,
 )
-from habitat.tasks.utils import quaternion_to_rotation, cartesian_to_polar
+from habitat.tasks.utils import (
+    quaternion_to_rotation,
+    cartesian_to_polar,
+    quaternion_rotate_vector,
+)
 
 COLLISION_PROXIMITY_TOLERANCE: float = 1e-3
 
@@ -181,17 +185,14 @@ class PointGoalSensor(habitat.Sensor):
     def get_observation(self, observations, episode):
         agent_state = self._sim.get_agent_state()
         ref_position = agent_state.position
-        ref_rotation = agent_state.rotation
+        rotation_world_agent = agent_state.rotation
 
         direction_vector = (
             np.array(episode.goals[0].position, dtype=np.float32)
             - ref_position
         )
-        rotation_world_agent = quaternion_to_rotation(
-            ref_rotation[3], ref_rotation[0], ref_rotation[1], ref_rotation[2]
-        )
-        direction_vector_agent = np.dot(
-            rotation_world_agent.T, direction_vector
+        direction_vector_agent = quaternion_rotate_vector(
+            rotation_world_agent.inverse(), direction_vector
         )
 
         if self._goal_format == "POLAR":
@@ -227,17 +228,14 @@ class HeadingSensor(habitat.Sensor):
 
     def get_observation(self, observations, episode):
         agent_state = self._sim.get_agent_state()
-        # Quaternion is in x, y, z, w format
-        ref_rotation = agent_state.rotation
+        rotation_world_agent = agent_state.rotation
 
         direction_vector = np.array([0, 0, -1])
 
-        rotation_world_agent = quaternion_to_rotation(
-            ref_rotation[3], ref_rotation[0], ref_rotation[1], ref_rotation[2]
+        heading_vector = quaternion_rotate_vector(
+            rotation_world_agent.inverse(), direction_vector
         )
 
-        heading_vector = np.dot(rotation_world_agent.T, direction_vector)
-
         phi = cartesian_to_polar(-heading_vector[2], heading_vector[0])[1]
         return np.array(phi)
 
diff --git a/habitat/tasks/nav/shortest_path_follower.py b/habitat/tasks/nav/shortest_path_follower.py
index b1b3b217d62b9584dc1c8b7ae26172f5b84d0d83..4084b0416466fc2c04b122252f48c475cbb22a4b 100644
--- a/habitat/tasks/nav/shortest_path_follower.py
+++ b/habitat/tasks/nav/shortest_path_follower.py
@@ -4,7 +4,7 @@ import habitat_sim
 import numpy as np
 
 from habitat.sims.habitat_simulator import HabitatSim
-from habitat.sims.habitat_simulator import SIM_NAME_TO_ACTION, SimulatorActions
+from habitat.sims.habitat_simulator import SimulatorActions
 from habitat.utils.geometry_utils import (
     angle_between_quaternions,
     quaternion_from_two_vectors,
@@ -16,7 +16,7 @@ EPSILON = 1e-6
 
 
 def action_to_one_hot(action: int) -> np.array:
-    one_hot = np.zeros(len(SIM_NAME_TO_ACTION), dtype=np.float32)
+    one_hot = np.zeros(len(SimulatorActions), dtype=np.float32)
     one_hot[action] = 1
     return one_hot
 
@@ -59,7 +59,7 @@ class ShortestPathFollower:
         self, action: SimulatorActions
     ) -> Union[SimulatorActions, np.array]:
         if self._return_one_hot:
-            return action_to_one_hot(SIM_NAME_TO_ACTION[action.value])
+            return action_to_one_hot(action.value)
         else:
             return action
 
@@ -82,22 +82,17 @@ class ShortestPathFollower:
         self, grad_dir: np.quaternion
     ) -> Union[SimulatorActions, np.array]:
         current_state = self._sim.get_agent_state()
-        alpha = angle_between_quaternions(
-            grad_dir, quaternion_xyzw_to_wxyz(current_state.rotation)
-        )
+        alpha = angle_between_quaternions(grad_dir, current_state.rotation)
         if alpha <= np.deg2rad(self._sim.config.TURN_ANGLE) + EPSILON:
             return self._get_return_value(SimulatorActions.FORWARD)
         else:
-            sim_action = SIM_NAME_TO_ACTION[SimulatorActions.LEFT.value]
+            sim_action = SimulatorActions.LEFT.value
             self._sim.step(sim_action)
             best_turn = (
                 SimulatorActions.LEFT
                 if (
                     angle_between_quaternions(
-                        grad_dir,
-                        quaternion_xyzw_to_wxyz(
-                            self._sim.get_agent_state().rotation
-                        ),
+                        grad_dir, self._sim.get_agent_state().rotation
                     )
                     < alpha
                 )
@@ -145,7 +140,7 @@ class ShortestPathFollower:
             best_geodesic_delta = -2 * self._max_delta
             best_rotation = current_rotation
             for _ in range(0, 360, self._sim.config.TURN_ANGLE):
-                sim_action = SIM_NAME_TO_ACTION[SimulatorActions.FORWARD.value]
+                sim_action = SimulatorActions.FORWARD.value
                 self._sim.step(sim_action)
                 new_delta = current_dist - self._geo_dist(goal_pos)
 
@@ -169,12 +164,12 @@ class ShortestPathFollower:
                     reset_sensors=False,
                 )
 
-                sim_action = SIM_NAME_TO_ACTION[SimulatorActions.LEFT.value]
+                sim_action = SimulatorActions.LEFT.value
                 self._sim.step(sim_action)
 
             self._reset_agent_state(current_state)
 
-            max_grad_dir = quaternion_xyzw_to_wxyz(best_rotation)
+            max_grad_dir = best_rotation
 
         return max_grad_dir
 
diff --git a/habitat/tasks/utils.py b/habitat/tasks/utils.py
index eb655de20b8707a3838439fe2c90044c3881e1c1..077da1d52fd3e112d507af2832947c2a30c859a9 100644
--- a/habitat/tasks/utils.py
+++ b/habitat/tasks/utils.py
@@ -5,6 +5,7 @@
 # LICENSE file in the root directory of this source tree.
 
 import numpy as np
+import quaternion
 
 
 def quaternion_to_rotation(q_r, q_i, q_j, q_k):
@@ -35,6 +36,21 @@ def quaternion_to_rotation(q_r, q_i, q_j, q_k):
     return rotation_mat
 
 
+def quaternion_rotate_vector(quat: np.quaternion, v: np.array) -> np.array:
+    r"""Rotates a vector by a quaternion
+
+    Args:
+        quat: The quaternion to rotate by
+        v: The vector to rotate
+
+    Returns:
+        np.array: The rotated vector
+    """
+    vq = np.quaternion(0, 0, 0, 0)
+    vq.imag = v
+    return (quat * vq * quat.inverse()).imag
+
+
 def cartesian_to_polar(x, y):
     rho = np.sqrt(x ** 2 + y ** 2)
     phi = np.arctan2(y, x)
diff --git a/test/test_baseline_agents.py b/test/test_baseline_agents.py
index 230e5949d0047b03a65260bbc1f4894b7780c792..81dad3b99bac6b3b1d993f6d575b1e2fd51a568f 100644
--- a/test/test_baseline_agents.py
+++ b/test/test_baseline_agents.py
@@ -4,14 +4,25 @@
 # This source code is licensed under the MIT license found in the
 # LICENSE file in the root directory of this source tree.
 
-from baselines.agents import simple_agents, ppo_agents
 import habitat
 import os
 import pytest
+from baselines.agents import simple_agents
+
+try:
+    import torch
+
+    has_torch = True
+except ImportError:
+    has_torch = False
+
+if has_torch:
+    from baselines.agents import ppo_agents
 
 CFG_TEST = "test/habitat_all_sensors_test.yaml"
 
 
+@pytest.mark.skipif(not has_torch, reason="Test needs torch")
 def test_ppo_agents():
     config = ppo_agents.get_defaut_config()
     config.MODEL_PATH = ""
diff --git a/test/test_habitat_env.py b/test/test_habitat_env.py
index 3852b4ed7b43338e3238f9935d074846667ae039..2094d5b13c20503f4b091adaa9c66ea3e46305bc 100644
--- a/test/test_habitat_env.py
+++ b/test/test_habitat_env.py
@@ -14,11 +14,7 @@ import habitat
 from habitat.config.default import get_config
 from habitat.core.simulator import AgentState
 from habitat.datasets.pointnav.pointnav_dataset import PointNavDatasetV1
-from habitat.sims.habitat_simulator import (
-    SimulatorActions,
-    SIM_ACTION_TO_NAME,
-    SIM_NAME_TO_ACTION,
-)
+from habitat.sims.habitat_simulator import SimulatorActions
 from habitat.tasks.nav.nav_task import NavigationEpisode, NavigationGoal
 
 CFG_TEST = "test/habitat_all_sensors_test.yaml"
@@ -75,8 +71,8 @@ def _vec_env_test_fn(configs, datasets, multiprocessing_start_method):
     )
     envs.reset()
     non_stop_actions = [
-        k
-        for k, v in SIM_ACTION_TO_NAME.items()
+        v
+        for v in range(len(SimulatorActions))
         if v != SimulatorActions.STOP.value
     ]
 
@@ -129,8 +125,8 @@ def test_threaded_vectorized_env():
     envs = habitat.ThreadedVectorEnv(env_fn_args=env_fn_args)
     envs.reset()
     non_stop_actions = [
-        k
-        for k, v in SIM_ACTION_TO_NAME.items()
+        v
+        for v in range(len(SimulatorActions))
         if v != SimulatorActions.STOP.value
     ]
 
@@ -159,8 +155,8 @@ def test_env():
     env.reset()
 
     non_stop_actions = [
-        k
-        for k, v in SIM_ACTION_TO_NAME.items()
+        v
+        for v in range(len(SimulatorActions))
         if v != SimulatorActions.STOP.value
     ]
     for _ in range(config.ENVIRONMENT.MAX_EPISODE_STEPS):
@@ -174,7 +170,7 @@ def test_env():
 
     env.reset()
 
-    env.step(SIM_NAME_TO_ACTION[SimulatorActions.STOP.value])
+    env.step(SimulatorActions.STOP.value)
     # check for STOP action
     assert env.episode_over is True, (
         "episode should be over after STOP " "action"
@@ -203,8 +199,8 @@ def test_rl_vectorized_envs():
     envs = habitat.VectorEnv(make_env_fn=make_rl_env, env_fn_args=env_fn_args)
     envs.reset()
     non_stop_actions = [
-        k
-        for k, v in SIM_ACTION_TO_NAME.items()
+        v
+        for v in range(len(SimulatorActions))
         if v != SimulatorActions.STOP.value
     ]
 
@@ -242,8 +238,8 @@ def test_rl_env():
     observation = env.reset()
 
     non_stop_actions = [
-        k
-        for k, v in SIM_ACTION_TO_NAME.items()
+        v
+        for v in range(len(SimulatorActions))
         if v != SimulatorActions.STOP.value
     ]
     for _ in range(config.ENVIRONMENT.MAX_EPISODE_STEPS):
@@ -255,14 +251,14 @@ def test_rl_env():
     assert done is True, "episodes should be over after max_episode_steps"
 
     env.reset()
-    observation, reward, done, info = env.step(
-        SIM_NAME_TO_ACTION[SimulatorActions.STOP.value]
-    )
+    observation, reward, done, info = env.step(SimulatorActions.STOP.value)
     assert done is True, "done should be true after STOP action"
 
     env.close()
 
 
+# TODO Bring back this test for the greedy follower
+@pytest.mark.skip
 def test_action_space_shortest_path():
     config = get_config()
     if not os.path.exists(config.SIMULATOR.SCENE):
diff --git a/test/test_sensors.py b/test/test_sensors.py
index 788bb72968e87c47ea7e9da1ccf6d00c0a12c673..dbdaa208d8e598b6c05adec55189cf1982d66d83 100644
--- a/test/test_sensors.py
+++ b/test/test_sensors.py
@@ -15,7 +15,7 @@ from habitat.tasks.nav.nav_task import (
     NavigationEpisode,
     COLLISION_PROXIMITY_TOLERANCE,
 )
-from habitat.sims.habitat_simulator import SimulatorActions, SIM_NAME_TO_ACTION
+from habitat.sims.habitat_simulator import SimulatorActions
 
 CFG_TEST = "test/habitat_all_sensors_test.yaml"
 
@@ -124,9 +124,9 @@ def test_collisions():
     np.random.seed(123)
 
     actions = [
-        SIM_NAME_TO_ACTION[SimulatorActions.FORWARD.value],
-        SIM_NAME_TO_ACTION[SimulatorActions.LEFT.value],
-        SIM_NAME_TO_ACTION[SimulatorActions.RIGHT.value],
+        SimulatorActions.FORWARD.value,
+        SimulatorActions.LEFT.value,
+        SimulatorActions.RIGHT.value,
     ]
 
     for _ in range(20):
diff --git a/test/test_trajectory_sim.py b/test/test_trajectory_sim.py
index db448bc9dd4b9d710821a112b86b156aec1c9c19..25bce23f4d1d5a0468a78b0f8299effe54336fc9 100644
--- a/test/test_trajectory_sim.py
+++ b/test/test_trajectory_sim.py
@@ -50,7 +50,7 @@ def test_sim_trajectory():
                     np.array(
                         test_trajectory["rotations"][i], dtype=np.float32
                     ),
-                    state.rotation,
+                    np.array([*state.rotation.imag, state.rotation.real]),
                 )
                 is True
             ), "mismatch in rotation " "at step {}".format(i)