First Robot

A minimal example: connect to the mock backend, read a camera frame, and drive to a goal.

import asyncio
from threewe import Robot

async def main():
    async with Robot(backend="mock") as robot:
        image = robot.get_image()          # camera frame, (480, 640, 3) uint8
        await robot.move_to(x=2.0, y=1.0)  # navigate with path planning
        pose = robot.get_pose()
        print(f"Arrived at: ({pose.x:.2f}, {pose.y:.2f})")

asyncio.run(main())

No ROS2, no Gazebo, no GPU needed. The mock backend simulates everything locally with zero dependencies beyond numpy.
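Because the mock backend needs no simulator process, it is handy in automated tests. A minimal sketch, assuming pytest with the pytest-asyncio plugin; the goal tolerance is an arbitrary choice for illustration:

import pytest
from threewe import Robot

@pytest.mark.asyncio
async def test_reaches_goal():
    async with Robot(backend="mock") as robot:
        await robot.move_to(x=1.0, y=0.5)
        pose = robot.get_pose()
        # Tolerance is an assumption; adjust to the backend's actual accuracy.
        assert abs(pose.x - 1.0) < 0.1 and abs(pose.y - 0.5) < 0.1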

Change one parameter to run on different backends:

# Local simulation (no dependencies)
Robot(backend="mock")
# Full physics simulation (Gazebo Harmonic)
Robot(backend="gazebo")
# GPU-accelerated training (Isaac Sim)
Robot(backend="isaac_sim")
# Real hardware (ROS2 Jazzy + Pi5 + ESP32)
Robot(backend="real")

All backends share the exact same API — zero code changes needed.
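Since the API is identical, one common pattern is to pick the backend from configuration. In this sketch the THREEWE_BACKEND variable is a name invented for illustration, not something the library reads itself:

import asyncio
import os
from threewe import Robot

async def main():
    # e.g. THREEWE_BACKEND=gazebo python run.py (hypothetical variable name)
    backend = os.environ.get("THREEWE_BACKEND", "mock")
    async with Robot(backend=backend) as robot:
        await robot.move_to(x=2.0, y=1.0)

asyncio.run(main())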

The package also registers Gymnasium environments for reinforcement learning:

import gymnasium as gym
import threewe.gym  # Importing registers the 3we environments

env = gym.make("3we/Navigation-v1", render_mode="rgb_array")
obs, info = env.reset()
for _ in range(1000):
    action = env.action_space.sample()  # random policy for demonstration
    obs, reward, terminated, truncated, info = env.step(action)
    if terminated or truncated:
        obs, info = env.reset()
env.close()
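Because the environment is created with render_mode="rgb_array", Gymnasium's stock RecordVideo wrapper can capture rollouts to disk. A sketch; the wrapper needs moviepy installed, and the folder name is arbitrary:

import gymnasium as gym
from gymnasium.wrappers import RecordVideo
import threewe.gym  # Registers environments

env = gym.make("3we/Navigation-v1", render_mode="rgb_array")
env = RecordVideo(env, video_folder="videos/")  # writes one video per episode

obs, info = env.reset()
for _ in range(200):
    obs, reward, terminated, truncated, info = env.step(env.action_space.sample())
    if terminated or truncated:
        obs, info = env.reset()
env.close()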

Robots can also follow natural-language instructions, executed by a vision-language model:

import asyncio
from threewe import Robot

async def main():
    async with Robot(backend="mock") as robot:
        result = await robot.execute_instruction("find the red bottle and stop near it")
        print(f"Success: {result.success}")
        print(f"Description: {result.description}")

asyncio.run(main())

Requires pip install threewe[ai] and the OPENAI_API_KEY environment variable to be set.
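Instruction execution can fail, so production code should check result.success. A retry sketch using only the result fields shown above; run_with_retry and the attempt count are inventions for illustration:

import asyncio
from threewe import Robot

async def run_with_retry(robot, instruction, attempts=3):
    # Hypothetical helper: retry a VLM instruction a few times before giving up.
    for i in range(attempts):
        result = await robot.execute_instruction(instruction)
        if result.success:
            return result
        print(f"Attempt {i + 1} failed: {result.description}")
    return result

async def main():
    async with Robot(backend="mock") as robot:
        await run_with_retry(robot, "find the red bottle and stop near it")

asyncio.run(main())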

Robot motion can be recorded to disk for dataset collection:

import asyncio
from threewe import Robot
from threewe.data import TrajectoryRecorder

async def main():
    recorder = TrajectoryRecorder(output_dir="trajectories/")
    async with Robot(backend="mock") as robot:
        recorder.start(robot)              # begin logging this robot's motion
        await robot.move_to(x=3.0, y=2.0)
        await robot.move_forward(1.0)
        await robot.rotate(1.57)           # about 90 degrees
        recorder.stop()

asyncio.run(main())
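If start()/stop() pairs delimit individual episodes (an assumption; only the calls above are documented), one recorder can capture several demonstrations into the same directory:

import asyncio
from threewe import Robot
from threewe.data import TrajectoryRecorder

GOALS = [(3.0, 2.0), (0.0, 0.0), (1.5, -1.0)]

async def main():
    recorder = TrajectoryRecorder(output_dir="trajectories/")
    async with Robot(backend="mock") as robot:
        for x, y in GOALS:
            recorder.start(robot)   # assumed to begin a new episode
            await robot.move_to(x=x, y=y)
            recorder.stop()         # assumed to finalize it

asyncio.run(main())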

The core Robot API, shared by every backend:

Method                                   Description
robot.get_image()                        Camera image, (480, 640, 3) uint8
robot.get_camera_image()                 Same as get_image()
robot.get_lidar_scan()                   360-degree LiDAR scan
robot.get_pose()                         Position (x, y, theta) in the map frame
robot.get_velocity()                     Current body-frame velocity
robot.get_imu()                          IMU sensor data
robot.get_observation()                  Standardized dict for ML models
await robot.move_to(x, y)                Navigate to a goal with path planning
await robot.move_forward(d)              Drive straight for d meters
await robot.rotate(a)                    Rotate by a radians
robot.set_velocity(vx, vy, omega)        Direct velocity command
robot.stop()                             Halt all motion
await robot.execute_instruction(text)    VLM-powered task execution
robot.execute_action(action)             Execute a policy network's output
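
get_observation() and execute_action() pair naturally into a policy control loop. A minimal sketch; the random stand-in policy and the 3-vector (vx, vy, omega) action shape are assumptions, since the real format depends on the trained model:

import asyncio
import numpy as np
from threewe import Robot

def policy(obs):
    # Stand-in for a trained network; the (vx, vy, omega) 3-vector
    # action shape is an assumption for this sketch.
    return np.random.uniform(-0.5, 0.5, size=3)

async def main():
    async with Robot(backend="mock") as robot:
        for _ in range(100):
            obs = robot.get_observation()      # standardized dict for ML models
            robot.execute_action(policy(obs))
            await asyncio.sleep(0.1)           # ~10 Hz control loop
        robot.stop()

asyncio.run(main())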