stompy design
codekansas committed Apr 1, 2024
1 parent b3afa0b commit b16d712
Showing 9 changed files with 559 additions and 179 deletions.
7 changes: 7 additions & 0 deletions sim/env.py
@@ -6,3 +6,10 @@

def model_dir() -> Path:
    return Path(os.environ.get("MODEL_DIR", "models"))


def stompy_urdf_path() -> Path:
    stompy_path = model_dir() / "robots" / "stompy" / "robot.urdf"
    if not stompy_path.exists():
        raise FileNotFoundError(f"URDF file not found: {stompy_path}")
    return stompy_path.resolve()
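
For context, here is a minimal usage sketch of the new helper (not part of this commit; it assumes a checkout where models/robots/stompy/robot.urdf exists):

import os

from sim.env import stompy_urdf_path

# MODEL_DIR defaults to "models"; override it to point at another checkout.
os.environ["MODEL_DIR"] = "models"

urdf = stompy_urdf_path()  # raises FileNotFoundError if the URDF is missing
print(urdf)  # resolved absolute path to robot.urdf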
Empty file added sim/humanoid_gym/__init__.py
230 changes: 230 additions & 0 deletions sim/humanoid_gym/stompy_config.py
@@ -0,0 +1,230 @@
"""Configuration for the Stompy humanoid robot."""

from humanoid.envs.base.legged_robot_config import LeggedRobotCfg, LeggedRobotCfgPPO

from sim.env import stompy_urdf_path


class StompyConfig(LeggedRobotCfg):
    """Configuration class for the Stompy humanoid robot."""

    class env(LeggedRobotCfg.env):  # noqa: N801
        frame_stack = 15
        c_frame_stack = 3
        num_single_obs = 47
        num_observations = int(frame_stack * num_single_obs)
        single_num_privileged_obs = 73
        num_privileged_obs = int(c_frame_stack * single_num_privileged_obs)
        num_actions = 12
        num_envs = 4096
        episode_length_s = 24  # episode length in seconds
        use_ref_actions = False
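
The observation sizes above come from frame stacking: the policy sees the last frame_stack single-frame observations concatenated into one vector (15 x 47 = 705 = num_observations). A minimal sketch of that bookkeeping, using a hypothetical buffer rather than the actual humanoid-gym implementation:

from collections import deque

import numpy as np

frame_stack = 15
num_single_obs = 47

# Rolling window of the most recent single-frame observations, oldest first.
obs_history = deque([np.zeros(num_single_obs)] * frame_stack, maxlen=frame_stack)


def stacked_obs(new_obs: np.ndarray) -> np.ndarray:
    """Push the newest frame and return the flat stacked observation."""
    obs_history.append(new_obs)
    return np.concatenate(list(obs_history))  # shape: (frame_stack * num_single_obs,)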

    class safety:  # noqa: N801
        # safety factors
        pos_limit = 1.0
        vel_limit = 1.0
        torque_limit = 0.85

    class asset(LeggedRobotCfg.asset):  # noqa: N801
        file = str(stompy_urdf_path())

        name = "Stompy"
        foot_name = "ankle_roll"
        knee_name = "knee"

        terminate_after_contacts_on = ["base_link"]
        penalize_contacts_on = ["base_link"]
        self_collisions = 0  # 1 to disable, 0 to enable (bitwise filter)
        flip_visual_attachments = False
        replace_cylinder_with_capsule = False
        fix_base_link = False

    class terrain(LeggedRobotCfg.terrain):  # noqa: N801
        mesh_type = "plane"
        # mesh_type = 'trimesh'
        curriculum = False
        # rough terrain only:
        measure_heights = False
        static_friction = 0.6
        dynamic_friction = 0.6
        terrain_length = 8.0
        terrain_width = 8.0
        num_rows = 20  # number of terrain rows (levels)
        num_cols = 20  # number of terrain cols (types)
        max_init_terrain_level = 10  # starting curriculum state
        # plane, obstacles, uniform, slope_up, slope_down, stair_up, stair_down
        terrain_proportions = [0.2, 0.2, 0.4, 0.1, 0.1, 0, 0]
        restitution = 0.0

    class noise:  # noqa: N801
        add_noise = True
        noise_level = 0.6  # scales other values

        class noise_scales:  # noqa: N801
            dof_pos = 0.05
            dof_vel = 0.5
            ang_vel = 0.1
            lin_vel = 0.05
            quat = 0.03
            height_measurements = 0.1

    class init_state(LeggedRobotCfg.init_state):  # noqa: N801
        pos = [0.0, 0.0, 0.95]

        default_joint_angles = {  # = target angles [rad] when action = 0.0
            "left_leg_yaw_joint": -0.0,
            "left_leg_roll_joint": -0.0,
            "left_leg_pitch_joint": 0.0,
            "left_knee_joint": 0.0,
            "left_ankle_pitch_joint": 0.0,
            "left_ankle_roll_joint": 0.0,
            "right_leg_yaw_joint": 0.0,
            "right_leg_roll_joint": 0.0,
            "right_leg_pitch_joint": -0.0,
            "right_knee_joint": -0.0,
            "right_ankle_pitch_joint": -0.0,
            "right_ankle_roll_joint": 0.0,
        }

    class control(LeggedRobotCfg.control):  # noqa: N801
        # PD Drive parameters:
        stiffness = {"leg_roll": 200.0, "leg_pitch": 350.0, "leg_yaw": 200.0, "knee": 350.0, "ankle": 15}
        damping = {"leg_roll": 10, "leg_pitch": 10, "leg_yaw": 10, "knee": 10, "ankle": 10}

        # action scale: target angle = actionScale * action + defaultAngle
        action_scale = 0.25
        # decimation: number of control action updates @ sim DT per policy DT
        decimation = 10  # 100 Hz policy rate
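
Putting the control block together: the policy action is scaled and offset by the default joint angles, and a PD loop drives each joint toward that target for decimation sim steps. A rough sketch under those assumptions (hypothetical helper, not the actual environment step):

import numpy as np

action_scale = 0.25
decimation = 10  # sim steps (dt = 0.001 s) per policy step -> 100 Hz policy


def pd_torque(action, default_angle, q, qd, kp, kd):
    """PD torque toward the scaled action target, re-evaluated every sim step."""
    target = action_scale * action + default_angle
    return kp * (target - q) - kd * qd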

    class sim(LeggedRobotCfg.sim):  # noqa: N801
        dt = 0.001  # 1000 Hz
        substeps = 1  # 2
        up_axis = 1  # 0 is y, 1 is z

        class physx(LeggedRobotCfg.sim.physx):  # noqa: N801
            num_threads = 10
            solver_type = 1  # 0: pgs, 1: tgs
            num_position_iterations = 4
            num_velocity_iterations = 0
            contact_offset = 0.01  # [m]
            rest_offset = 0.0  # [m]
            bounce_threshold_velocity = 0.1  # [m/s]
            max_depenetration_velocity = 1.0
            max_gpu_contact_pairs = 2**23  # 2**24 -> needed for 8000 envs and more
            default_buffer_size_multiplier = 5
            # 0: never, 1: last sub-step, 2: all sub-steps (default=2)
            contact_collection = 2

    class domain_rand:  # noqa: N801
        randomize_friction = True
        friction_range = [0.1, 2.0]
        randomize_base_mass = True
        added_mass_range = [-5.0, 5.0]
        push_robots = True
        push_interval_s = 4
        max_push_vel_xy = 0.2
        max_push_ang_vel = 0.4
        dynamic_randomization = 0.02
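
These domain_rand knobs drive per-environment physics perturbations during training. A hedged sketch of the typical sampling in legged-gym-style pipelines (names here are illustrative):

import numpy as np

rng = np.random.default_rng(0)

friction = rng.uniform(0.1, 2.0)  # randomize_friction over friction_range
base_mass_delta = rng.uniform(-5.0, 5.0)  # randomize_base_mass over added_mass_range
# Roughly every push_interval_s seconds, kick the base with a random planar velocity:
push_vel_xy = rng.uniform(-0.2, 0.2, size=2)  # bounded by max_push_vel_xy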

    class commands(LeggedRobotCfg.commands):  # noqa: N801
        # Command components: lin_vel_x, lin_vel_y, ang_vel_yaw, heading
        # (in heading mode, ang_vel_yaw is recomputed from heading error)
        num_commands = 4
        resampling_time = 8.0  # time before commands are changed [s]
        heading_command = True  # if true: compute ang vel command from heading error

        class ranges:  # noqa: N801
            lin_vel_x = [-0.3, 0.6]  # min max [m/s]
            lin_vel_y = [-0.3, 0.3]  # min max [m/s]
            ang_vel_yaw = [-0.3, 0.3]  # min max [rad/s]
            heading = [-3.14, 3.14]
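
With heading_command enabled, only the heading target is sampled from the range above; the yaw-rate command is recomputed from heading error each step. A sketch following the legged-gym convention (the 0.5 gain and the clipping bounds are illustrative):

import numpy as np


def wrap_to_pi(angle: float) -> float:
    """Wrap an angle into [-pi, pi)."""
    return (angle + np.pi) % (2.0 * np.pi) - np.pi


def yaw_rate_command(target_heading: float, base_yaw: float) -> float:
    # Proportional controller on heading error, clipped to the ang_vel_yaw range.
    return float(np.clip(0.5 * wrap_to_pi(target_heading - base_yaw), -0.3, 0.3))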

    class rewards:  # noqa: N801
        base_height_target = 0.89
        min_dist = 0.2
        max_dist = 0.5
        # put some settings here for LLM parameter tuning
        target_joint_pos_scale = 0.17  # rad
        target_feet_height = 0.06  # m
        cycle_time = 0.64  # sec
        # if true, negative total rewards are clipped at zero (avoids early termination problems)
        only_positive_rewards = True
        # tracking reward = exp(-error * sigma); see the sketch after the scales block
        tracking_sigma = 5
        max_contact_force = 700  # forces above this value are penalized

        class scales:  # noqa: N801
            # reference motion tracking
            joint_pos = 1.6
            feet_clearance = 1.0
            feet_contact_number = 1.2
            # gait
            feet_air_time = 1.0
            foot_slip = -0.05
            feet_distance = 0.2
            knee_distance = 0.2
            # contact
            feet_contact_forces = -0.01
            # vel tracking
            tracking_lin_vel = 1.2
            tracking_ang_vel = 1.1
            vel_mismatch_exp = 0.5  # lin_z; ang x,y
            low_speed = 0.2
            track_vel_hard = 0.5
            # base pos
            default_joint_pos = 0.5
            orientation = 1.0
            base_height = 0.2
            base_acc = 0.2
            # energy
            action_smoothness = -0.002
            torques = -1e-5
            dof_vel = -5e-4
            dof_acc = -1e-7
            collision = -1.0
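
The tracking_lin_vel and tracking_ang_vel terms above are where tracking_sigma enters: humanoid-gym-style rewards use an exponential kernel over squared command error. A minimal sketch (illustrative, not the exact reward code):

import numpy as np

tracking_sigma = 5.0


def tracking_reward(cmd_vel: np.ndarray, actual_vel: np.ndarray) -> float:
    """exp(-error * sigma): 1.0 at perfect tracking, decaying with squared error."""
    error = float(np.sum(np.square(cmd_vel - actual_vel)))
    return float(np.exp(-error * tracking_sigma))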

    class normalization:  # noqa: N801
        class obs_scales:  # noqa: N801
            lin_vel = 2.0
            ang_vel = 1.0
            dof_pos = 1.0
            dof_vel = 0.05
            quat = 1.0
            height_measurements = 5.0

        clip_observations = 18.0
        clip_actions = 18.0
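
The normalization block is applied element-wise before observations reach the policy: each term is multiplied by its obs_scale, then clipped. A sketch of that scale-then-clip step (illustrative):

import numpy as np

clip_observations = 18.0


def normalize_obs(raw_obs: np.ndarray, scales: np.ndarray) -> np.ndarray:
    # Scale each term (e.g. lin_vel by 2.0, dof_vel by 0.05), then clip outliers.
    return np.clip(raw_obs * scales, -clip_observations, clip_observations)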


class StompyCfgPPO(LeggedRobotCfgPPO):
    seed = 5
    runner_class_name = "OnPolicyRunner"  # DWLOnPolicyRunner

    class policy:  # noqa: N801
        init_noise_std = 1.0
        actor_hidden_dims = [512, 256, 128]
        critic_hidden_dims = [768, 256, 128]

    class algorithm(LeggedRobotCfgPPO.algorithm):  # noqa: N801
        entropy_coef = 0.001
        learning_rate = 1e-5
        num_learning_epochs = 2
        gamma = 0.994
        lam = 0.9
        num_mini_batches = 4

    class runner:  # noqa: N801
        policy_class_name = "ActorCritic"
        algorithm_class_name = "PPO"
        num_steps_per_env = 60  # per iteration
        max_iterations = 3001  # number of policy updates

        # logging
        save_interval = 100  # check for potential saves every this many iterations
        experiment_name = "Stompy_ppo"
        run_name = ""
        # load and resume
        resume = False
        load_run = -1  # -1 = last run
        checkpoint = -1  # -1 = last saved model
        resume_path = None  # updated from load_run and checkpoint
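
As a sanity check on the runner settings: with num_envs = 4096 from the env block above, each iteration collects 4096 x 60 = 245,760 transitions, replayed for num_learning_epochs = 2 epochs in 4 mini-batches:

num_envs = 4096
num_steps_per_env = 60
num_mini_batches = 4

batch_size = num_envs * num_steps_per_env  # 245760 transitions per iteration
mini_batch_size = batch_size // num_mini_batches  # 61440 samples per PPO mini-batch
print(batch_size, mini_batch_size)  # 245760 61440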
Empty file added sim/scripts/__init__.py
